filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---|
wallet/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wallet.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
action/event.py | import os
import discord
from .helper import *
from dotenv import load_dotenv
load_dotenv()
QUEUE_SERVER_NAME = os.getenv('QUEUE_SERVER_NAME')
QUEUE_CHANNEL_NAME = os.getenv('QUEUE_CHANNEL_NAME')
# Defines how the bot should respond to specific Discord events.
# Note: bot.guilds is only populated once the client has connected, so this
# must be called on a logged-in bot or the guild lookup below returns None.
def define_events(bot):
# Retrieve the queue text channel.
server = discord.utils.get(bot.guilds, name=QUEUE_SERVER_NAME)
queue = discord.utils.get(server.text_channels, name=QUEUE_CHANNEL_NAME)
# Event: the bot is connected and is ready.
@bot.event
async def on_ready():
print("{0.user} connected and ready.".format(bot))
# Clear the queue of all tickets.
await purge_text_channel(queue)
print("{0.name} purged.".format(queue))
# Event: a message was sent on the server.
@bot.event
async def on_message(message):
# Ignore messages from the bot itself.
if message.author == bot.user:
return
# If message was sent to the queue channel...
if message.channel == queue:
# Delete the message
await message.delete()
await bot.process_commands(message)
# # Event: a message was sent on the server.
# @queueBot.event
# async def on_message(message):
# global deletedByManager
# # If message is from queueBot, ignore it.
# if message.author == queueBot.user:
# return
# # Let the queueBot process the commands.
# if message.content.startswith('!'):
# await queueBot.process_commands(message)
# return
# # If message is from the queue channel...
# if message.channel.name == CHANNEL_NAME:
# # Delete the message before replacing it with a formal ticket.
# deletedByManager = message
# await message.delete()
# # If client already has a message in the queue...
# if message.author in [ticket.message.author for ticket in ticketQueue]:
# # Do not create a formal ticket. Send reply instead.
# reply = "You already have a request in the queue. You may not" \
# " have more than one request in a the queue at any given time."
# await message.author.send(reply)
# else:
# # Create a formal ticket.
# newTicket = Ticket(message)
# ticketQueue.add(newTicket)
# await output_ticket_queue(message.channel, ticketQueue, newTicket)
# # Event: an error occurred after a command issue.
# @queueBot.event
# async def on_command_error(context, exception):
# # Because the client does not have permissions.
# if isinstance(exception, commands.errors.MissingRole):
# # Send a reply to the author in a private message.
# reply = "You do not have permissions to execute this command." \
# " Make sure your message does not begin with '!' if did not intend" \
# " to type a command."
# await context.author.send(reply)
# # If the message is from the queue channel, delete it.
# if context.channel.name == CHANNEL_NAME:
# await context.message.delete()
# elif isinstance(exception, commands.errors.NoPrivateMessage):
# # Send a reply to the author in a private message.
# reply = "You cannot execute this command in private messages."
# await context.author.send(reply)
# else:
# print(type(exception))
# print(exception)
# # Event: a message was deleted by a client.
# @queueBot.event
# async def on_message_delete(message):
# global deletedByManager
# global bulkDelete
# global deletedCommand
# # Ignore this event if the message was deleted by the queueBot.
# if deletedByManager is message:
# deletedByManager = None
# return
# # Ignore delete if part of a bulk deletion.
# if bulkDelete is True:
# return
# # Ignore if this is a deleted command.
# if message.content.startswith('!'):
# return
# # If message was from the queue channel...
# if message.channel.name == CHANNEL_NAME:
# # Remove the corresponding ticket from the queue.
# for ticket in ticketQueue:
# if ticket.message.author == message.author:
# break
# ticketQueue.remove(ticket)
# reply = "Your request was removed from the wait queue."
# await message.author.send(reply)
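# Usage sketch (illustrative; names other than define_events are assumptions):
# wiring these handlers into a bot from its entry point. As noted above,
# bot.guilds is only populated after login, so the channel lookup at the top
# of define_events must run on a connected client.
#
#   from discord.ext import commands
#   bot = commands.Bot(command_prefix='!')
#   define_events(bot)
#   bot.run(os.getenv('DISCORD_TOKEN'))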
| [] | [] | ["QUEUE_SERVER_NAME", "QUEUE_CHANNEL_NAME"] | [] | ["QUEUE_SERVER_NAME", "QUEUE_CHANNEL_NAME"] | python | 2 | 0 | |
vendor/kmodules.xyz/client-go/meta/incluster.go | /*
Copyright The Kmodules Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package meta
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"strings"
"time"
core "k8s.io/api/core/v1"
"k8s.io/client-go/rest"
)
func Namespace() string {
if ns := os.Getenv("KUBE_NAMESPACE"); ns != "" {
return ns
}
if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
return ns
}
}
return core.NamespaceDefault
}
// PossiblyInCluster returns true if the process appears to be running inside a Kubernetes cluster.
func PossiblyInCluster() bool {
fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token")
return os.Getenv("KUBERNETES_SERVICE_HOST") != "" &&
os.Getenv("KUBERNETES_SERVICE_PORT") != "" &&
err == nil && !fi.IsDir()
}
func APIServerCertificate(cfg *rest.Config) (*x509.Certificate, error) {
err := rest.LoadTLSFiles(cfg)
if err != nil {
return nil, err
}
// create ca cert pool
caCertPool := x509.NewCertPool()
ok := caCertPool.AppendCertsFromPEM(cfg.CAData)
if !ok {
return nil, fmt.Errorf("can't append caCert to caCertPool")
}
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: &tls.Config{RootCAs: caCertPool},
}
client := &http.Client{Transport: tr}
resp, err := client.Get(cfg.Host)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if len(resp.TLS.VerifiedChains) > 0 {
return resp.TLS.VerifiedChains[0][0], nil
}
return nil, fmt.Errorf("no cert found")
}
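// Usage sketch (illustrative, not part of this package): callers typically
// combine these helpers to pick a client configuration, e.g.
//
//	if PossiblyInCluster() {
//		cfg, err := rest.InClusterConfig()
//		if err != nil {
//			return nil, err
//		}
//		// use cfg with a clientset, defaulting namespaces via Namespace()
//	}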
| ["\"KUBE_NAMESPACE\"", "\"KUBERNETES_SERVICE_HOST\"", "\"KUBERNETES_SERVICE_PORT\""] | [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT", "KUBE_NAMESPACE"] | [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT", "KUBE_NAMESPACE"] | go | 3 | 0 | |
cmd/docker/docker_test.go | package main
import (
"bytes"
"io"
"io/ioutil"
"os"
"testing"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/debug"
"github.com/sirupsen/logrus"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
func TestClientDebugEnabled(t *testing.T) {
defer debug.Disable()
tcmd := newDockerCommand(&command.DockerCli{})
tcmd.SetFlag("debug", "true")
cmd, _, err := tcmd.HandleGlobalFlags()
assert.NilError(t, err)
assert.NilError(t, tcmd.Initialize())
err = cmd.PersistentPreRunE(cmd, []string{})
assert.NilError(t, err)
assert.Check(t, is.Equal("1", os.Getenv("DEBUG")))
assert.Check(t, is.Equal(logrus.DebugLevel, logrus.GetLevel()))
}
var discard = ioutil.NopCloser(bytes.NewBuffer(nil))
func runCliCommand(t *testing.T, r io.ReadCloser, w io.Writer, args ...string) error {
t.Helper()
if r == nil {
r = discard
}
if w == nil {
w = ioutil.Discard
}
cli, err := command.NewDockerCli(command.WithInputStream(r), command.WithCombinedStreams(w))
assert.NilError(t, err)
tcmd := newDockerCommand(cli)
tcmd.SetArgs(args)
cmd, _, err := tcmd.HandleGlobalFlags()
assert.NilError(t, err)
assert.NilError(t, tcmd.Initialize())
return cmd.Execute()
}
func TestExitStatusForInvalidSubcommandWithHelpFlag(t *testing.T) {
err := runCliCommand(t, nil, nil, "help", "invalid")
assert.Error(t, err, "unknown help topic: invalid")
}
func TestExitStatusForInvalidSubcommand(t *testing.T) {
err := runCliCommand(t, nil, nil, "invalid")
assert.Check(t, is.ErrorContains(err, "docker: 'invalid' is not a docker command."))
}
func TestVersion(t *testing.T) {
var b bytes.Buffer
err := runCliCommand(t, nil, &b, "--version")
assert.NilError(t, err)
assert.Check(t, is.Contains(b.String(), "Docker version"))
}
| ["\"DEBUG\""] | [] | ["DEBUG"] | [] | ["DEBUG"] | go | 1 | 0 | |
cmd/snyk2nvd/snyk2nvd.go | // Copyright (c) Facebook, Inc. and its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"io"
"os"
"strings"
"github.com/facebookincubator/flog"
"github.com/facebookincubator/nvdtools/providers/lib/client"
"github.com/facebookincubator/nvdtools/providers/lib/runner"
"github.com/facebookincubator/nvdtools/providers/snyk/api"
"github.com/facebookincubator/nvdtools/providers/snyk/schema"
)
var lf languageFilter
func Read(r io.Reader, c chan runner.Convertible) error {
var vulns map[string]*schema.Advisory
if err := json.NewDecoder(r).Decode(&vulns); err != nil {
return fmt.Errorf("can't decode into vulns: %v", err)
}
for _, vuln := range vulns {
if lf.accepts(vuln) {
c <- vuln
}
}
return nil
}
func FetchSince(ctx context.Context, c client.Client, baseURL string, since int64) (<-chan runner.Convertible, error) {
consumerID := os.Getenv("SNYK_ID")
if consumerID == "" {
return nil, fmt.Errorf("please set SNYK_ID in environment")
}
secret := os.Getenv("SNYK_READONLY_KEY")
if secret == "" {
return nil, fmt.Errorf("please set SNYK_READONLY_KEY in environment")
}
client := api.NewClient(c, baseURL, consumerID, secret)
advs, err := client.FetchAllVulnerabilities(ctx, since)
return lf.filter(advs), err
}
func main() {
flag.Var(&lf, "language", "Comma separated list of languages to download/convert. If not set, then use all available")
r := runner.Runner{
Config: runner.Config{
BaseURL: "https://data.snyk.io/api/v4",
ClientConfig: client.Config{
UserAgent: "snyk2nvd",
},
},
FetchSince: FetchSince,
Read: Read,
}
if err := r.Run(); err != nil {
flog.Fatalln(err)
}
}
// language filter
type languageFilter map[string]bool
// String is a part of flag.Value interface implementation.
func (lf *languageFilter) String() string {
languages := make([]string, 0, len(*lf))
for language := range *lf {
languages = append(languages, language)
}
return strings.Join(languages, ",")
}
// Set is a part of flag.Value interface implementation.
func (lf *languageFilter) Set(val string) error {
if val == "" {
return nil
}
if *lf == nil {
*lf = make(languageFilter)
}
for _, v := range strings.Split(val, ",") {
if v != "" {
(*lf)[v] = true
}
}
return nil
}
func (lf *languageFilter) accepts(adv *schema.Advisory) bool {
return lf == nil || len(*lf) == 0 || (*lf)[adv.Language]
}
func (lf *languageFilter) filter(ch <-chan *schema.Advisory) <-chan runner.Convertible {
output := make(chan runner.Convertible)
go func() {
defer close(output)
for adv := range ch {
if lf.accepts(adv) {
output <- adv
}
}
}()
return output
}
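// Example invocation (assumed): the fetcher reads its credentials from the
// environment, so a run might look like
//
//	SNYK_ID=<consumer-id> SNYK_READONLY_KEY=<secret> \
//	    snyk2nvd -language golang,npm
//
// Only -language is registered in this file; any remaining flags come from
// the shared runner package.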
| ["\"SNYK_ID\"", "\"SNYK_READONLY_KEY\""] | [] | ["SNYK_READONLY_KEY", "SNYK_ID"] | [] | ["SNYK_READONLY_KEY", "SNYK_ID"] | go | 2 | 0 | |
all4depth/utils/reduce.py |
import torch
import numpy as np
from collections import OrderedDict
from all4depth.utils.horovod import reduce_value
from all4depth.utils.logging import prepare_dataset_prefix
def reduce_dict(data, to_item=False):
"""
Reduce the mean values of a dictionary from all GPUs
Parameters
----------
data : dict
Dictionary to be reduced
to_item : bool
True if the reduced values will be returned as .item()
Returns
-------
dict : dict
Reduced dictionary
"""
for key, val in data.items():
data[key] = reduce_value(data[key], average=True, name=key)
if to_item:
data[key] = data[key].item()
return data
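# Usage sketch (hypothetical values): with Horovod running on several workers,
# each worker passes its local values and receives the cross-GPU mean back.
#
#   local = {'loss': torch.tensor(0.42), 'abs_rel': torch.tensor(0.11)}
#   averaged = reduce_dict(local, to_item=True)
#   # averaged['loss'] is now the mean loss over all workers, as a float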
def all_reduce_metrics(output_data_batch, datasets, name='depth'):
"""
Reduce metrics for all batches and all datasets using Horovod
Parameters
----------
output_data_batch : list
List of outputs for each batch
datasets : list
List of all considered datasets
name : str
Name of the task for the metric
Returns
-------
all_metrics_dict : list
List of reduced metrics
"""
# If there is only one dataset, wrap in a list
if isinstance(output_data_batch[0], dict):
output_data_batch = [output_data_batch]
# Get metrics keys and dimensions
names = [key for key in list(output_data_batch[0][0].keys()) if key.startswith(name)]
dims = [output_data_batch[0][0][name].shape[0] for name in names]
# List storing metrics for all datasets
all_metrics_dict = []
# Loop over all datasets and all batches
for output_batch, dataset in zip(output_data_batch, datasets):
metrics_dict = OrderedDict()
length = len(dataset)
# Count how many times each sample was seen
seen = torch.zeros(length)
for output in output_batch:
for i, idx in enumerate(output['idx']):
seen[idx] += 1
seen = reduce_value(seen, average=False, name='idx')
assert not np.any(seen.numpy() == 0), \
'Not all samples were seen during evaluation'
# Reduce all relevant metrics
for name, dim in zip(names, dims):
metrics = torch.zeros(length, dim)
for output in output_batch:
for i, idx in enumerate(output['idx']):
metrics[idx] = output[name]
metrics = reduce_value(metrics, average=False, name=name)
metrics_dict[name] = (metrics / seen.view(-1, 1)).mean(0)
# Append metrics dictionary to the list
all_metrics_dict.append(metrics_dict)
# Return list of metrics dictionary
return all_metrics_dict
########################################################################################################################
def collate_metrics(output_data_batch, name='depth'):
"""
Collate epoch output to produce average metrics
Parameters
----------
output_data_batch : list
List of outputs for each batch
name : str
Name of the task for the metric
Returns
-------
metrics_data : list
List of collated metrics
"""
# If there is only one dataset, wrap in a list
if isinstance(output_data_batch[0], dict):
output_data_batch = [output_data_batch]
# Calculate the mean of all metrics
metrics_data = []
# For all datasets
for i, output_batch in enumerate(output_data_batch):
metrics = OrderedDict()
# For all keys (assume they are the same for all batches)
for key, val in output_batch[0].items():
if key.startswith(name):
metrics[key] = torch.stack([output[key] for output in output_batch], 0)
metrics[key] = torch.mean(metrics[key], 0)
metrics_data.append(metrics)
# Return metrics data
return metrics_data
def create_dict(metrics_data, metrics_keys, metrics_modes,
dataset, name='depth'):
"""
Creates a dictionary from collated metrics
Parameters
----------
metrics_data : list
List containing collated metrics
metrics_keys : list
List of keys for the metrics
metrics_modes
List of modes for the metrics
dataset : CfgNode
Dataset configuration file
name : str
Name of the task for the metric
Returns
-------
metrics_dict : dict
Metrics dictionary
"""
# Create metrics dictionary
metrics_dict = {}
# For all datasets
for n, metrics in enumerate(metrics_data):
if metrics: # If there are calculated metrics
prefix = prepare_dataset_prefix(dataset, n)
# For all keys
for i, key in enumerate(metrics_keys):
for mode in metrics_modes:
metrics_dict['{}-{}{}'.format(prefix, key, mode)] =\
metrics['{}{}'.format(name, mode)][i].item()
# Return metrics dictionary
return metrics_dict
########################################################################################################################
def average_key(batch_list, key):
"""
Average key in a list of batches
Parameters
----------
batch_list : list of dict
List containing dictionaries with the same keys
key : str
Key to be averaged
Returns
-------
average : float
Average of the value contained in key for all batches
"""
values = [batch[key] for batch in batch_list]
return sum(values) / len(values)
def average_sub_key(batch_list, key, sub_key):
"""
Average subkey in a dictionary in a list of batches
Parameters
----------
batch_list : list of dict
List containing dictionaries with the same keys
key : str
Key to be averaged
sub_key :
Sub key to be averaged (belonging to key)
Returns
-------
average : float
Average of the value contained in the sub_key of key for all batches
"""
values = [batch[key][sub_key] for batch in batch_list]
return sum(values) / len(values)
def average_loss_and_metrics(batch_list, prefix):
"""
Average loss and metrics values in a list of batches
Parameters
----------
batch_list : list of dict
List containing dictionaries with the same keys
prefix : str
Prefix string for metrics logging
Returns
-------
values : dict
Dictionary containing a 'loss' float entry and a 'metrics' dict entry
"""
values = OrderedDict()
key = 'loss'
values['{}-{}'.format(prefix, key)] = \
average_key(batch_list, key)
key = 'metrics'
for sub_key in batch_list[0][key].keys():
values['{}-{}'.format(prefix, sub_key)] = \
average_sub_key(batch_list, key, sub_key)
return values
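# Minimal usage sketch (hypothetical batch outputs):
#
#   batches = [{'loss': 0.5, 'metrics': {'abs_rel': 0.10}},
#              {'loss': 0.3, 'metrics': {'abs_rel': 0.20}}]
#   average_loss_and_metrics(batches, 'val')
#   # -> OrderedDict([('val-loss', 0.4), ('val-abs_rel', 0.15)])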
########################################################################################################################
| [] | [] | [] | [] | [] | python | null | null | null |
skipthoughts/training/tools.py | """
A selection of functions for extracting vectors
Encoder + vocab expansion
Modified: William Blackie
Reason: PEP8 formatting, refactoring name of load_googlenews for clarity, refactoring path and name of vectors to FastText.
Reason: Replace dict with dict literal
TODO need to modify for Fast-text + some naming conventions for clarity...
"""
import gensim
import theano
import theano.tensor as tensor
from gensim.models import KeyedVectors
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import warnings
import cPickle as Pkl
import numpy
import nltk
from collections import OrderedDict, defaultdict
from nltk.tokenize import word_tokenize
from scipy.linalg import norm
warnings.filterwarnings(action='ignore', category=UserWarning,
module='gensim') # Suppress gensim warnings on Windows as they are irrelevant
from sklearn.linear_model import LinearRegression
from utils import load_params, init_tparams
from model import init_params, build_encoder, build_encoder_w2v
# -----------------------------------------------------------------------------#
# Specify model and dictionary locations here
# -----------------------------------------------------------------------------#
path_to_model = r'D:\Projects\skip-thoughts\models\my_bi_skip.npz'
path_to_dictionary = r'D:\Projects\skip-thoughts\models\corpus\saved_dict.pk1'
path_to_fasttext = r'D:\Projects\skip-thoughts\models\vectors\wiki.en.vec'
# -----------------------------------------------------------------------------#
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="1"
def load_model(embed_map=None):
"""
Load all model components + apply vocab expansion
Modified: to load FastText vectors
"""
# Load the worddict
print 'Loading dictionary...'
with open(path_to_dictionary, 'r') as f:
worddict = Pkl.load(f)
# Create inverted dictionary
print 'Creating inverted dictionary...'
word_idict = dict()
for kk, vv in worddict.iteritems():
word_idict[vv] = kk
word_idict[0] = '<eos>'
word_idict[1] = 'UNK'
# Load model options
print 'Loading model options...'
with open('%s.pkl' % path_to_model, 'rb') as f:
options = Pkl.load(f)
# Load parameters
print 'Loading model parameters...'
params = init_params(options)
params = load_params(path_to_model, params)
tparams = init_tparams(params)
# Extractor functions
print 'Compiling encoder...'
trng = RandomStreams(1234)
trng, x, x_mask, ctx, emb = build_encoder(tparams, options)
f_enc = theano.function([x, x_mask], ctx, name='f_enc')
f_emb = theano.function([x], emb, name='f_emb')
trng, embedding, x_mask, ctxw2v = build_encoder_w2v(tparams, options)
f_w2v = theano.function([embedding, x_mask], ctxw2v, name='f_w2v')
# Load word2vec, if applicable
if embed_map is None:
print 'Loading FastText embeddings...'
embed_map = load_fasttext_vectors(path_to_fasttext)
# Lookup table using vocab expansion trick
print 'Creating word lookup tables...'
table = lookup_table(options, embed_map, worddict, word_idict, f_emb)
# Store everything we need in a dictionary
print 'Packing up...'
model = {'options': options, 'table': table, 'f_w2v': f_w2v}
return model
def encode(model, X, use_norm=True, verbose=True, batch_size=128, use_eos=False):
"""
Encode sentences in the list X. Each entry will return a vector
"""
# first, do preprocessing
X = preprocess(X)
# word dictionary and init
d = defaultdict(lambda: 0)
for w in model['table'].keys():
d[w] = 1
features = numpy.zeros((len(X), model['options']['dim']), dtype='float32')
# length dictionary
ds = defaultdict(list)
captions = [s.split() for s in X]
for i, s in enumerate(captions):
ds[len(s)].append(i)
# Get features. This encodes by length, in order to avoid wasting computation
for k in ds.keys():
if verbose:
print k
numbatches = len(ds[k]) / batch_size + 1
for minibatch in range(numbatches):
caps = ds[k][minibatch::numbatches]
if use_eos:
embedding = numpy.zeros((k + 1, len(caps), model['options']['dim_word']), dtype='float32')
else:
embedding = numpy.zeros((k, len(caps), model['options']['dim_word']), dtype='float32')
for ind, c in enumerate(caps):
caption = captions[c]
for j in range(len(caption)):
if d[caption[j]] > 0:
embedding[j, ind] = model['table'][caption[j]]
else:
embedding[j, ind] = model['table']['UNK']
if use_eos:
embedding[-1, ind] = model['table']['<eos>']
if use_eos:
ff = model['f_w2v'](embedding, numpy.ones((len(caption) + 1, len(caps)), dtype='float32'))
else:
ff = model['f_w2v'](embedding, numpy.ones((len(caption), len(caps)), dtype='float32'))
if use_norm:
for j in range(len(ff)):
ff[j] /= norm(ff[j])
for ind, c in enumerate(caps):
features[c] = ff[ind]
return features
def preprocess(text):
"""
Preprocess text for encoder
"""
X = []
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
for t in text:
sents = sent_detector.tokenize(t)
result = ''
for s in sents:
tokens = word_tokenize(s)
result += ' ' + ' '.join(tokens)
X.append(result)
return X
def load_fasttext_vectors(path=path_to_fasttext):
"""
Load FastText vectors stored in word2vec text format
"""
embed_map = KeyedVectors.load_word2vec_format(path, binary=False)
return embed_map
def lookup_table(options, embed_map, worddict, word_idict, f_emb, use_norm=False):
"""
Create a lookup table from linear mapping of word2vec into RNN word space
"""
wordvecs = get_embeddings(options, word_idict, f_emb)
clf = train_regressor(options, embed_map, wordvecs, worddict)
table = apply_regressor(clf, embed_map, use_norm=use_norm)
for i in range(options['n_words']):
w = word_idict[i]
table[w] = wordvecs[w]
if use_norm:
table[w] /= norm(table[w])
return table
def get_embeddings(options, word_idict, f_emb, use_norm=False):
"""
Extract the RNN embeddings from the model
"""
d = OrderedDict()
for i in range(options['n_words']):
caption = [i]
ff = f_emb(numpy.array(caption).reshape(1, 1)).flatten()
if use_norm:
ff /= norm(ff)
d[word_idict[i]] = ff
return d
def train_regressor(options, embed_map, wordvecs, worddict):
"""
Return regressor to map word2vec to RNN word space
Modified: Changed to accept FastText object
"""
# Gather all words from word2vec that appear in wordvecs
d = defaultdict(lambda: 0)
for w in embed_map.vocab.keys():
d[w] = 1
shared = OrderedDict()
count = 0
for w in worddict.keys()[:options['n_words'] - 2]:
if d[w] > 0:
shared[w] = count
count += 1
# Get the vectors for all words in 'shared'
w2v = numpy.zeros((len(shared), 300), dtype='float32')
sg = numpy.zeros((len(shared), options['dim_word']), dtype='float32')
for w in shared.keys():
w2v[shared[w]] = embed_map[w]
sg[shared[w]] = wordvecs[w]
clf = LinearRegression()
clf.fit(w2v, sg)
return clf
def apply_regressor(clf, embed_map, use_norm=False):
"""
Map words from word2vec into RNN word space
"""
wordvecs = OrderedDict()
# Iterate over the FastText vocabulary directly; reassigning embed_map to an
# array of its keys (as the old code did) breaks the vector lookups below.
for w in embed_map.vocab.keys():
if '_' not in w:
wordvecs[w] = clf.predict(embed_map[w].reshape(1, -1)).astype('float32')
if use_norm:
wordvecs[w] /= norm(wordvecs[w])
return wordvecs
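# The vocabulary-expansion trick used above, sketched (illustrative; 'zebra'
# is a made-up out-of-vocabulary word): fit a linear map from the FastText
# space to the RNN word space on the shared vocabulary, then predict
# embeddings for words the encoder never saw in training.
#
#   clf = LinearRegression().fit(w2v_shared, rnn_shared)
#   table['zebra'] = clf.predict(embed_map['zebra'].reshape(1, -1))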
| [] | [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
tests/test_reproducibility.py | import tests.generate_fake_dataset as gen
import tests.initialize_db as initdb
import yaml
import testing.postgresql
import psycopg2
import psycopg2.extras
from mock import patch
from pgdedupe.utils import load_config, filename_friendly_hash, create_model_definition
from pgdedupe.run import process_options, preprocess, create_blocking, cluster, train
def test_reproducibility():
"""Test that two dedupers trained with the same config and data
come up with the same results"""
psql = testing.postgresql.Postgresql()
with open('db.yaml', 'w') as f:
yaml.dump(psql.dsn(), f)
pop = gen.create_population(100)
gen.create_csv(pop, 'pop.csv')
initdb.init('db.yaml', 'pop.csv')
dbconfig = load_config('db.yaml')
base_config = {
'schema': 'dedupe',
'table': 'dedupe.entries',
'key': 'entry_id',
'fields': [
{'field': 'ssn', 'type': 'String', 'has_missing': True},
{'field': 'first_name', 'type': 'String'},
{'field': 'last_name', 'type': 'String'},
{'field': 'dob', 'type': 'String'},
{'field': 'race', 'type': 'Categorical', 'categories': ['pacisland', 'amindian', 'asian', 'other', 'black', 'white']},
{'field': 'ethnicity', 'type': 'Categorical', 'categories': ['hispanic', 'nonhispanic']},
{'field': 'sex', 'type': 'Categorical', 'categories': ['M', 'F']}
],
'interactions': [
['last_name', 'dob'],
['ssn', 'dob']
],
'filter_condition': 'last_name is not null AND (ssn is not null OR (first_name is not null AND dob is not null))',
'recall': 0.99,
'prompt_for_labels': False,
'seed': 0,
'training_file': 'tests/dedup_postgres_training.json'
}
config = process_options(base_config)
con = psycopg2.connect(cursor_factory=psycopg2.extras.RealDictCursor, **dbconfig)
preprocess(con, config)
# train two versions of the deduper with the same configuration
with patch.dict('os.environ', {'PYTHONHASHSEED': '123'}):
old_deduper = train(con, config)
con = psycopg2.connect(cursor_factory=psycopg2.extras.RealDictCursor, **dbconfig)
with patch.dict('os.environ', {'PYTHONHASHSEED': '123'}):
new_deduper = train(con, config)
# ensure that the two models come up with the same hash
model_hash = filename_friendly_hash(create_model_definition(config, old_deduper))
new_model_hash = filename_friendly_hash(create_model_definition(config, new_deduper))
assert new_model_hash == model_hash
# run clustering on each of the dedupers
create_blocking(old_deduper, con, config)
old_dupes = cluster(old_deduper, con, config)
create_blocking(new_deduper, con, config)
new_dupes = cluster(new_deduper, con, config)
# each deduper should come up with the same list of clusters
assert [records for records, scores in old_dupes] == [records for records, scores in new_dupes]
| [] | [] | [] | [] | [] | python | 0 | 0 | |
python/apogee/aspcap/elem.py | # routines related to individual element calibration for APOGEE/ASPCAP
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import numpy as np
from apogee.utils import apload
from apogee.utils import apselect
from apogee.aspcap import err
from tools import plots
from tools import html
from tools import fit
from tools import match
import pdb
from astropy.io import fits
from astropy.io import ascii
try:
import esutil
except:
pass
import copy
import os
def read(file='allStar-testcal.fits') :
'''
Read allStar file, get main structure, elem_symbol, and elemtoh
'''
dr13load=apload.ApLoad(dr='dr13')
#a=apload.allStar()[1].data
c=dr13load.allStar()[3].data
#a=fits.open('../dist/allStar+.fits')[1].data
#x,y,z,r = galmodel.lbd2xyz(a['GLON'],a['GLAT'],a['DISO'][:,2]/1000.)
#zone=np.where((r>9) & (r<11) & (dt<40))[0]
a=fits.open(file)[1].data
#c=fits.open(file)[3].data
elem=c['ELEM_SYMBOL'][0]
elemtoh=c['ELEMTOH'][0]
return a, elem, elemtoh
def arctabun(el) :
'''
Define Arcturus abundances, and return requested abundance
'''
abun = { "C" : 0.090000, "CI" : 0.09, "N" : 0.400000, "O" : 0.480000, "Na" : 0.210000, "Mg" : 0.370000, "Al" : 0.400000, "Si" : 0.330000, "P" : 0.070000, "S" : 0.350000, "K" : 0.200000, "Ca" : 0.090000, "Sc" : 0.070000, "Ti" : 0.250000, "TiII" : 0.25, "V" : 0.160000, "Cr" : -0.050000, "Mn" : -0.120000, "Fe" : -0.000000, "Co" : 0.040000, "Ni" : 0.030000, "Cu" : -0.050000, "Ge" : 0.000000, "Rb" : 0.000000, "Y" : 0.000000, "Ce" : -0.190000, "Nd" : 0.130000, "Yb" : 0., "M" : 0., "alpha" : 0.3}
return(abun[el])
def optabun(el) :
'''
??? define abundance offsets from some optical analysis ???
'''
abun = {"Na" : -0.15, "Mg" : 0.06, "Al" : 0.04, "Si" : -0.21, "Ca" : 0.11, "Ti" : -0.14, "TiII" : 0.08, "V" : -0.15, "Cr" : -0.04, "Mn" : -0.36, "Fe" : 0.06, "Co" : -0.26}
try :
return(abun[el])
except :
return(-9999.)
def refabun(el,dwarf=False) :
'''
Return reference abundance: 0 if giant, Arcturus if not?
'''
if dwarf :
return 0.
else :
return arctabun(el)
def plot(a,elem,etoh,dwarf=False,suffix='',gcal=None,dcal=None,glon=None,glat=None,res=None,usemh=False,sn=[200,1000]) :
'''
Make a bunch of plots for elemental abundances
'''
try: os.mkdir('elem')
except: pass
# selection
#dt=a['FPARAM'][:,0]-(4468+(a['FPARAM'][:,1]-2.5)/0.0018 - 382.5*a['FPARAM'][:,3])
#gd=apselect.select(a[zone],badval='STAR_BAD',logg=[-1,3.5],sn=[200,1000],teff=[4000,4800])
#gd=zone[gd]
if dwarf :
tit = 'Dwarfs, S/N>200'
prefix = 'd'+suffix
tmax=6500
gd=apselect.select(a,badval='STAR_BAD',sn=sn,raw=True,glon=glon,glat=glat,dwarfs=True)
etoh[0]=1
etoh[1]=1
etoh[2]=1
ref=apselect.select(a,id='VESTA')
else :
tit = 'Giants, S/N>200'
prefix = 'g'+suffix
tmax=6500
gd=apselect.select(a,badval='STAR_BAD',sn=sn,raw=True,glon=glon,glat=glat,giants=True)
ref=apselect.select(a,id='alpha_Boo')
out = open('elem/'+prefix+'.dat','w')
# get the indices for different grids, and for stars near solar metallicity
fgrid=apselect.select(a[gd],grid='F',raw=True)
gkgrid=apselect.select(a[gd],grid='GK',raw=True)
mgrid=apselect.select(a[gd],grid='M',raw=True)
solar=apselect.select(a[gd],mh=[-0.1,0.1],raw=True)
ifeh=17
if len(a['FELEM'].shape) == 2: felem_feh = a['FELEM'][:,ifeh]
else : felem_feh = a['FELEM'][:,0,ifeh]
ytit=[]
files=[]
# loop over elements
nelem=len(elem)
for ielem in range(nelem+2) :
file=[]
if ielem < nelem :
el = elem[ielem].strip()
#eelem = a['ELEM'][gd,ielem]
eelem = a['X_M'][gd,ielem]
if len(a['FELEM'].shape) == 2: felem = a['FELEM'][gd,ielem]
else : felem = a['FELEM'][gd,0,ielem]
eelem_err = a['X_M_ERR'][gd,ielem]
if len(a['FELEM'].shape) == 2: felem_err = a['FELEM_ERR'][gd,ielem]
else: felem_err = a['FELEM_ERR'][gd,0,ielem]
tmp=etoh[ielem]
if ielem > 2 :
if usemh and etoh[ielem] :
#eelem -= a['FPARAM'][gd,3]
felem -= a['FPARAM'][gd,3]
elif not usemh and not etoh[ielem] :
#eelem += a['FPARAM'][gd,3]
felem += a['FPARAM'][gd,3]
else :
giants = apselect.select(a[gd],grid='g_',raw=True)
if not usemh :
#eelem[giants] += a['FPARAM'][gd[giants],3]
felem[giants] += a['FPARAM'][gd[giants],3]
dwarfs = apselect.select(a[gd],grid='d_',raw=True)
if usemh :
#eelem[dwarfs] -= a['FPARAM'][gd[dwarfs],3]
felem[dwarfs] -= a['FPARAM'][gd[dwarfs],3]
elif ielem == nelem :
el = 'M'
eelem = a['PARAM'][gd,0]
felem = a['FPARAM'][gd,3]
eelem_err = np.sqrt(a['PARAM_COV'][gd,3,3])
felem_err = np.sqrt(a['FPARAM_COV'][gd,3,3])
tmp = 1
else :
el = 'alpha'
eelem = a['PARAM'][gd,6]
felem = a['FPARAM'][gd,6]
eelem_err = np.sqrt(a['PARAM_COV'][gd,6,6])
felem_err = np.sqrt(a['FPARAM_COV'][gd,6,6])
tmp = 0
if not usemh :
eelem += a['FPARAM'][gd,3]
felem += a['FPARAM'][gd,3]
if (tmp == 1 and not usemh) or (tmp == 0 and usemh ):
refoffset=0
else :
refoffset=a['FPARAM'][ref,3]
if usemh :
refoffset *= -1
name=prefix+el
print(name)
fname = 'elem/'+name
# loop over plots
xtit = []
for iplot in range(0,8) :
#for iplot in range(2,3) :
if iplot == 0 :
#x = a['ELEM'][gd,ifeh]
x = a['X_H'][gd,ifeh]
xr = [-1.5,1.]
xt= '[Fe/H] (cal)'
y = eelem
if not usemh: y-=a['PARAM'][gd,3]
yr=[-0.25,0.5]
yt = '['+name+'/M](cal)'
z = a['FPARAM'][gd,0]
zr = [3000,tmax]
zt='Teff'
xtit.append('calibrated vs [Fe/H]')
elif iplot == 1 :
x = felem_feh[gd]
xr = [-1.5,1.]
xt= '[Fe/H] (raw)'
y = felem
if not usemh: y-=a['FPARAM'][gd,3]
yr=[-0.25,0.5]
yt = '['+name+'/M](raw)'
z = a['FPARAM'][gd,0]
zr = [3000,tmax]
zt='Teff'
xtit.append('raw vs [Fe/H]')
elif iplot == 2 :
x = a['FPARAM'][gd,0]
xr = [2500,tmax]
xt= 'Teff'
y = eelem
if not usemh: y-=a['PARAM'][gd,3]
yr=[-0.25,0.5]
yt = '['+name+'/M](cal)'
#z = a['ELEM'][gd,ifeh]
z = a['X_H'][gd,ifeh]
zr = [-1.5,1.]
zt='[Fe/H]'
xtit.append('calibrated vs Teff')
elif iplot == 3 :
x = a['FPARAM'][gd,0]
xr = [2500,tmax]
xt= 'Teff'
y = felem
if not usemh: y-=a['FPARAM'][gd,3]
yr=[-0.25,0.5]
yt = '['+name+'/M](raw)'
z = felem_feh[gd]
zr = [-1.5,1.]
zt='[Fe/H]'
xtit.append('raw vs Teff')
elif iplot == 4 :
x = a['FPARAM'][gd,0]
xr = [3000,tmax]
xt = 'Teff'
y = eelem-felem
yr = [-0.3,0.3]
yt = 'cal - raw'
z = felem_feh[gd]
zr = [-1.5,1.]
zt='[Fe/H]'
xtit.append('calibration')
elif iplot == 5 :
x = a['FPARAM'][gd,0]
xr = [2500,tmax]
xt = 'Teff'
y = eelem_err
yr= [0,0.3]
yt = 'Empirical uncertainty'
z = felem_feh[gd]
zr = [-1.5,1.]
zt='[Fe/H]'
xtit.append('empirical uncertainty')
elif iplot == 6 :
x = a['FPARAM'][gd,0]
xr = [2500,tmax]
xt = 'Teff'
y = felem_err
yr= [0,0.3]
yt = 'FERRE uncertainty'
z = felem_feh[gd]
zr = [-1.5,1.]
zt='[Fe/H]'
xtit.append('FERRE uncertainty')
elif iplot == 7 :
x = a['FPARAM'][gd,0]
xr = [2500,tmax]
xt = 'Teff'
if ielem < nelem :
y = a['ELEM_CHI2'][gd,ielem]
else :
y = x*0.
yr= [0,50]
yt = 'ELEM_CHI2'
z = felem_feh[gd]
zr = [-1.5,1.]
zt='[Fe/H]'
xtit.append('CHI2 from element fit')
fig=plt.figure(figsize=(10,8))
ax=fig.add_subplot(111)
if len(x) > 0 :
if len(fgrid) > 0 :
plots.plotc(ax,x[fgrid],y[fgrid],z[fgrid],xr=xr,yr=yr,zr=zr,colorbar=False,size=10,marker='s',yt=yt,xt=xt,zt=zt)
if len(gkgrid) > 0 :
plots.plotc(ax,x[gkgrid],y[gkgrid],z[gkgrid],xr=xr,yr=yr,zr=zr,size=10,marker='o',yt=yt,xt=xt,zt=zt)
if len(mgrid) > 0 :
plots.plotc(ax,x[mgrid],y[mgrid],z[mgrid],xr=xr,yr=yr,zr=zr,size=7,marker='^',yt=yt,xt=xt,zt=zt)
if (iplot == 0 or iplot == 2) :
if res is not None :
clust, = np.where(res['col3'] == ielem)
plots.plotp(ax,res['col4'][clust],res['col9'][clust],xr=xr,yr=yr,size=50,marker='o',facecolors='none',linewidth=1)
# plot the reference star abundance (Arcturus or Vesta)
if ielem < nelem-2 :
#refval = a['ELEM'][ref,ielem]+refoffset
#referr = a['ELEM_ERR'][ref,ielem]
refval = a['X_M'][ref,ielem]
referr = a['X_M_ERR'][ref,ielem]
elif ielem == nelem-2 :
refval = a['PARAM'][ref,3]+refoffset
referr = np.sqrt(a['PARAM_COV'][ref,3,3])
else :
refval = a['PARAM'][ref,6]+refoffset
referr = np.sqrt(a['PARAM_COV'][ref,6,6])
if not usemh: refval -= a['PARAM'][ref,3]
reflit = (refabun(el,dwarf=dwarf)-refabun('Fe',dwarf=dwarf))
plots.plotl(ax,xr,[refval-reflit,refval-reflit],color='r')
# Plot the median of solar abundance stars
cal=np.where(y[solar] > -9000)[0]
med = np.median(y[solar[cal]])
plots.plotl(ax,xr,[med,med],color='y')
plt.text(xr[0]+0.05*(xr[1]-xr[0]),yr[0]+0.85*(yr[1]-yr[0]),'solar metallicity stars: {:4.2f}'.format(med),color='y')
if iplot == 0 :
out.write(el+'{:8.3f} {:8d}\n'.format(med,len(cal)))
# Plot the offset from the optical analysis
opt=optabun(el)
plots.plotl(ax,xr,[opt,opt],color='m')
plt.text(xr[0]+0.05*(xr[1]-xr[0]),yr[0]+0.75*(yr[1]-yr[0]),'optical offset: {:4.2f}'.format(opt),color='m')
# Plot M67 points
#clust=fits.open('../../cal/clust.fits')[1].data
#m67=np.where(np.core.defchararray.find(clust['FIELD'],'M67') >= 0)
#m1, m2 = esutil.numpy_util.match(a['APOGEE_ID'][gd],clust['APOGEE_ID'][m67])
#plots.plotp(ax,x[m1],y[m1],size=6,color='k',xr=xr,yr=yr)
#if iplot == 2 :
# print(m1, x[m1])
if dwarf :
refstar = 'VESTA'
if dcal is not None :
refclust=dcal[ielem] #-dcal[ifeh]
plots.plotl(ax,xr,[refclust,refclust],color='g')
plt.text(xr[0]+0.05*(xr[1]-xr[0]),yr[0]+0.95*(yr[1]-yr[0]),'M67 dwarfs: {:4.2f}'.format(refclust),color='g')
else :
refstar = 'Arcturus'
if gcal is not None :
refclust=gcal[ielem] #-gcal[ifeh]
plots.plotl(ax,xr,[refclust,refclust],color='g')
plt.text(xr[0]+0.05*(xr[1]-xr[0]),yr[0]+0.95*(yr[1]-yr[0]),'M67 giants: {:4.2f}'.format(refclust),color='g')
if dcal is not None :
drefclust=dcal[ielem] #-dcal[ifeh]
plots.plotl(ax,xr,[refclust-drefclust,refclust-drefclust],color='b')
plt.text(xr[0]+0.05*(xr[1]-xr[0]),yr[0]+0.90*(yr[1]-yr[0]),'M67 dwarfs: {:4.2f}'.format(drefclust),color='b')
plt.text(xr[0]+0.05*(xr[1]-xr[0]),yr[0]+0.05*(yr[1]-yr[0]),
refstar+' ASPCAP: {:4.2f}+/-{:4.2f}'.format(refval[0],referr[0])+' lit: '+'{:4.2f}'.format(reflit),color='r')
#plt.show()
plt.savefig(fname+'{:1d}.png'.format(iplot))
plt.close()
file.append(name+'{:1d}.png'.format(iplot))
ytit.append(name)
# plt.show()
files.append(file)
out.close()
html.htmltab(files,ytitle=ytit,file='elem/'+prefix+'elem.html',xtitle=xtit,header=tit)
def elemindex() :
''' Make the HTML pages (assumes plots have already been made) for individual elements '''
a,elem,elemtoh = read()
# loop over elements
nelem=len(elem)
for ielem in range(len(elem)+2) :
if ielem < len(elem) :
el = elem[ielem].strip()
elif ielem == nelem :
el = 'M'
elif ielem == nelem+1 :
el = 'alpha'
ytit=[]
files=[]
for prefix in [ 'g','d' ] :
for suffix in [ '', 'gal' ] :
name=prefix+el
file = [prefix+'1mh'+suffix+el+'2.png',prefix+'1emh'+suffix+el+'2.png',prefix+'1eemh'+suffix+el+'2.png',
prefix+'2mh'+suffix+el+'2.png',prefix+'2emh'+suffix+el+'2.png',prefix+'2eemh'+suffix+el+'2.png',
prefix+'3mh'+suffix+el+'2.png',prefix+'3emh'+suffix+el+'2.png',prefix+'3eemh'+suffix+el+'2.png']
xtit = ['Linear 4000-5250','Linear 3750-5250','Linear 3500-5250',
'Quadratic 4000-5250','Quadratic 3750-5250','Quadratic 3500-5250',
'Cubic 4000-5250','Cubic 3750-5250','Cubic 3500-5250']
files.append(file)
ytit = ['Giants (full)','Giants (70<l<110)','Dwarfs (full)','Dwarfs (70<l<110)']
html.htmltab(files,file='elem/'+el+'.html',xtitle=xtit,ytitle=ytit)
def main() :
''' Make series of plots and web pages for each calibration "type" '''
#files = ['testcal','testcal1mh','testcal2mh','testcal3mh',
# 'testcal1emh','testcal2emh','testcal3emh',
# 'testcal1eemh','testcal2eemh','testcal3eemh']
#dirs = ['../testcal','testcal1mh','testcal2mh','testcal3mh',
# 'testcal1emh','testcal2emh','testcal3emh',
# 'testcal1eemh','testcal2eemh','testcal3eemh']
#suffixes = ['','1mh','2mh','3mh','1emh','2emh','3emh','1eemh','2eemh','3eemh']
files = ['l30e.2']
dirs = ['../cal']
suffixes = ['']
for i in range(len(files)) :
a,e,etoh = read(file='allStar-'+files[i]+'.fits')
gcal = fits.open(dirs[i]+'/giantcal.fits')[2].data['ABUN'][0,:,17]
dcal = fits.open(dirs[i]+'/dwarfcal.fits')[2].data['ABUN'][0,:,17]
for d in [ False, True ] :
if d :
res = ascii.read(dirs[i]+'/dwarfcal.res')
else :
res = ascii.read(dirs[i]+'/giantcal.res')
tmp=etoh # since etoh gets changed for dwarfs
plot(a,e,tmp,suffix=suffixes[i],dwarf=d,gcal=gcal,dcal=dcal,res=None,usemh=True,sn=[200,1000])
plot(a,e,tmp,suffix=suffixes[i]+'gal',dwarf=d,gcal=gcal,dcal=dcal,glon=[70,110],glat=[-5,5],res=None,usemh=True)
#a,e,etoh = read()
#plot(a,e,etoh)
#plot(a,e,etoh,dwarf=True)
def globalscatter(allstar,elems,vscatter=[0,0.2],pm=True,dist=True) :
'''
Compute scatter in clusters
'''
clust=apselect.clustdata()
gd=apselect.select(allstar,badval='STAR_BAD',vscatter=vscatter)
members=[]
print('selecting')
clusts = ['N2420', 'M67', 'N188', 'N7789', 'N6819', 'N6791']
fp=open('global.dat','w')
for cluster in clusts :
j=np.array(apselect.clustmember(allstar[gd],cluster,raw=True,pm=pm,dist=dist))
print(cluster,len(j))
members.append(j)
for jj in j :
fp.write('{:s} {:s} {:8.3f} {:8.1f} {:8.1f} {:8.1f} {:8.2f} {:s}\n'.format(
cluster,allstar['APOGEE_ID'][gd[jj]],allstar['FE_H'][gd[jj]],allstar['TEFF'][gd[jj]],
allstar['SNR'][gd[jj]],allstar['ASPCAP_CHI2'][gd[jj]],
allstar['VSCATTER'][gd[jj]],allstar['STARFLAGS'][gd[jj]]))
fp.close()
iel=0
nels=len(elems[0])+2
fig,ax=plots.multi(2,int(round(nels/2.)),hspace=0.001,wspace=0.001,figsize=(8,10))
plots.event(fig)
plots._data=allstar
plots._id_cols=['APOGEE_ID']
color=['r','g','b','c','m','y']
for iel,el in enumerate(np.append(elems,['M','alpha'])) :
iclust=0
all=np.array([])
ix=iel%2
iy=iel//2
for cluster in clusts :
i=np.where(clust.name == cluster)
mh=clust[i].mh
name=clust[i].name
# get cluster members
j=members[iclust]
if len(j) > 0 :
if el.strip() == 'Fe' :
abun=allstar['X_H'][gd[j],iel]
ok=np.where(((allstar['ELEMFLAG'][gd[j],iel] & 255) == 0) & (allstar['X_H_ERR'][gd[j],iel] < 0.2))[0]
elif el.strip() == 'M' :
abun=allstar['M_H'][gd[j]]
ok=np.where(((allstar['PARAMFLAG'][gd[j],3] & 255) == 0) & (allstar['M_H_ERR'][gd[j]] < 0.2))[0]
elif el.strip() == 'alpha' :
abun=allstar['ALPHA_M'][gd[j]]
ok=np.where(((allstar['PARAMFLAG'][gd[j],6] & 255) == 0) & (allstar['ALPHA_M_ERR'][gd[j]] < 0.2))[0]
else :
abun=allstar['X_M'][gd[j],iel]
ok=np.where(((allstar['ELEMFLAG'][gd[j],iel] & 255) == 0) & (allstar['X_M_ERR'][gd[j],iel] < 0.2) & (allstar['X_M'][gd[j],iel] > -999) )[0]
if len(ok) > 3 :
all=np.append(all,abun[ok]-abun[ok].mean())
plots.plotp(ax[iy,ix],allstar['TEFF'][gd[j[ok]]],abun[ok]-abun[ok].mean(),color=color[iclust],size=10,yr=[-0.5,0.5])
iclust+=1
print('{:s} {:10.3f} {:10.3f} {:d}\n'.format(el, all.mean(), all.std(), len(all)))
ax[iy,ix].text(0.1,0.9,el.strip(),ha='left',va='top',transform=ax[iy,ix].transAxes)
ax[iy,ix].text(0.9,0.9,'{:8.3f}'.format(all.std()),ha='right',va='top',transform=ax[iy,ix].transAxes)
iel+=1
def getabun(data,elems,elemtoh,el,xh=False,terange=[-1,10000],calib=False,line=0) :
'''
Return the abundance of the requested element, given data array, elem array, element
'''
if calib :
param = 'PARAM'
else :
param = 'FPARAM'
if el.strip() == 'M' :
ok=np.where(((data['PARAMFLAG'][:,3] & 255) == 0) & (data['FPARAM_COV'][:,3,3] < 0.2) &
(data['FPARAM'][:,0] >= terange[0]) & (data['FPARAM'][:,0] <= terange[1]) & (data[param][:,3] > -9990.) )[0]
abun = data[param][:,3]
elif el.strip() == 'alpha' :
ok=np.where(((data['PARAMFLAG'][:,6] & 255) == 0) & (data['FPARAM_COV'][:,6,6] < 0.2) &
(data['FPARAM'][:,0] >= terange[0]) & (data['FPARAM'][:,0] <= terange[1]) & (data[param][:,6] > -9990.) )[0]
abun = data[param][:,6]
if xh : abun+=data['FPARAM'][:,3]
else :
iel=np.where(np.core.defchararray.strip(elems) == el.strip())[0][0]
if calib :
if xh :
abun = data['X_H'][:,iel]
abunerr = data['X_H_ERR'][:,iel]
else :
abun = data['X_M'][:,iel]
abunerr = data['X_M_ERR'][:,iel]
else :
if len(data['FELEM'].shape) == 2:
abun = data['FELEM'][:,iel]
abunerr = data['FELEM_ERR'][:,iel]
else :
abun = data['FELEM'][:,line,iel]
abunerr = data['FELEM_ERR'][:,line,iel]
if xh and not elemtoh[iel] : abun+=data['FPARAM'][:,3]
if not xh and elemtoh[iel] : abun-=data['FPARAM'][:,3]
#if el.strip() == 'C' or el.strip() == 'CI' or el.strip() == 'N' :
# # special case for C and N for dwarfs, since those use [M/H] dimension
# try :
# dw = np.where((np.core.defchararray.find(data['ASPCAP_CLASS'],'GKd')>=0) | (np.core.defchararray.find(data['ASPCAP_CLASS'],'Fd')>=0) |
# (np.core.defchararray.find(data['ASPCAP_CLASS'],'Md')>=0))[0]
# except :
# dw = np.where((np.core.defchararray.find(data['CLASS'],'GKd')>=0) | (np.core.defchararray.find(data['CLASS'],'Fd')>=0) |
# (np.core.defchararray.find(data['CLASS'],'Md')>=0))[0]
# if xh : abun[dw]-=data['FPARAM'][dw,3]
# else : abun[dw]-=data['FPARAM'][dw,3]
if calib : badflag = 255
else : badflag = 0
ok=np.where(( (data['ELEMFLAG'][:,iel] & badflag) == 0) &
(abunerr < 0.2) &
(data['FPARAM'][:,0] >= terange[0]) &
(data['FPARAM'][:,0] <= terange[1]) &
(abun > -9990.) )[0]
return abun, ok
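# Usage sketch (file name taken from main() above; element choice arbitrary):
#
#   a, elems, elemtoh = read(file='allStar-l30e.2.fits')
#   mg, ok = getabun(a, elems, elemtoh, 'Mg', xh=True, calib=True)
#   # mg[ok] holds the good calibrated [Mg/H] values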
def cal(allstar,elems,elemtoh,doels,xh=False,plot=True,sepplot=False,hard=None, maxvisit=100,cal='default',dwarfs=False,inter=False,
errpar=False,calib=False,nx=4,ny=2,maxvscatter=0.2,pm=True,dist=True, lines=False) :
'''
Determine internal calibration relations for elements
Args:
allstar : allStar-like HDUList
elems : list of elems
elemtoh : corresponding list of elemtoh code
Keyword args:
xh : fit in [X/H]? (default=False, i.e. fit in [X/M])
plot : show individual element plots
'''
# select cluster members from array that don't have STAR_BAD into data structure
clusters=apselect.clustdata()
calclusters=['M92','M15','M13','M3','M5','M12','M35','N2420','N188','M67','N7789','Pleiades','N6819','N6791',
'N6397','M55','N3201','N6752','N362','M4','N2808','47TUC']
#calclusters=['N2420','N188','M67','N7789','Pleiades','N6819','N6791']
clusts = clusters.name
types = np.arange(len(clusts))
markers = np.chararray(len(clusts))
colors = np.chararray(len(clusts))
markers[np.where(clusters.mh > -999)[0]] = 's'
markers[np.where(clusters.mh < -1)[0]] = 'o'
markers[np.where(clusters.mh > 0)[0]] = '^'
allcol=['r','g','b','c','m','y']
for i in range(len(colors)) : colors[i] = allcol[i%6]
if dwarfs :
logg=[3.8,5.5]
reject=0.25
glon=[0,360]
else :
logg=[-1,3.8]
reject=0.15
glon=[70,110]
#solar=apselect.select(allstar[1].data,badval='STAR_BAD',badtarg=['YOUNG','EMBEDDED','EMISSION','EXTENDED'],
# raw=True,logg=logg,glon=glon,glat=[-5,5],sn=[200,10000])
#solar=apselect.select(allstar[1].data,badval='STAR_BAD',badtarg=['YOUNG','EMBEDDED','EMISSION','EXTENDED'],
# raw=True,logg=logg,sn=[200,10000],maxdist=500.)
solar=apselect.select(allstar[1].data,badval='STAR_BAD',badtarg=['YOUNG','EMBEDDED','EMISSION','EXTENDED'],
raw=True,logg=logg,sn=[200,10000])
try :
gd=np.where((allstar[1].data['gaia_parallax_error'][solar]/abs(allstar[1].data['gaia_parallax'][solar]) < 0.1) )[0]
solar=solar[gd]
distance = 1000./allstar[1].data['gaia_parallax'][solar]
x,y,z,r=lbd2xyz(allstar[1].data['GLON'][solar],allstar[1].data['GLAT'][solar],distance/1000.)
gd = np.where((abs(z) < 0.5) & (r>8) & (r<9))[0]
solar=solar[gd]
except:
print('no distance information available for solar sample, using glon/glat')
solar=apselect.select(allstar[1].data,badval='STAR_BAD',badtarg=['YOUNG','EMBEDDED','EMISSION','EXTENDED'],
raw=True,logg=logg,glon=glon,glat=[-5,5],sn=[200,10000])
gd=apselect.select(allstar[1].data,badval='STAR_BAD',raw=True,logg=logg)
print('ngd: ',len(gd))
print('nsolar: ',len(solar))
try :
v=np.where(allstar[1].data['VISIT'][gd]<= maxvisit)[0]
gd=gd[v]
except :
print('VISIT keyword does not exist')
# preselect with fast HTM method at largest cluster radius
try:
print('pre-selecting cluster members using HTM')
h=esutil.htm.HTM()
maxrad=clusters['rad'].max()
m1,m2,rad=h.match(clusters['ra'],clusters['dec'],allstar[1].data['RA'][gd],allstar[1].data['DEC'][gd],maxrad,maxmatch=500)
gd=gd[m2]
except :
pass
# now select per cluster
print('selecting cluster members')
all=[]
for cluster in clusts :
if cluster in calclusters :
#clustdir=os.environ['APOGEE_REDUX']+'/r12/stars/junk/'
#if clustdir :
# stars=ascii.read(clustdir+'/'+cluster+'.txt',names=['APOGEE_ID'],format='no_header')
# jsaved,j2 = match.match(allstar[1].data[gd]['APOGEE_ID'],stars['APOGEE_ID'])
j=apselect.clustmember(allstar[1].data[gd],cluster,raw=True,firstgen=True,firstpos=False,logg=logg,
pm=pm,dist=dist)
#print(cluster,len(j),len(jsaved))
if len(j) < 1 :
j=apselect.clustmember(allstar[1].data[gd],cluster,raw=True,logg=logg,pm=pm,dist=dist)
all=set(all).union(gd[j].tolist())
data=allstar[1].data[list(all)]
# in the abbreviated array, get the lists of cluster members
members=[]
fig,ax=plots.multi(1,1,figsize=(16,8))
for label in ax.axes.get_xticklabels():
label.set_visible(False)
for label in ax.axes.get_yticklabels():
label.set_visible(False)
iplot=0
for iclust,cluster in enumerate(clusts) :
if cluster in calclusters :
ax.scatter((iplot//12)*0.1+0.25,12-iplot%12,marker=markers[iclust],color=colors[iclust])
ax.text((iplot//12)*0.1+0.26,12-iplot%12,clusts[iclust]+' ( '+str(clusters[iclust].mh)+')',color=colors[iclust],va='center')
ax.set_xlim(0.23,0.8)
j=apselect.clustmember(data,clusts[iclust],raw=True,firstgen=True,firstpos=False,logg=logg,pm=pm,dist=dist)
if len(j) < 1 :
j=apselect.clustmember(data,clusts[iclust],raw=True,logg=logg, pm=pm, dist=dist)
iplot+=1
else :
j=[]
# members is a list of lists of cluster members
members.append(j)
if hard is not None :
fig.savefig(hard+'clust_key.png')
fig.savefig(hard+'clust_key.pdf')
plt.close(fig)
# setup output structured array
rec = np.zeros(len(doels),dtype=[
('elem','S5'),
('elemfit','i4'),
('mhmin','f4'),
('te0','f4'),
('temin','f4'),
('temax','f4'),
('femin','f4'),
('femax','f4'),
('caltemin','f4'),
('caltemax','f4'),
('extfit','i4'),
('extpar','3f4'),
('clust','{:1d}S16'.format(len(clusts))),
('par','3f4'),
('abun','{:1d}f4'.format(len(clusts))),
('nstars','{:1d}i4'.format(len(clusts))),
('mean','{:1d}f4'.format(len(clusts))),
('rms','{:1d}f4'.format(len(clusts))),
('rmsgd','{:1d}f4'.format(len(clusts))),
('rawmean','{:1d}f4'.format(len(clusts))),
('errpar','4f4'),
])
# empirical scatter bin setup: these are bin left edges
if dwarfs :
dmhbin=3.
mhbins=np.arange(-2.25,0.75,dmhbin)
nerrfit=2
xr=[3000,7500]
else :
dmhbin=0.5
mhbins=np.arange(-2.25,0.75,dmhbin)
nerrfit=3
xr=[3000,5500]
dteffbin=250
teffbins=np.arange(3500,6000,dteffbin)
dsnbin=50
snbins=np.arange(50,250,dsnbin)
# plot setup
if plot and not sepplot :
fig,ax = plots.multi(nx,ny,hspace=0.001,wspace=0.5,figsize=(18,6))
# plot setup for summary all-element plots
if plot and len(doels) > 2 :
nels=0
for el in doels :
# parameters for the fit for this element
if cal == 'dr13' :
pars = dr13cal(el,dwarfs=dwarfs)
elif cal == 'dr14' :
pars = dr14cal(el,dwarfs=dwarfs)
else :
pars = defaultcal(el,dwarfs=dwarfs)
if pars['elemfit'] >=0 : nels+=1
allfig,allax=plots.multi(2,(nels-1)//2+1,hspace=0.001,wspace=0.3,figsize=(12,18))
if len(solar) > 0 : allsolarfig,allsolarax=plots.multi(2,(nels-1)//2+1,hspace=0.001,wspace=0.3,figsize=(12,18))
if errpar :
errfig,errax=plots.multi(len(snbins),len(doels),hspace=0.001,wspace=0.001,figsize=(3*len(snbins),2*len(doels)))
# loop over all the elements!
iel=0
#iplot=0
grid=[]
yt=[]
for iplot,el in enumerate(doels) :
if lines :
jelem = np.where(allstar[3].data['ELEM_SYMBOL'][0] == el)[0]
nlines = len(np.where(allstar[3].data['FELEM_WIND'][0][0,:,jelem] > 0)[0])
if nlines > 0 :
linefig,lineax=plots.multi(2,nlines+1,hspace=0.001,wspace=0.4,figsize=(10,18))
else : nlines = 0
# parameters for the fit for this element
if cal == 'dr13' :
pars = dr13cal(el,dwarfs=dwarfs)
elif cal == 'dr14' :
pars = dr14cal(el,dwarfs=dwarfs)
elif cal == 'dr16' :
pars = dr16cal(el,dwarfs=dwarfs)
else :
pars = defaultcal(el,dwarfs=dwarfs)
pars['clust'] = np.array(clusts,dtype='S16')
pars['abun'] = np.zeros(len(clusts))
pars['par'] = np.zeros(3)
pars['elem'] = el
pars['errpar'] = np.zeros(4)
elemfit = pars['elemfit']
while elemfit >= 0 :
# get the good abundance data for this element, load variables for fit (teff, abun, clust)
abundata, ok = getabun(data,elems,elemtoh,el,xh=xh,calib=calib)
snr=np.clip(data['SNR'],0.,snbins[-1]+dsnbin-0.1)
print(el,pars['elemfit'],pars['mhmin'],len(ok))
# get cluster members
ind=np.array([],dtype=int)
clust=np.array([],dtype='S16')
apogee_id=np.array([],dtype='S16')
jclust=[]
for iclust,cluster in enumerate(clusts) :
#if cluster in calclusters :
i=np.where(clusters.name == clusts[iclust])
# get cluster members: intersection of all cluster members and good ones for this element
j=list(set(ok).intersection(members[iclust]))
jclust.append(j)
if clusters[i].mh > pars['mhmin'] and len(j) > 3 :
# ind has the indices of all stars above the [M/H] threshold and good abundances
ind=np.append(ind,j)
clust=np.append(clust,[clusts[iclust]]*len(j))
for iline in range(1+nlines) :
abundata, ok = getabun(data,elems,elemtoh,el,xh=xh,calib=calib,line=iline)
teff=data['FPARAM'][ind,0]
mh=data['FPARAM'][ind,3]
vscatter=data['VSCATTER'][ind]
abun=abundata[ind]
try :
visit=data['VISIT'][ind]
except :
visit = np.zeros(len(ind))
# only use visits=0 and vscatter<maxvscatter[gd] for fit, but we'll plot all
gd=np.where((visit == 0) & (vscatter<maxvscatter) & (teff>=pars['temin']) & (teff<=pars['temax']))[0]
bd=np.where((visit > 0) | (vscatter>=maxvscatter) | (teff<pars['temin']) | (teff>pars['temax']))[0]
if len(gd) > 2 :
print(el,len(ind))
for iter in range(2) :
print(el,iter,len(gd),pars['temin'],pars['temax'])
deriv=calderiv(teff[gd]-pars['te0'],abun[gd],clust[gd],order=pars['elemfit'])
soln,inv = fit.linear(abun[gd],deriv)
nclust = len(np.unique(clust[gd]))
pars['clust'] = np.sort(np.unique(clust[gd]))
pars['par'][0:pars['elemfit']] = soln[nclust:len(soln)]
pars['abun'] = soln[0:nclust]
func=calfunc(pars,teff,mh,abun,clust,order=pars['elemfit'],extcal=False)
if iter == 0 :
res=abun-func
gd=np.where((visit == 0) & (vscatter<maxvscatter) & (teff>=pars['temin']) & (teff<=pars['temax']) & (abs(res) <= reject))[0]
tmpreject=reject
while len(gd) < 10 and tmpreject<reject*8 :
tmpreject*=2.
gd=np.where((visit == 0) & (vscatter<maxvscatter) & (teff>=pars['temin']) & (teff<=pars['temax']) & (abs(res) <= tmpreject))[0]
bd=np.where((visit > 0) | (vscatter>=maxvscatter) | (teff<pars['temin']) | (teff>pars['temax']) | (abs(res) > tmpreject))[0]
print('\nGlobal {:<8s} {:8.3f} (summed) {:8.3f} (with 3 visits)'.format(el, (abun[gd]-func[gd]).std(), (abun[bd]-func[bd]).std()))
# loop through all clusters and determine mean and scatter for each cluster, and accumulate
# data for scatter as f([M/H],Teff,S/N)
print(' Clusters: mean std (cal) mean std (raw)')
rmsdata=[]
rmsderiv=[]
if errpar and iline == 0 and hard is not None:
f=open(hard+el.strip()+'_err_obj.dat','w')
fc=open(hard+el.strip()+'_err_clust.dat','w')
tedata=[]
sndata=[]
mhdata=[]
val=[]
for iclust,cluster in enumerate(clusts) :
if cluster in calclusters and len(jclust[iclust])>3 :
j=np.array(jclust[iclust])
try:
cgd=np.where((data['VISIT'][j] == 0) & (data['VSCATTER'][j]<maxvscatter) &
(data['FPARAM'][j,0]>=pars['temin']) & (data['FPARAM'][j,0]<=pars['temax']))[0]
except:
cgd=np.where((data['VSCATTER'][j]<maxvscatter) & (data['FPARAM'][j,0]>=pars['temin']) & (data['FPARAM'][j,0]<=pars['temax']))[0]
if len(gd) > 1 :
rmsgd = (abundata[j[cgd]]-calfunc(pars,data['FPARAM'][j[cgd],0],data['FPARAM'][j[cgd],3],abundata[j[cgd]],['']*len(cgd),order=pars['elemfit'])).std()
else :
rmsgd=-1.
rec['rms'][iel,iclust] = (abundata[j]-calfunc(pars,data['FPARAM'][j,0],data['FPARAM'][j,3],abundata[j],['']*len(j),order=pars['elemfit'])).std()
rec['rmsgd'][iel,iclust] = rmsgd
rec['mean'][iel,iclust] = (abundata[j]-calfunc(pars,data['FPARAM'][j,0],data['FPARAM'][j,3],abundata[j],['']*len(j),order=pars['elemfit'])).mean()
rec['rawmean'][iel,iclust] = abundata[j].mean()
rec['nstars'][iel,iclust] = len(j)
print(' {:<10s}{:8.3f}{:8.3f}{:8.3f}{:6d}{:6d}{:8.3f}{:8.3f}'.format(
clusts[iclust],rec['mean'][iel,iclust],rec['rms'][iel,iclust],rmsgd,rec['nstars'][iel,iclust],
len(cgd),abundata[j].mean(),abundata[j].std()))
# empirical uncertainties
if errpar and iline==0 :
tedata.extend(data['FPARAM'][j,0])
sndata.extend(snr[j])
mhdata.extend(data['FPARAM'][j,3])
val.extend(abundata[j]-rec['mean'][iel,iclust])
if hard is not None:
for jj in j :
f.write('{:8.1f}{:8.2f}{:8.2f}{:8.3f}{:8.1f} {:s} {:s}\n'.format(
data['FPARAM'][jj,0],snr[jj],data['FPARAM'][jj,3],abundata[jj]-rec['mean'][iel,iclust],
rec['mean'][iel,iclust],clusts[iclust],data['APOGEE_ID'][jj]))
i=np.where(clusters.name == clusts[iclust])
for mhbin in mhbins :
if (clusters[i].mh > mhbin) and (clusters[i].mh <= mhbin+dmhbin) :
for teffbin in teffbins :
for snbin in snbins :
ibin = np.where(( data['FPARAM'][j,0] > teffbin) & (data['FPARAM'][j,0] <= teffbin+dteffbin) &
( snr[j] > snbin) & (snr[j] <= snbin+dsnbin) & (abs(abundata[j]-rec['mean'][iel,iclust]) < 0.3) )[0]
if len(ibin) > 3 :
if not np.isfinite(np.log(abundata[np.array(j)[ibin]].std())) :
pdb.set_trace()
rmsdata.append(np.log(abundata[np.array(j)[ibin]].std()))
if dwarfs :
rmsderiv.append([1.,teffbin+dteffbin/2.-4500.,snbin+dsnbin/2.-100.])
else :
rmsderiv.append([1.,teffbin+dteffbin/2.-4500.,snbin+dsnbin/2.-100.,mhbin+dmhbin/2.])
                                    if errpar and iline==0 and hard is not None:
fc.write('{:8.1f}{:8.2f}{:8.2f}{:8.2f}{:5d}{:8.3f} {:s}\n'.format(
teffbin+dteffbin/2.,snbin+dsnbin/2.,mhbin+dmhbin/2.,clusters[i].mh[0],len(ibin),abundata[np.array(j)[ibin]].std(),clusts[iclust]))
iplt = np.where(snbins == snbin)[0][0]
plots.plotc(errax[iel,iplt],clusters[i].mh,teffbin+dteffbin/2.,abundata[np.array(j)[ibin]].std(),
size=30,zr=[0,0.1],xr=[-2.5,0.5],yr=[3500,5500],linewidth=1)
if errpar and iline==0 :
if hard is not None:
f.close()
fc.close()
#empirical uncertainties
rmsdata=np.array(rmsdata)
rmsderiv=np.array(rmsderiv)
if len(rmsdata) > 5 :
soln,inv = fit.linear(rmsdata,rmsderiv.transpose())
y, x = np.mgrid[3500:5500:200j,-2.5:0.5:200j]
for iplt in range(len(snbins)) :
sn = snbins[iplt]+dsnbin/2.
errax[iel,iplt].imshow(elemerr(soln,y-4500.,sn-100.,x),extent=[-2.5,0.5,3500,5500], aspect='auto',vmin=0,vmax=0.1, origin='lower',cmap='rainbow')
errax[iel,iplt].text(0.98,0.98,el+' S/N={:4.0f}'.format(sn),va='top',ha='right',transform=errax[iel,iplt].transAxes)
pars['errpar'] = soln
# send all points to generic errfit function (not rms within each bin) for alternative approach and to get plots
try:
soln2 = err.errfit(np.array(tedata),np.array(sndata),np.array(mhdata),np.array(val),out=hard+el.strip(),mkhtml=False)
grid.append([os.path.basename(hard+el.strip()+'_err.png'),os.path.basename(hard+el.strip()+'_err_sn.png')])
yt.append(el.strip())
except:
print('errfit failed: ',el)
# get calibrated values before external calibration
        func_cal=calfunc(pars,teff,mh,abun,clust,order=pars['elemfit'],extcal=False)
        func_uncal=calfunc(pars,teff,mh,abun,clust,order=0,extcal=False)
# get the abundances of the "solar circle" stars
if len(solar) > 0 and len(doels) > 2 :
solar_teff=allstar[1].data['FPARAM'][solar,0]
solar_mh=allstar[1].data['FPARAM'][solar,3]
solar_abun,solar_ok= getabun(allstar[1].data[solar],elems,elemtoh,el,xh=xh,calib=calib)
solar_func=calfunc(pars,solar_teff,solar_mh,solar_abun,np.array(['']*len(solar_teff)),order=pars['elemfit'],calib=calib)
# get mean and scatter of solar metallicity stars, rejecting points more than 0.2 from mean
ss=np.where((solar_mh[solar_ok] > -0.05) & (solar_mh[solar_ok] < 0.05) &
(solar_teff[solar_ok] > pars['temin']) & (solar_teff[solar_ok] < pars['temax']))[0]
median=np.median(solar_abun[solar_ok[ss]]-solar_func[solar_ok[ss]])
ss=np.where((solar_mh[solar_ok] > -0.05) & (solar_mh[solar_ok] < 0.05) &
(solar_teff[solar_ok] > pars['temin']) & (solar_teff[solar_ok] < pars['temax']) &
(np.abs(solar_abun[solar_ok]-solar_func[solar_ok])<0.2))[0]
std=(solar_abun[solar_ok[ss]]-solar_func[solar_ok[ss]]).std()
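            # extfit 4: adopt the solar-sample median offset as the external zeropoint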
if pars['extfit'] == 4 :
pars['extpar'] = np.array([median,0.,0.])
median_uncal=np.median(solar_abun[solar_ok[ss]])
std_uncal=solar_abun[solar_ok[ss]].std()
if pars['extfit'] == 10 :
j=np.where(rec['nstars'][iel]>0)[0]
pars['extpar'][0] = np.median(rec['mean'][iel][j]-clusters[j].mh)
elif pars['extfit'] == 11 :
j=np.where((clusters.mh < -1) & (rec['nstars'][iel]>0))[0]
pars['extpar'][0] = np.median(rec['mean'][iel][j]-clusters[j].mh)
j=np.where((clusters.mh > -0.5) & (rec['nstars'][iel]>0))[0]
pars['extpar'][1] = np.median(rec['mean'][iel][j]-clusters[j].mh)
# make plots!
if plot :
if sepplot :
fig,ax = plots.multi(nx,ny,hspace=0.001,wspace=0.5,figsize=[12,6])
fig1,ax1 = plots.multi(1,1,figsize=[12,4])
fig2,ax2 = plots.multi(1,1,figsize=[12,4])
else :
for iy in range(ny) :
for ix in range(nx) :
ax[iy,ix].cla()
if iline == 0 :
#after calibration
plots.plotp(ax[0,0],teff[gd],abun[gd]-func_cal[gd], typeref=clust[gd],yr=[-0.29,0.29],xr=xr,
types=clusts,color=colors,marker=markers,size=16,yt=el)
plots.plotp(ax[0,0],teff[bd],abun[bd]-func_cal[bd],typeref=clust[bd],yr=[-0.29,0.29],xr=xr,
types=clusts,color=colors,marker=markers,size=16,facecolors='none',linewidths=0.2)
ax[0,0].text(0.98,0.98,'{:5.3f}'.format((abun[gd]-func_cal[gd]).std()),transform=ax[0,0].transAxes,va='top',ha='right')
#before calibration
plots.plotp(ax[1,0],teff[gd],abun[gd]-func_uncal[gd],typeref=clust[gd],yr=[-0.29,0.29],xr=xr,
types=clusts,color=colors,marker=markers,size=16,xt='Teff',yt=el)
plots.plotp(ax[1,0],teff[bd],abun[bd]-func_uncal[bd],typeref=clust[bd],yr=[-0.29,0.29],xr=xr,
types=clusts,color=colors,marker=markers,size=16,facecolors='none',linewidths=0.2)
if sepplot:
plots.plotp(ax1,teff[gd],abun[gd]-func_uncal[gd],typeref=clust[gd],yr=[-0.29,0.29],xr=xr,
types=clusts,color=colors,marker=markers,size=16,xt='Teff',yt=el)
plots.plotp(ax1,teff[bd],abun[bd]-func_uncal[bd],typeref=clust[bd],yr=[-0.29,0.29],xr=xr,
types=clusts,color=colors,marker=markers,size=16,facecolors='none',linewidths=0.2)
ax[1,0].text(0.98,0.98,'{:5.3f}'.format((abun[gd]-func_uncal[gd]).std()),transform=ax[1,0].transAxes,va='top',ha='right')
# figure with all elements on same plot
if len(doels) > 2 :
if iline == 0 :
plots.plotp(allax[iplot//2,iplot%2],teff[gd],abun[gd]-func_uncal[gd],typeref=clust[gd],yr=[-0.29,0.29],xr=[3500,5500],
types=clusts,color=colors,marker=markers,size=8,xt='Teff',yt=el)
plots.plotp(allax[iplot//2,iplot%2],teff[bd],abun[bd]-func_uncal[bd],typeref=clust[bd],yr=[-0.29,0.29],xr=[3500,5500],
types=clusts,color=colors,marker=markers,size=8,facecolors='none',linewidths=0.2)
allax[iplot//2,iplot%2].text(0.98,0.98,'{:5.3f}'.format(
(abun[gd]-func_uncal[gd]).std()),transform=allax[iplot//2,iplot%2].transAxes,va='top',ha='right')
m67 = np.where(clusts == 'M67')[0][0]
allax[iplot//2,iplot%2].text(0.98,0.75,'{:5.3f}'.format(
rec['rms'][iel,m67]),transform=allax[iplot//2,iplot%2].transAxes,va='top',ha='right',color='r')
allax[iplot//2,iplot%2].yaxis.set_major_locator(MultipleLocator(0.2))
allax[iplot//2,iplot%2].yaxis.set_minor_locator(MultipleLocator(0.05))
label = allax[iplot//2,iplot%2].yaxis.get_label()
if len(label.get_text()) < 5 : label.set_rotation(0)
if nlines > 0 :
plots.plotp(lineax[iline,0],teff[gd],abun[gd]-func_uncal[gd],typeref=clust[gd],yr=[-0.29,0.29],xr=xr,
types=clusts,color=colors,marker=markers,size=16,xt='Teff',yt=el)
plots.plotp(lineax[iline,0],teff[bd],abun[bd]-func_uncal[bd],typeref=clust[bd],yr=[-0.29,0.29],xr=xr,
types=clusts,color=colors,marker=markers,size=16,facecolors='none',linewidths=0.2)
plots.plotp(lineax[iline,1],teff[gd],abun[gd]-func_uncal[gd],typeref=clust[gd],yr=[-2.,2.],xr=xr,
types=clusts,color=colors,marker=markers,size=16,xt='Teff',yt=el)
plots.plotp(lineax[iline,1],teff[bd],abun[bd]-func_uncal[bd],typeref=clust[bd],yr=[-2,2],xr=xr,
types=clusts,color=colors,marker=markers,size=16,facecolors='none',linewidths=0.2)
if iline > 0 :
w=np.squeeze(allstar[3].data['FELEM_WIND'][0][:,iline-1,jelem])
lineax[iline,0].text(0.05,0.8,'{:8.2f}-{:8.2f} {:8.2f}'.format(w[0],w[1],w[2]),transform=lineax[iline,0].transAxes,fontsize=10)
lineax[iline,1].text(0.05,0.8,'{:8.2f}-{:8.2f} {:8.2f}'.format(w[0],w[1],w[2]),transform=lineax[iline,1].transAxes,fontsize=10)
# stuff for interactive plots
            #plots._id_cols=['APOGEE_ID','VISIT']   # alternative ID columns, superseded below
            plots._id_cols=['APOGEE_ID']
plots._data=data[ind]
plots._data_x=teff
plots._data_y=abun-func
# plot fits
x=np.linspace(pars['caltemin'],pars['caltemax'],200)
func=calfunc(pars,x,x*0.,x*0,np.array(['']*len(x)),order=pars['elemfit'],extcal=False)
plots.plotl(ax[1,0],x,func)
if sepplot: plots.plotl(ax1,x,func)
if len(doels) > 2 :
# figure with all elements on same plot
if iline==0 : plots.plotl(allax[iplot//2,iplot%2],x,func)
# solar circle stars
if iline==0 and len(solar) > 0 :
plots.plotc(allsolarax[iplot//2,iplot%2],solar_teff[solar_ok],solar_abun[solar_ok]-solar_func[solar_ok],solar_mh[solar_ok],
xr=xr,yr=[-0.5,0.5],zr=[-1,0.5],xt='Teff',yt=el)
plots.plotl(allsolarax[iplot//2,iplot%2],[pars['temin'],pars['temax']],[median,median],color='k')
plots.plotl(allsolarax[iplot//2,iplot%2],xr,[median,median],color='k',ls=':')
allsolarax[iplot//2,iplot%2].text(0.98,0.98,'{:5.3f}'.format(std),ha='right',va='top',transform=allsolarax[iplot//2,iplot%2].transAxes)
allsolarax[iplot//2,iplot%2].text(0.98,0.02,'{:5.3f}'.format(median),ha='right',va='bottom',transform=allsolarax[iplot//2,iplot%2].transAxes)
label = allsolarax[iplot//2,iplot%2].yaxis.get_label()
if len(label.get_text()) < 5 : label.set_rotation(0)
plots.plotc(ax[0,2],solar_teff[solar_ok],solar_abun[solar_ok]-solar_func[solar_ok],solar_mh[solar_ok],xr=xr,yr=[-0.5,0.5],zr=[-1,0.5])
plots.plotl(ax[0,2],xr,[median,median],color='orange')
ax[0,2].text(0.98,0.98,'{:5.3f}'.format(std),ha='right',va='top',transform=ax[0,2].transAxes)
ax[0,2].text(0.98,0.02,'{:5.3f}'.format(median),ha='right',va='bottom',transform=ax[0,2].transAxes)
plots.plotc(ax[0,3],solar_mh[solar_ok],solar_abun[solar_ok]-solar_func[solar_ok],solar_teff[solar_ok],yr=[-0.5,0.5],zr=xr)
#uncalibrated
plots.plotc(ax[1,2],solar_teff[solar_ok],solar_abun[solar_ok],solar_mh[solar_ok],xr=xr,yr=[-0.5,0.5],zr=[-1,0.5])
plots.plotl(ax[1,2],xr,[median_uncal,median_uncal],color='orange')
ax[1,2].text(0.98,0.98,'{:5.3f}'.format(std_uncal),ha='right',va='top',transform=ax[1,2].transAxes)
ax[1,2].text(0.98,0.02,'{:5.3f}'.format(median_uncal),ha='right',va='bottom',transform=ax[1,2].transAxes)
plots.plotc(ax[1,3],solar_mh[solar_ok],solar_abun[solar_ok],solar_teff[solar_ok],yr=[-0.5,0.5],zr=xr)
if xh or el == 'M' :
gdplt=np.where(rec['nstars'][iel]>0)[0]
plots.plotp(ax[0,1],clusters[gdplt].mh,rec['rawmean'][iel][gdplt]-clusters[gdplt].mh,
typeref=clusters[gdplt].name,types=clusts,color=colors,marker=markers,size=16,
xr=[-2.5,0.5],yr=[-0.6,0.6],xt='Lit [M/H]',yt='ASPCAP-lit [M/H]',yerr=rec['rms'][iel])
plots.plotp(ax[1,1],clusters[gdplt].mh,rec['mean'][iel][gdplt]-clusters[gdplt].mh,
typeref=clusters[gdplt].name,types=clusts,color=colors,marker=markers,size=16,
xr=[-2.5,0.5],yr=[-0.6,0.6],xt='Lit [M/H]',yt='ASPCAP-lit [M/H]',yerr=rec['rms'][iel])
if sepplot :
plots.plotp(ax2,clusters[gdplt].mh,rec['mean'][iel][gdplt]-clusters[gdplt].mh,
typeref=clusters[gdplt].name,types=clusts,color=colors,marker=markers,size=16,
xr=[-2.5,0.5],yr=[-0.6,0.6],xt='Lit [M/H]',yt='ASPCAP-lit [M/H]',yerr=rec['rms'][iel])
ax2.plot([-2.5,-1.0],[0.108797,0.108797],color='k')
ax2.plot([-1.0,-0.5],[0.108797,-0.0272657],color='k')
ax2.plot([-0.5,0.5],[-0.0272657,-0.0272657],color='k')
else :
gdplt=np.where(rec['nstars'][iel]>0)[0]
plots.plotp(ax[0,1],clusters[gdplt].mh,rec['rawmean'][iel][gdplt],
typeref=clusters[gdplt].name,types=clusts,color=colors,marker=markers,size=16,
xr=[-2.5,0.5],yr=[-0.6,0.6],xt='Lit [M/H]',yt='ASPCAP-lit [M/H]',yerr=rec['rms'][iel])
plots.plotp(ax[1,1],clusters[gdplt].mh,rec['mean'][iel][gdplt],
typeref=clusters[gdplt].name,types=clusts,color=colors,marker=markers,size=16,
xr=[-2.5,0.5],yr=[-0.6,0.6],xt='Lit [M/H]',yt='ASPCAP',yerr=rec['rms'][iel])
plots.event(fig)
#if iline == nlines : iplot+=1
#if not sepplot and cal != 'inter' : pdb.set_trace()
if iline == nlines and hard is not None :
fig.savefig(hard+el.strip()+'.png')
plt.close(fig)
if sepplot:
fig1.savefig(hard+el+'.pdf')
fig2.savefig(hard+el+'_lit.pdf')
plt.close(fig1)
plt.close(fig2)
if nlines > 0 :
linefig.savefig(hard+el+'_lines.png')
linefig.savefig(hard+el+'_lines.pdf')
plt.close(linefig)
if inter :
# with interactive options, can adjust fit order and limits and redo
plt.draw()
plt.pause(1)
print('elemfit: ',elemfit)
                s = input('enter new elemfit (-1 to continue to next element, l for new fit limits): ')
try:
elemfit = int(s)
except:
                    s = input('enter new lower and upper fit limits in Teff: ')
pars['temin'] = int(s.split()[0])
pars['temax'] = int(s.split()[1])
if elemfit >=0 : pars['elemfit'] = elemfit
else :
elemfit = -1
# transfer results for this element to output summary array
for key in ['elem','elemfit','mhmin','te0','temin','temax','caltemin','caltemax','extfit','extpar','clust','abun','par','errpar'] :
print(key, pars[key], pars['elem'], pars['elemfit'])
if key == 'clust' or key == 'abun' or key == 'errpar':
n=len(pars[key])
rec[iel][key][0:n]=pars[key]
elif key == 'par' :
# reverse for aspcap_correct
rec[iel][key][:]=pars[key][::-1]
elif key == 'extpar' :
print(pars[key])
rec[iel][key][:]=pars[key][:]
else :
rec[iel][key]=pars[key]
rec[iel]['femin'] = -99.999
rec[iel]['femax'] = 99.999
iel+=1
#if plot and iplot%2 == 1 :
if plot and len(doels)%2 == 1 :
allax[iplot//2,iplot%2].set_visible(False)
ticklabels = allax[iplot//2-1,iplot%2].get_xticklabels()
plt.setp(ticklabels, visible=True)
if plot and hard is not None and len(doels) > 2:
allfig.savefig(hard+'all.png')
if len(solar) > 0 : allsolarfig.savefig(hard+'allsolar.png')
if errpar and hard is not None :
try: html.htmltab(grid,ytitle=yt,file=hard+'err_all.html')
except: pass
errfig.savefig(hard+'err_all.png')
plt.close(errfig)
return rec
def calfunc(pars,teff,mh,abun,clust,order=1,calib=False,extcal=True) :
'''
Apply calibration function. If clust is not '', then include the mean abundance for the cluster as determined from the fit,
otherwise only apply the temperature correction
'''
npts=len(teff)
func=np.zeros([npts])
# if we are given clusters that are not part of the calibration, set them to -999
j=np.where(clust != '')[0]
func[j]=-999.
# start with the cluster mean abundances if requested
for iclust in range(len(pars['clust'])) :
j=np.where(clust == pars['clust'][iclust].strip())[0]
func[j] = pars['abun'][iclust]
# add the temperature terms, truncating at temin and temax
    if not calib :
if order >= 1:
temp=copy.copy(teff)
bd=np.where(temp < pars['temin'])[0]
temp[bd]=pars['temin']
bd=np.where(temp > pars['temax'])[0]
temp[bd]=pars['temax']
for iorder in range(0,order) :
func += pars['par'][iorder]*(temp-pars['te0'])**(iorder+1)
if extcal :
if pars['extfit'] == 4 :
func += pars['extpar'][0]
elif pars['extfit'] == 10 :
func += pars['extpar'][0]+pars['extpar'][1]*mh+pars['extpar'][2]*mh**2
elif pars['extfit'] == 11 :
mhclip=np.clip(mh,-1.,-0.5)
func += pars['extpar'][0] + (mhclip-(-1.))*(pars['extpar'][1]-pars['extpar'][0])/0.5
return func
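# Illustrative sketch (not part of the pipeline; all parameter values below are
# invented): calfunc combines a per-cluster mean offset, a polynomial in
# Teff-te0 with Teff clamped to [temin,temax], and an external zeropoint.
def _calfunc_example() :
    pars = {'clust': np.array(['M67']), 'abun': np.array([0.02]),
            'par': np.array([1.e-4]), 'te0': 4500.,
            'temin': 4000., 'temax': 5000.,
            'extfit': 4, 'extpar': np.array([0.05, 0., 0.])}
    teff = np.array([3800., 4500., 5200.])   # first and last get clamped to the fit window
    mh = np.zeros(3)
    abun = np.zeros(3)
    clust = np.array(['M67', 'M67', ''])     # '' means: apply no cluster offset
    return calfunc(pars, teff, mh, abun, clust, order=1)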
def calderiv(teff,abun,clust,order=1) :
'''
Function/derivatives for abundance calibration
'''
uclust=np.sort(np.unique(clust))
npar=order+len(uclust)
npts=len(teff)
deriv=np.zeros([npar,npts])
for iclust in range(len(uclust)) :
j=np.where(clust == uclust[iclust])[0]
deriv[iclust,j] = 1.
if order >= 1:
for iorder in range(0,order) :
deriv[len(uclust)+iorder,:] = teff**(iorder+1)
return deriv
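# Minimal sketch of the design matrix built by calderiv: one indicator row per
# cluster (sorted, unique) followed by one row per power of Teff-te0. Solving
# abun against this matrix, as done above with fit.linear, yields the cluster
# means followed by the polynomial coefficients. Values here are invented.
def _calderiv_example() :
    dteff = np.array([-500., 0., 300., -200.])          # Teff - te0
    abun = np.array([0.10, 0.02, 0.05, -0.15])
    clust = np.array(['M67', 'M67', 'N6819', 'N6819'])
    deriv = calderiv(dteff, abun, clust, order=1)
    return deriv.shape                                  # (3, 4): 2 cluster rows + 1 slope row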
def defaultcal(el,dwarfs=False) :
'''
    Return default parameters for abundance calibration
'''
te0=4500
temin=4000
if dwarfs : temax=6000
else : temax=5000
elemfit=1
extfit=0
caltemin=3532.5
caltemax=6500
extpar=[0.,0.,0.]
mhmin=-1
return {'elemfit': elemfit, 'mhmin' : mhmin, 'te0': te0, 'temin': temin, 'temax': temax,
'caltemin': caltemin, 'caltemax' : caltemax, 'extfit' : extfit, 'extpar' : np.array(extpar)}
def dr16cal(el,dwarfs=False) :
'''
    Return DR16 default parameters for abundance calibration
'''
te0=4500
# values for WARN and to use for fits, if any
temin=0
if dwarfs : temax=100000
else : temax=10000
# default method/order for fit with Teff (0=none)
elemfit=0
# default method for zeropoint (4=solar neighborhood)
extfit=4
# values for BAD, i.e. no calibration
caltemin=3032.5
caltemax=7500
extpar=[0.,0.,0.]
# minimum metallicity to use in clusters
mhmin=-1
if el.strip() == 'Ge' : elemfit=-1
if el.strip() == 'Rb' : elemfit=-1
if el.strip() == 'Nd' : elemfit=-1
if el.strip() == 'Yb' : elemfit=-1
if not dwarfs :
if el.strip() == 'C' :
extfit=0
if el.strip() == 'CI' :
extfit=0
if el.strip() == 'N' :
extfit=0
if el.strip() == 'O' :
temax=5000
if el.strip() == 'Na' :
temin=3750
elif el.strip() == 'Al' :
temin=3400
elif el.strip() == 'K' :
temin=3900
elif el.strip() == 'P' :
temax=6000
elif el.strip() == 'Ti' :
temin=4200
elif el.strip() == 'TiII' :
temin=4000
elif el.strip() == 'V' :
temax=4800
elif el.strip() == 'Mn' :
temin=4000
elif el.strip() == 'Fe' :
extfit=0
elif el.strip() == 'Co' :
temin=3300
temax=6500
elif el.strip() == 'Cu' :
temin=4000
elif el.strip() == 'Ce' :
temin=4000
temax=5000
else :
if el.strip() == 'O' :
temax=5000
elif el.strip() == 'Na' :
temin=5500
temax=5500
elif el.strip() == 'P' :
            temin=4300   # note: superseded by the reassignment on the next line
            temin=5500
            temax=5500
elif el.strip() == 'S' :
temin=4260
elif el.strip() == 'K' :
temin=4000
temax=6500
elif el.strip() == 'Ti' :
temin=4000
temax=6000
elif el.strip() == 'TiII' :
temin=5500
temax=6000
elif el.strip() == 'V' :
temin=4800
temax=5500
elif el.strip() == 'Cr' :
temin=3800
temax=6200
elif el.strip() == 'Mn' :
temin=3800
elif el.strip() == 'Fe' :
extfit=0
elif el.strip() == 'Co' :
temax=6500
elif el.strip() == 'Cu' :
temax=6200
elif el.strip() == 'Ce' :
            temin=4200   # note: superseded by the reassignment on the next line
            temin=5500
            temax=5500
return {'elemfit': elemfit, 'mhmin' : mhmin, 'te0': te0, 'temin': temin, 'temax': temax,
'caltemin': caltemin, 'caltemax' : caltemax, 'extfit' : extfit, 'extpar' : np.array(extpar)}
def dr14cal(el,dwarfs=False) :
'''
Return calibration parameters for requested element for DR14 choices
elemfit gives order/type of polynomial in cluster fit: 1 (linear), 2 (quadratic), 3 (cubic)
temin/temax gives range over which fit is performed
caltemin/caltemax gives range over which calibration can be applied (bad outside range)
extfit gives source of external calibration: 1 (Arcturus), 2 (Vesta), 3 (M67), 4 (solar sequence), 10 (quadratic fit to clusters), 11(piecewise fit to clusters)
extpar gives the values of the external calibration
'''
# defaults
te0=4500
temin=4000
temax=5000
elemfit=1
extfit=0
caltemin=3532.5
caltemax=6500
extpar=[0.,0.,0.]
mhmin=-1
if el.strip() == 'Ca' : mhmin = -2.
if el.strip() == 'C' : mhmin = -0.6
if el.strip() == 'Fe' : mhmin = -3.
if el.strip() == 'K' : mhmin = -0.6
if el.strip() == 'Mn' : mhmin = -2.0
if el.strip() == 'Na' : mhmin = -0.6
if el.strip() == 'Ni' : mhmin = -3.0
if el.strip() == 'N' : mhmin = -0.6
if el.strip() == 'O' : mhmin = -0.6
if el.strip() == 'Si' : mhmin = -3.0
if el.strip() == 'V' : mhmin = -0.6
# nothing below -1
if mhmin < -1 : mhmin=-1.
if not dwarfs :
# calibration parameters for giants
if el.strip() == 'C' :
elemfit= 0
elif el.strip() == 'CI' :
elemfit= 0
elif el.strip() == 'N' :
elemfit= 0
elif el.strip() == 'O' :
elemfit= 2
temin= 3750
extfit= 4
elif el.strip() == 'Na' :
elemfit= 1
extfit= 4
elif el.strip() == 'Mg' :
elemfit= 1
temax= 5250
extfit= 4
elif el.strip() == 'Al' :
elemfit= 1
extfit= 4
elif el.strip() == 'Si' :
elemfit= 2
temin= 3750
temax= 5250
extfit= 4
elif el.strip() == 'P' :
elemfit= 1
extfit= 4
elif el.strip() == 'S' :
elemfit= 1
extfit= 4
elif el.strip() == 'K' :
elemfit= 1
extfit= 4
elif el.strip() == 'Ca' :
elemfit= 1
temin= 3750
extfit= 4
elif el.strip() == 'Ti' :
elemfit= 1
temin= 3750
extfit= 4
elif el.strip() == 'TiII' :
elemfit= 1
extfit= 4
elif el.strip() == 'V' :
elemfit= 1
extfit= 4
elif el.strip() == 'Cr' :
elemfit= 1
temin= 3750
extfit= 4
elif el.strip() == 'Mn' :
elemfit= 1
extfit= 4
elif el.strip() == 'Fe' :
elemfit= 1
extfit= 4
elif el.strip() == 'Co' :
elemfit= 1
extfit= 4
elif el.strip() == 'Ni' :
elemfit= 1
extfit= 4
elif el.strip() == 'Cu' :
elemfit= -1
elif el.strip() == 'Ge' :
elemfit= -1
elif el.strip() == 'Ce' :
elemfit= -1
elif el.strip() == 'Rb' :
elemfit= -1
#elemfit= 1
#extfit= 4
elif el.strip() == 'Y' :
elemfit= -1
elif el.strip() == 'Nd' :
elemfit= -1
elif el.strip() == 'M' :
elemfit= 1
extfit=11
elif el.strip() == 'alpha' :
elemfit= 1
temax=5250
extfit= 4
else :
# default values for dwarfs
temin=3200
temax=6250
elemfit=3
caltemin=-1
caltemax=6500
extfit=0
extpar=[0.,0.,0.]
# manual overrides for each element, dwarfs
if el.strip() == 'C' :
elemfit=1
extfit=4
elif el.strip() == 'CI' :
elemfit=1
caltemin=3500
caltemax=5000
extfit=4
elif el.strip() == 'N' :
elemfit=0
caltemin=3500
extfit=4
elif el.strip() == 'O' :
elemfit=2
temin=3500
temax=4500
extfit=4
elif el.strip() == 'Na' :
elemfit=-1 #0
temin=3750
temax=5500
caltemin=3750
extfit=4
elif el.strip() == 'Mg' :
elemfit=1
temin=3750
extfit=4
elif el.strip() == 'Al' :
elemfit=2
temin=3750
caltemin=3500
extfit=4
elif el.strip() == 'Si' :
elemfit=1
temin=3500
extfit=4
elif el.strip() == 'P' :
elemfit=0
caltemin=3750
caltemax=5000
extfit=0
elif el.strip() == 'S' :
elemfit=1
temin=3750
caltemin=3532
extfit=4
elif el.strip() == 'K' :
elemfit=2
temin=3750
caltemin=3750
extfit=4
elif el.strip() == 'Ca' :
elemfit=1
temin=3750
caltemin=3750
extfit=4
elif el.strip() == 'Ti' :
elemfit=3
temin=3750
temax=5250
caltemin=3750
extfit=4
elif el.strip() == 'TiII' :
elemfit=-1
caltemax=-1
extfit=0
elif el.strip() == 'V' :
elemfit=2
temax=5250
caltemin=3750
extfit=4
elif el.strip() == 'Cr' :
elemfit=0
temax=5250
caltemin=3750
extfit=4
elif el.strip() == 'Mn' :
elemfit=3
temin=3500
caltemin=3500
extfit=4
elif el.strip() == 'Fe' :
elemfit=2
temin=3500
extfit=4
elif el.strip() == 'Co' :
elemfit=-1
elif el.strip() == 'Ni' :
elemfit=1
temin=3500
caltemin=3500
extfit=4
elif el.strip() == 'Cu' :
elemfit=-1 #2
temin=3750
caltemin=3750
extfit=4
elif el.strip() == 'Ge' :
elemfit=-1
elif el.strip() == 'Ce' :
elemfit=-1
elif el.strip() == 'Rb' :
elemfit=-1 #1
caltemin=3500
temin=3200
temax=5250
extfit=4
elif el.strip() == 'Y' :
elemfit=-1
elif el.strip() == 'Nd' :
elemfit=-1
elif el.strip() == 'M' :
elemfit=1
temin=3200
extfit=10
elif el.strip() == 'alpha' :
elemfit=2
temin=3500
caltemin=3500
extfit=4
return {'elemfit': elemfit, 'mhmin' : mhmin, 'te0': te0, 'temin': temin, 'temax': temax,
'caltemin': caltemin, 'caltemax' : caltemax, 'extfit' : extfit, 'extpar' : np.array(extpar)}
def dr13cal(el,dwarfs=False) :
'''
Return calibration parameters for requested element for DR13 choices
elemfit gives order/type of polynomial in cluster fit: 1 (linear), 2 (quadratic), 3 (cubic)
temin/temax gives range over which fit is performed
caltemin/caltemax gives range over which calibration can be applied (bad outside range)
extfit gives source of external calibration: 1 (Arcturus), 2 (Vesta), 3 (M67), 4 (solar sequence), 10 (quadratic fit to clusters)
extpar gives the values of the external calibration
'''
# defaults
te0=4500
temin=4000
temax=5000
elemfit=1
extfit=0
caltemin=3532.5
caltemax=6500
extpar=[0.,0.,0.]
mhmin=-1
if el.strip() == 'Ca' : mhmin = -2.
if el.strip() == 'C' : mhmin = -0.6
if el.strip() == 'Fe' : mhmin = -3.
if el.strip() == 'K' : mhmin = -0.6
if el.strip() == 'Mn' : mhmin = -2.0
if el.strip() == 'Na' : mhmin = -0.6
if el.strip() == 'Ni' : mhmin = -3.0
if el.strip() == 'N' : mhmin = -0.6
if el.strip() == 'O' : mhmin = -0.6
if el.strip() == 'Si' : mhmin = -3.0
if el.strip() == 'V' : mhmin = -0.6
# nothing below -1
if mhmin < -1 : mhmin=-1.
if not dwarfs :
# calibration parameters for giants
if el.strip() == 'C' :
elemfit= 0
elif el.strip() == 'CI' :
elemfit= 0
elif el.strip() == 'N' :
elemfit= 0
elif el.strip() == 'O' :
elemfit= 2
temin= 3750
extfit= 4
extpar= [0.060,0.,0.]
elif el.strip() == 'Na' :
elemfit= 2
extfit= 4
extpar= [0.186,0.,0.]
elif el.strip() == 'Mg' :
elemfit= 3
temin= 3500
extfit= 4
extpar= [0.045,0.,0.]
elif el.strip() == 'Al' :
elemfit= 3
extfit= 4
extpar= [0.108,0.,0.]
elif el.strip() == 'Si' :
elemfit= 3
temin= 3500
extfit= 4
extpar= [0.107,0.,0.]
elif el.strip() == 'P' :
elemfit= 2
extfit= 4
extpar= [-0.008,0.,0.]
elif el.strip() == 'S' :
elemfit= 2
extfit= 4
extpar= [-0.092,0.,0.]
elif el.strip() == 'K' :
elemfit= 1
extfit= 4
extpar= [-0.026,0.,0.]
elif el.strip() == 'Ca' :
elemfit= 3
temin= 3750
extfit= 4
extpar= [-0.021,0.,0.]
elif el.strip() == 'Ti' :
elemfit= 3
temin= 3500
extfit= 4
extpar= [-0.014,0.,0.]
elif el.strip() == 'TiII' :
elemfit= 2
extfit= 4
extpar= [0.166,0.,0.]
elif el.strip() == 'V' :
elemfit= 3
temin= 3750
extfit= 4
extpar= [0.110,0.,0.]
elif el.strip() == 'Cr' :
elemfit= 2
temin= 3500
extfit= 4
extpar= [-0.057,0.,0.]
elif el.strip() == 'Mn' :
elemfit= 1
extfit= 4
extpar= [0.041,0.,0.]
elif el.strip() == 'Fe' :
elemfit= 2
temin= 3500
extfit= 4
extpar= [-0.005,0.,0.]
elif el.strip() == 'Co' :
elemfit= 3
extfit= 4
extpar= [0.003,0.,0.]
elif el.strip() == 'Ni' :
elemfit= 2
temin= 3750
extfit= 4
extpar= [-0.001,0.,0.]
elif el.strip() == 'Cu' :
elemfit= 3
temin= 3
extfit= 4
extpar= [0.452,0.,0.]
elif el.strip() == 'Ge' :
elemfit= 2
extfit= 4
extpar= [0.354,0.,0.]
elif el.strip() == 'Ce' :
elemfit= -1
elif el.strip() == 'Rb' :
elemfit= 2
temin= 3750
extfit= 4
extpar= [-0.105,0.,0.]
elif el.strip() == 'Y' :
elemfit= -1
elif el.strip() == 'Nd' :
elemfit= -1
elif el.strip() == 'M' :
elemfit= 1
elif el.strip() == 'alpha' :
elemfit= 2
extfit= 4
extpar = [0.056,0.,0.]
else :
# default values for dwarfs
temin=3200
temax=6250
elemfit=3
caltemin=-1
caltemax=6500
extfit=0
extpar=[0.,0.,0.]
# manual overrides for each element, dwarfs
if el.strip() == 'C' :
elemfit=1
extfit=4
extpar=[-0.019,0.,0.]
elif el.strip() == 'CI' :
extfit=4
extpar=[-0.026,0.,0.]
elif el.strip() == 'N' :
extfit=4
extpar=[-0.01,0.,0.]
elif el.strip() == 'O' :
elemfit=3
temin=3500
temax=4500
extfit=4
extpar=[0.068,0.,0.]
elif el.strip() == 'Na' :
elemfit=1
temin=3750
temax=5500
caltemin=3750
extfit=4
extpar=[0.096,0.,0.]
elif el.strip() == 'Mg' :
elemfit=3
temin=3750
extfit=4
extpar=[-0.003,0.,0.]
elif el.strip() == 'Al' :
elemfit=2
temin=3750
caltemin=3500
extfit=4
extpar=[0.043,0.,0.]
elif el.strip() == 'Si' :
elemfit=1
temin=3500
extfit=4
extpar=[-0.023,0.,0.]
elif el.strip() == 'P' :
caltemax=-1
extfit=0
extpar=[0.,0.,0.]
elif el.strip() == 'S' :
elemfit=1
temin=3750
caltemin=5500
extfit=4
extpar=[-0.017,0.,0.]
elif el.strip() == 'K' :
elemfit=2
temin=3750
caltemin=3750
extfit=4
extpar=[-0.029,0.,0.]
elif el.strip() == 'Ca' :
elemfit=1
temin=3750
caltemin=3750
extfit=4
extpar=[0.023,0.,0.]
elif el.strip() == 'Ti' :
elemfit=3
temin=3750
temax=5250
caltemin=3750
extfit=4
extpar=[-0.002,0.,0.]
elif el.strip() == 'TiII' :
caltemax=-1
extfit=0
extpar=[0.,0.,0.]
elif el.strip() == 'V' :
elemfit=2
temax=5250
caltemin=3750
extfit=4
extpar=[0.002,0.,0.]
elif el.strip() == 'Cr' :
elemfit=1
temax=5250
caltemin=3750
extfit=4
extpar=[-0.044,0.,0.]
elif el.strip() == 'Mn' :
elemfit=3
temin=3500
caltemin=3500
extfit=4
extpar=[-0.077,0.,0.]
elif el.strip() == 'Fe' :
elemfit=2
temin=3500
extfit=4
extpar=[0.016,0.,0.]
elif el.strip() == 'Co' :
elemfit=-1
elif el.strip() == 'Ni' :
elemfit=1
temin=3500
caltemin=3500
extfit=4
extpar=[0.03,0.,0.]
elif el.strip() == 'Cu' :
elemfit=2
temin=3750
caltemin=4500
extfit=4
extpar=[0.026,0.,0.]
elif el.strip() == 'Ge' :
elemfit=-1
elif el.strip() == 'Ce' :
elemfit=-1
elif el.strip() == 'Rb' :
elemfit=1
temin=3200
temax=5250
extfit=4
extpar=[-0.217,0.,0.]
elif el.strip() == 'Y' :
elemfit=-1
elif el.strip() == 'Nd' :
elemfit=-1
elif el.strip() == 'M' :
elemfit=3
temin=3200
extfit=0
extpar=[0.0,0.,0.]
elif el.strip() == 'alpha' :
elemfit=1
extfit=4
extpar=[-0.004,0.,0.]
return {'elemfit': elemfit, 'mhmin' : mhmin, 'te0': te0, 'temin': temin, 'temax': temax,
'caltemin': caltemin, 'caltemax' : caltemax, 'extfit' : extfit, 'extpar' : np.array(extpar)}
def elemerr(soln,te,sn,fe) :
'''
Function to evaluate function for empirical uncertainties
'''
out=soln[0]+soln[1]*te+soln[2]*sn
if len(soln) > 3: out+= soln[3]*fe
return np.exp(out)
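# Illustrative evaluation of the empirical-uncertainty model (coefficients are
# invented): the fit above is done in log(rms), so elemerr exponentiates
# soln[0] + soln[1]*dTeff + soln[2]*dS/N (+ soln[3]*[M/H] when present).
# Callers pass Teff-4500 and S/N-100, matching the offsets used in the fit.
def _elemerr_example() :
    soln = np.array([-3.0, 2.e-4, -5.e-3, 0.3])
    return elemerr(soln, 4800.-4500., 150.-100., -0.5)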
if __name__ == '__main__' :
main()
def lbd2xyz(l,b,d,R0=8.5) :
    ''' Angular coordinates + distance -> galactocentric x, y, z '''
brad = b*np.pi/180.
lrad = l*np.pi/180.
x = d*np.sin(0.5*np.pi-brad)*np.cos(lrad)-R0
y = d*np.sin(0.5*np.pi-brad)*np.sin(lrad)
z = d*np.cos(0.5*np.pi-brad)
r = np.sqrt(x**2+y**2)
return x, y, z, r
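# Quick illustrative check: a star at l=0, b=0 and d=R0 lies at the Galactic
# center, so the returned x, y, z and cylindrical radius r are all ~0.
def _lbd2xyz_example() :
    return lbd2xyz(0., 0., 8.5)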
| [] | [] | ["APOGEE_REDUX"] | [] | ["APOGEE_REDUX"] | python | 1 | 0 |
test/unit/tools/test_conda_resolution.py | import os
import shutil
from tempfile import mkdtemp
import unittest
from galaxy.tools.deps import DependencyManager
from galaxy.tools.deps.resolvers.conda import CondaDependencyResolver
from galaxy.tools.deps import conda_util
def skip_unless_environ(var):
if var in os.environ:
return lambda func: func
template = "Environment variable %s not found, dependent test skipped."
return unittest.skip(template % var)
@skip_unless_environ("GALAXY_TEST_INCLUDE_SLOW")
def test_conda_resolution():
base_path = mkdtemp()
try:
job_dir = os.path.join(base_path, "000")
dependency_manager = DependencyManager(base_path)
resolver = CondaDependencyResolver(
dependency_manager,
auto_init=True,
auto_install=True,
use_path_exec=False, # For the test ensure this is always a clean install
)
conda_context = resolver.conda_context
assert len(list(conda_util.installed_conda_targets(conda_context))) == 0
dependency = resolver.resolve(name="samtools", version=None, type="package", job_directory=job_dir)
assert dependency.shell_commands(None) is not None
installed_targets = list(conda_util.installed_conda_targets(conda_context))
assert len(installed_targets) == 1
samtools_target = installed_targets[0]
assert samtools_target.package == "samtools"
assert samtools_target.version is None
finally:
shutil.rmtree(base_path)
@skip_unless_environ("GALAXY_TEST_INCLUDE_SLOW")
def test_conda_resolution_failure():
"""This test is specifically designed to trigger https://github.com/rtfd/readthedocs.org/issues/1902
and thus it expects the install to fail. If this test fails it is a sign that the upstream
conda issue has been fixed.
"""
base_path = mkdtemp(prefix='x' * 80) # a ridiculously long prefix
try:
job_dir = os.path.join(base_path, "000")
dependency_manager = DependencyManager(base_path)
resolver = CondaDependencyResolver(
dependency_manager,
auto_init=True,
auto_install=True,
use_path_exec=False, # For the test ensure this is always a clean install
)
conda_context = resolver.conda_context
assert len(list(conda_util.installed_conda_targets(conda_context))) == 0
dependency = resolver.resolve(name="samtools", version=None, type="package", job_directory=job_dir)
assert dependency.shell_commands(None) is None # install should fail
installed_targets = list(conda_util.installed_conda_targets(conda_context))
assert len(installed_targets) == 0
finally:
shutil.rmtree(base_path)
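# Illustrative local invocation (assumed runner; the exact command may differ
# in your setup):
#
#     GALAXY_TEST_INCLUDE_SLOW=1 pytest test/unit/tools/test_conda_resolution.py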
| [] | [] | [] | [] | [] | python | 0 | 0 | |
core/src/overture/go/src/cmd/cgo/gcc.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Annotate Ref in Prog with C types by parsing gcc debug output.
// Conversion of debug output to Go types.
package main
import (
"bytes"
"debug/dwarf"
"debug/elf"
"debug/macho"
"debug/pe"
"encoding/binary"
"errors"
"flag"
"fmt"
"go/ast"
"go/parser"
"go/token"
"math"
"os"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
var debugDefine = flag.Bool("debug-define", false, "print relevant #defines")
var debugGcc = flag.Bool("debug-gcc", false, "print gcc invocations")
var nameToC = map[string]string{
"schar": "signed char",
"uchar": "unsigned char",
"ushort": "unsigned short",
"uint": "unsigned int",
"ulong": "unsigned long",
"longlong": "long long",
"ulonglong": "unsigned long long",
"complexfloat": "float _Complex",
"complexdouble": "double _Complex",
}
// cname returns the C name to use for C.s.
// The expansions are listed in nameToC and also
// struct_foo becomes "struct foo", and similarly for
// union and enum.
func cname(s string) string {
if t, ok := nameToC[s]; ok {
return t
}
if strings.HasPrefix(s, "struct_") {
return "struct " + s[len("struct_"):]
}
if strings.HasPrefix(s, "union_") {
return "union " + s[len("union_"):]
}
if strings.HasPrefix(s, "enum_") {
return "enum " + s[len("enum_"):]
}
if strings.HasPrefix(s, "sizeof_") {
return "sizeof(" + cname(s[len("sizeof_"):]) + ")"
}
return s
}
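// cnameExamples is an illustrative sketch, not used by cgo itself: it shows
// the expansions cname performs for a few representative inputs.
func cnameExamples() []string {
	return []string{
		cname("ulong"),       // "unsigned long"
		cname("struct_stat"), // "struct stat"
		cname("enum_color"),  // "enum color"
		cname("sizeof_int"),  // "sizeof(int)"
	}
}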
// DiscardCgoDirectives processes the import C preamble, and discards
// all #cgo CFLAGS and LDFLAGS directives, so they don't make their
// way into _cgo_export.h.
func (f *File) DiscardCgoDirectives() {
linesIn := strings.Split(f.Preamble, "\n")
linesOut := make([]string, 0, len(linesIn))
for _, line := range linesIn {
l := strings.TrimSpace(line)
if len(l) < 5 || l[:4] != "#cgo" || !unicode.IsSpace(rune(l[4])) {
linesOut = append(linesOut, line)
} else {
linesOut = append(linesOut, "")
}
}
f.Preamble = strings.Join(linesOut, "\n")
}
// addToFlag appends args to flag. All flags are later written out onto the
// _cgo_flags file for the build system to use.
func (p *Package) addToFlag(flag string, args []string) {
p.CgoFlags[flag] = append(p.CgoFlags[flag], args...)
if flag == "CFLAGS" {
// We'll also need these when preprocessing for dwarf information.
p.GccOptions = append(p.GccOptions, args...)
}
}
// splitQuoted splits the string s around each instance of one or more consecutive
// white space characters while taking into account quotes and escaping, and
// returns an array of substrings of s or an empty list if s contains only white space.
// Single quotes and double quotes are recognized to prevent splitting within the
// quoted region, and are removed from the resulting substrings. If a quote in s
// isn't closed err will be set and r will have the unclosed argument as the
// last element. The backslash is used for escaping.
//
// For example, the following string:
//
// `a b:"c d" 'e''f' "g\""`
//
// Would be parsed as:
//
// []string{"a", "b:c d", "ef", `g"`}
//
func splitQuoted(s string) (r []string, err error) {
var args []string
arg := make([]rune, len(s))
escaped := false
quoted := false
quote := '\x00'
i := 0
for _, r := range s {
switch {
case escaped:
escaped = false
case r == '\\':
escaped = true
continue
case quote != 0:
if r == quote {
quote = 0
continue
}
case r == '"' || r == '\'':
quoted = true
quote = r
continue
case unicode.IsSpace(r):
if quoted || i > 0 {
quoted = false
args = append(args, string(arg[:i]))
i = 0
}
continue
}
arg[i] = r
i++
}
if quoted || i > 0 {
args = append(args, string(arg[:i]))
}
if quote != 0 {
err = errors.New("unclosed quote")
} else if escaped {
err = errors.New("unfinished escaping")
}
return args, err
}
// Translate rewrites f.AST, the original Go input, to remove
// references to the imported package C, replacing them with
// references to the equivalent Go types, functions, and variables.
func (p *Package) Translate(f *File) {
for _, cref := range f.Ref {
// Convert C.ulong to C.unsigned long, etc.
cref.Name.C = cname(cref.Name.Go)
}
p.loadDefines(f)
needType := p.guessKinds(f)
if len(needType) > 0 {
p.loadDWARF(f, needType)
}
if p.rewriteCalls(f) {
// Add `import _cgo_unsafe "unsafe"` as the first decl
// after the package statement.
imp := &ast.GenDecl{
Tok: token.IMPORT,
Specs: []ast.Spec{
&ast.ImportSpec{
Name: ast.NewIdent("_cgo_unsafe"),
Path: &ast.BasicLit{
Kind: token.STRING,
Value: `"unsafe"`,
},
},
},
}
f.AST.Decls = append([]ast.Decl{imp}, f.AST.Decls...)
}
p.rewriteRef(f)
}
// loadDefines coerces gcc into spitting out the #defines in use
// in the file f and saves relevant renamings in f.Name[name].Define.
func (p *Package) loadDefines(f *File) {
var b bytes.Buffer
b.WriteString(f.Preamble)
b.WriteString(builtinProlog)
stdout := p.gccDefines(b.Bytes())
for _, line := range strings.Split(stdout, "\n") {
if len(line) < 9 || line[0:7] != "#define" {
continue
}
line = strings.TrimSpace(line[8:])
var key, val string
spaceIndex := strings.Index(line, " ")
tabIndex := strings.Index(line, "\t")
if spaceIndex == -1 && tabIndex == -1 {
continue
} else if tabIndex == -1 || (spaceIndex != -1 && spaceIndex < tabIndex) {
key = line[0:spaceIndex]
val = strings.TrimSpace(line[spaceIndex:])
} else {
key = line[0:tabIndex]
val = strings.TrimSpace(line[tabIndex:])
}
if key == "__clang__" {
p.GccIsClang = true
}
if n := f.Name[key]; n != nil {
if *debugDefine {
fmt.Fprintf(os.Stderr, "#define %s %s\n", key, val)
}
n.Define = val
}
}
}
// guessKinds tricks gcc into revealing the kind of each
// name xxx for the references C.xxx in the Go input.
// The kind is either a constant, type, or variable.
func (p *Package) guessKinds(f *File) []*Name {
// Determine kinds for names we already know about,
// like #defines or 'struct foo', before bothering with gcc.
var names, needType []*Name
for _, key := range nameKeys(f.Name) {
n := f.Name[key]
// If we've already found this name as a #define
// and we can translate it as a constant value, do so.
if n.Define != "" {
if i, err := strconv.ParseInt(n.Define, 0, 64); err == nil {
n.Kind = "iconst"
// Turn decimal into hex, just for consistency
// with enum-derived constants. Otherwise
// in the cgo -godefs output half the constants
// are in hex and half are in whatever the #define used.
n.Const = fmt.Sprintf("%#x", i)
} else if n.Define[0] == '\'' {
if _, err := parser.ParseExpr(n.Define); err == nil {
n.Kind = "iconst"
n.Const = n.Define
}
} else if n.Define[0] == '"' {
if _, err := parser.ParseExpr(n.Define); err == nil {
n.Kind = "sconst"
n.Const = n.Define
}
}
if n.IsConst() {
continue
}
if isName(n.Define) {
n.C = n.Define
}
}
// If this is a struct, union, or enum type name, no need to guess the kind.
if strings.HasPrefix(n.C, "struct ") || strings.HasPrefix(n.C, "union ") || strings.HasPrefix(n.C, "enum ") {
n.Kind = "type"
needType = append(needType, n)
continue
}
// Otherwise, we'll need to find out from gcc.
names = append(names, n)
}
// Bypass gcc if there's nothing left to find out.
if len(names) == 0 {
return needType
}
// Coerce gcc into telling us whether each name is a type, a value, or undeclared.
// For names, find out whether they are integer constants.
// We used to look at specific warning or error messages here, but that tied the
// behavior too closely to specific versions of the compilers.
// Instead, arrange that we can infer what we need from only the presence or absence
// of an error on a specific line.
//
// For each name, we generate these lines, where xxx is the index in toSniff plus one.
//
// #line xxx "not-declared"
// void __cgo_f_xxx_1(void) { __typeof__(name) *__cgo_undefined__; }
// #line xxx "not-type"
// void __cgo_f_xxx_2(void) { name *__cgo_undefined__; }
// #line xxx "not-int-const"
// void __cgo_f_xxx_3(void) { enum { __cgo_undefined__ = (name)*1 }; }
// #line xxx "not-num-const"
// void __cgo_f_xxx_4(void) { static const double x = (name); }
// #line xxx "not-str-lit"
	// void __cgo_f_xxx_5(void) { static const char s[] = (name); }
// #line xxx "not-signed-int-const"
// #if 0 < -(name)
// #line xxx "not-signed-int-const"
// #error found unsigned int
// #endif
//
// If we see an error at not-declared:xxx, the corresponding name is not declared.
	// If we see an error at not-type:xxx, the corresponding name is not a type.
// If we see an error at not-int-const:xxx, the corresponding name is not an integer constant.
// If we see an error at not-num-const:xxx, the corresponding name is not a number constant.
// If we see an error at not-str-lit:xxx, the corresponding name is not a string literal.
// If we see an error at not-signed-int-const:xxx, the corresponding name is not a signed integer literal.
//
// The specific input forms are chosen so that they are valid C syntax regardless of
// whether name denotes a type or an expression.
var b bytes.Buffer
b.WriteString(f.Preamble)
b.WriteString(builtinProlog)
for i, n := range names {
		fmt.Fprintf(&b, "#line %d \"not-declared\"\n"+
			"void __cgo_f_%d_1(void) { __typeof__(%s) *__cgo_undefined__; }\n"+
			"#line %d \"not-type\"\n"+
			"void __cgo_f_%d_2(void) { %s *__cgo_undefined__; }\n"+
			"#line %d \"not-int-const\"\n"+
			"void __cgo_f_%d_3(void) { enum { __cgo_undefined__ = (%s)*1 }; }\n"+
			"#line %d \"not-num-const\"\n"+
			"void __cgo_f_%d_4(void) { static const double x = (%s); }\n"+
			"#line %d \"not-str-lit\"\n"+
			"void __cgo_f_%d_5(void) { static const char s[] = (%s); }\n"+
			"#line %d \"not-signed-int-const\"\n"+
			"#if 0 < -(%s)\n"+
			"#line %d \"not-signed-int-const\"\n"+
			"#error found unsigned int\n"+
			"#endif\n",
			i+1, i+1, n.C,
			i+1, i+1, n.C,
			i+1, i+1, n.C,
			i+1, i+1, n.C,
			i+1, i+1, n.C,
			i+1, n.C, i+1,
		)
}
fmt.Fprintf(&b, "#line 1 \"completed\"\n"+
"int __cgo__1 = __cgo__2;\n")
stderr := p.gccErrors(b.Bytes())
if stderr == "" {
fatalf("%s produced no output\non input:\n%s", p.gccBaseCmd()[0], b.Bytes())
}
completed := false
sniff := make([]int, len(names))
const (
notType = 1 << iota
notIntConst
notNumConst
notStrLiteral
notDeclared
notSignedIntConst
)
sawUnmatchedErrors := false
for _, line := range strings.Split(stderr, "\n") {
// Ignore warnings and random comments, with one
// exception: newer GCC versions will sometimes emit
// an error on a macro #define with a note referring
// to where the expansion occurs. We care about where
// the expansion occurs, so in that case treat the note
// as an error.
isError := strings.Contains(line, ": error:")
isErrorNote := strings.Contains(line, ": note:") && sawUnmatchedErrors
if !isError && !isErrorNote {
continue
}
c1 := strings.Index(line, ":")
if c1 < 0 {
continue
}
c2 := strings.Index(line[c1+1:], ":")
if c2 < 0 {
continue
}
c2 += c1 + 1
filename := line[:c1]
i, _ := strconv.Atoi(line[c1+1 : c2])
i--
if i < 0 || i >= len(names) {
if isError {
sawUnmatchedErrors = true
}
continue
}
switch filename {
case "completed":
// Strictly speaking, there is no guarantee that seeing the error at completed:1
// (at the end of the file) means we've seen all the errors from earlier in the file,
// but usually it does. Certainly if we don't see the completed:1 error, we did
// not get all the errors we expected.
completed = true
case "not-declared":
sniff[i] |= notDeclared
case "not-type":
sniff[i] |= notType
case "not-int-const":
sniff[i] |= notIntConst
case "not-num-const":
sniff[i] |= notNumConst
case "not-str-lit":
sniff[i] |= notStrLiteral
case "not-signed-int-const":
sniff[i] |= notSignedIntConst
default:
if isError {
sawUnmatchedErrors = true
}
continue
}
sawUnmatchedErrors = false
}
if !completed {
fatalf("%s did not produce error at completed:1\non input:\n%s\nfull error output:\n%s", p.gccBaseCmd()[0], b.Bytes(), stderr)
}
for i, n := range names {
switch sniff[i] &^ notSignedIntConst {
default:
var tpos token.Pos
for _, ref := range f.Ref {
if ref.Name == n {
tpos = ref.Pos()
break
}
}
error_(tpos, "could not determine kind of name for C.%s", fixGo(n.Go))
case notStrLiteral | notType:
			if sniff[i]&notSignedIntConst != 0 {
n.Kind = "uconst"
} else {
n.Kind = "iconst"
}
case notIntConst | notStrLiteral | notType:
n.Kind = "fconst"
case notIntConst | notNumConst | notType:
n.Kind = "sconst"
case notIntConst | notNumConst | notStrLiteral:
n.Kind = "type"
case notIntConst | notNumConst | notStrLiteral | notType:
n.Kind = "not-type"
}
}
if nerrors > 0 {
// Check if compiling the preamble by itself causes any errors,
// because the messages we've printed out so far aren't helpful
// to users debugging preamble mistakes. See issue 8442.
preambleErrors := p.gccErrors([]byte(f.Preamble))
if len(preambleErrors) > 0 {
error_(token.NoPos, "\n%s errors for preamble:\n%s", p.gccBaseCmd()[0], preambleErrors)
}
fatalf("unresolved names")
}
needType = append(needType, names...)
return needType
}
// loadDWARF parses the DWARF debug information generated
// by gcc to learn the details of the constants, variables, and types
// being referred to as C.xxx.
func (p *Package) loadDWARF(f *File, names []*Name) {
// Extract the types from the DWARF section of an object
// from a well-formed C program. Gcc only generates DWARF info
// for symbols in the object file, so it is not enough to print the
// preamble and hope the symbols we care about will be there.
// Instead, emit
// __typeof__(names[i]) *__cgo__i;
// for each entry in names and then dereference the type we
// learn for __cgo__i.
var b bytes.Buffer
b.WriteString(f.Preamble)
b.WriteString(builtinProlog)
b.WriteString("#line 1 \"cgo-dwarf-inference\"\n")
for i, n := range names {
fmt.Fprintf(&b, "__typeof__(%s) *__cgo__%d;\n", n.C, i)
if n.Kind == "iconst" || n.Kind == "uconst" {
fmt.Fprintf(&b, "enum { __cgo_enum__%d = %s };\n", i, n.C)
}
}
// We create a data block initialized with the values,
// so we can read them out of the object file.
fmt.Fprintf(&b, "long long __cgodebug_ints[] = {\n")
for _, n := range names {
if n.Kind == "iconst" || n.Kind == "uconst" {
fmt.Fprintf(&b, "\t%s,\n", n.C)
} else {
fmt.Fprintf(&b, "\t0,\n")
}
}
// for the last entry, we cannot use 0, otherwise
	// in case all of __cgodebug_ints is zero initialized,
	// LLVM-based gcc will place it in the __DATA.__common
// zero-filled section (our debug/macho doesn't support
// this)
fmt.Fprintf(&b, "\t1\n")
fmt.Fprintf(&b, "};\n")
// do the same work for floats.
fmt.Fprintf(&b, "double __cgodebug_floats[] = {\n")
for _, n := range names {
if n.Kind == "fconst" {
fmt.Fprintf(&b, "\t%s,\n", n.C)
} else {
fmt.Fprintf(&b, "\t0,\n")
}
}
fmt.Fprintf(&b, "\t1\n")
fmt.Fprintf(&b, "};\n")
// do the same work for strings.
for i, n := range names {
if n.Kind == "sconst" {
fmt.Fprintf(&b, "const char __cgodebug_str__%d[] = %s;\n", i, n.C)
fmt.Fprintf(&b, "const unsigned long long __cgodebug_strlen__%d = sizeof(%s)-1;\n", i, n.C)
}
}
d, ints, floats, strs := p.gccDebug(b.Bytes(), len(names))
// Scan DWARF info for top-level TagVariable entries with AttrName __cgo__i.
types := make([]dwarf.Type, len(names))
nameToIndex := make(map[*Name]int)
for i, n := range names {
nameToIndex[n] = i
}
nameToRef := make(map[*Name]*Ref)
for _, ref := range f.Ref {
nameToRef[ref.Name] = ref
}
r := d.Reader()
for {
e, err := r.Next()
if err != nil {
fatalf("reading DWARF entry: %s", err)
}
if e == nil {
break
}
switch e.Tag {
case dwarf.TagVariable:
name, _ := e.Val(dwarf.AttrName).(string)
typOff, _ := e.Val(dwarf.AttrType).(dwarf.Offset)
if name == "" || typOff == 0 {
if e.Val(dwarf.AttrSpecification) != nil {
// Since we are reading all the DWARF,
// assume we will see the variable elsewhere.
break
}
fatalf("malformed DWARF TagVariable entry")
}
if !strings.HasPrefix(name, "__cgo__") {
break
}
typ, err := d.Type(typOff)
if err != nil {
fatalf("loading DWARF type: %s", err)
}
t, ok := typ.(*dwarf.PtrType)
if !ok || t == nil {
fatalf("internal error: %s has non-pointer type", name)
}
i, err := strconv.Atoi(name[7:])
if err != nil {
fatalf("malformed __cgo__ name: %s", name)
}
types[i] = t.Type
}
if e.Tag != dwarf.TagCompileUnit {
r.SkipChildren()
}
}
// Record types and typedef information.
var conv typeConv
conv.Init(p.PtrSize, p.IntSize)
for i, n := range names {
if types[i] == nil {
continue
}
pos := token.NoPos
if ref, ok := nameToRef[n]; ok {
pos = ref.Pos()
}
f, fok := types[i].(*dwarf.FuncType)
if n.Kind != "type" && fok {
n.Kind = "func"
n.FuncType = conv.FuncType(f, pos)
} else {
n.Type = conv.Type(types[i], pos)
switch n.Kind {
case "iconst":
if i < len(ints) {
n.Const = fmt.Sprintf("%#x", ints[i])
}
case "uconst":
if i < len(ints) {
n.Const = fmt.Sprintf("%#x", uint64(ints[i]))
}
case "fconst":
if i < len(floats) {
n.Const = fmt.Sprintf("%f", floats[i])
}
case "sconst":
if i < len(strs) {
n.Const = fmt.Sprintf("%q", strs[i])
}
}
}
conv.FinishType(pos)
}
}
// mangleName does name mangling to translate names
// from the original Go source files to the names
// used in the final Go files generated by cgo.
func (p *Package) mangleName(n *Name) {
// When using gccgo variables have to be
// exported so that they become global symbols
// that the C code can refer to.
prefix := "_C"
if *gccgo && n.IsVar() {
prefix = "C"
}
n.Mangle = prefix + n.Kind + "_" + n.Go
}
// rewriteCalls rewrites all calls that pass pointers to check that
// they follow the rules for passing pointers between Go and C.
// This returns whether the package needs to import unsafe as _cgo_unsafe.
func (p *Package) rewriteCalls(f *File) bool {
needsUnsafe := false
for _, call := range f.Calls {
// This is a call to C.xxx; set goname to "xxx".
goname := call.Call.Fun.(*ast.SelectorExpr).Sel.Name
if goname == "malloc" {
continue
}
name := f.Name[goname]
if name.Kind != "func" {
// Probably a type conversion.
continue
}
if p.rewriteCall(f, call, name) {
needsUnsafe = true
}
}
return needsUnsafe
}
// rewriteCall rewrites one call to add pointer checks.
// If any pointer checks are required, we rewrite the call into a
// function literal that calls _cgoCheckPointer for each pointer
// argument and then calls the original function.
// This returns whether the package needs to import unsafe as _cgo_unsafe.
func (p *Package) rewriteCall(f *File, call *Call, name *Name) bool {
// Avoid a crash if the number of arguments is
// less than the number of parameters.
// This will be caught when the generated file is compiled.
if len(call.Call.Args) < len(name.FuncType.Params) {
return false
}
any := false
for i, param := range name.FuncType.Params {
if p.needsPointerCheck(f, param.Go, call.Call.Args[i]) {
any = true
break
}
}
if !any {
return false
}
// We need to rewrite this call.
//
// We are going to rewrite C.f(p) to
// func (_cgo0 ptype) {
// _cgoCheckPointer(_cgo0)
// C.f(_cgo0)
// }(p)
// Using a function literal like this lets us do correct
// argument type checking, and works correctly if the call is
// deferred.
needsUnsafe := false
params := make([]*ast.Field, len(name.FuncType.Params))
nargs := make([]ast.Expr, len(name.FuncType.Params))
var stmts []ast.Stmt
for i, param := range name.FuncType.Params {
// params is going to become the parameters of the
// function literal.
// nargs is going to become the list of arguments made
// by the call within the function literal.
// nparam is the parameter of the function literal that
// corresponds to param.
origArg := call.Call.Args[i]
nparam := ast.NewIdent(fmt.Sprintf("_cgo%d", i))
nargs[i] = nparam
// The Go version of the C type might use unsafe.Pointer,
// but the file might not import unsafe.
// Rewrite the Go type if necessary to use _cgo_unsafe.
ptype := p.rewriteUnsafe(param.Go)
if ptype != param.Go {
needsUnsafe = true
}
params[i] = &ast.Field{
Names: []*ast.Ident{nparam},
Type: ptype,
}
if !p.needsPointerCheck(f, param.Go, origArg) {
continue
}
// Run the cgo pointer checks on nparam.
// Change the function literal to call the real function
// with the parameter passed through _cgoCheckPointer.
c := &ast.CallExpr{
Fun: ast.NewIdent("_cgoCheckPointer"),
Args: []ast.Expr{
nparam,
},
}
// Add optional additional arguments for an address
// expression.
c.Args = p.checkAddrArgs(f, c.Args, origArg)
stmt := &ast.ExprStmt{
X: c,
}
stmts = append(stmts, stmt)
}
fcall := &ast.CallExpr{
Fun: call.Call.Fun,
Args: nargs,
}
ftype := &ast.FuncType{
Params: &ast.FieldList{
List: params,
},
}
if name.FuncType.Result != nil {
rtype := p.rewriteUnsafe(name.FuncType.Result.Go)
if rtype != name.FuncType.Result.Go {
needsUnsafe = true
}
ftype.Results = &ast.FieldList{
List: []*ast.Field{
&ast.Field{
Type: rtype,
},
},
}
}
// There is a Ref pointing to the old call.Call.Fun.
for _, ref := range f.Ref {
if ref.Expr == &call.Call.Fun {
ref.Expr = &fcall.Fun
// If this call expects two results, we have to
// adjust the results of the function we generated.
if ref.Context == "call2" {
if ftype.Results == nil {
				// An explicit void result
// looks odd but it seems to
// be how cgo has worked historically.
ftype.Results = &ast.FieldList{
List: []*ast.Field{
&ast.Field{
Type: ast.NewIdent("_Ctype_void"),
},
},
}
}
ftype.Results.List = append(ftype.Results.List,
&ast.Field{
Type: ast.NewIdent("error"),
})
}
}
}
var fbody ast.Stmt
if ftype.Results == nil {
fbody = &ast.ExprStmt{
X: fcall,
}
} else {
fbody = &ast.ReturnStmt{
Results: []ast.Expr{fcall},
}
}
call.Call.Fun = &ast.FuncLit{
Type: ftype,
Body: &ast.BlockStmt{
List: append(stmts, fbody),
},
}
call.Call.Lparen = token.NoPos
call.Call.Rparen = token.NoPos
return needsUnsafe
}
// needsPointerCheck returns whether the type t needs a pointer check.
// This is true if t is a pointer and if the value to which it points
// might contain a pointer.
func (p *Package) needsPointerCheck(f *File, t ast.Expr, arg ast.Expr) bool {
// An untyped nil does not need a pointer check, and when
// _cgoCheckPointer returns the untyped nil the type assertion we
// are going to insert will fail. Easier to just skip nil arguments.
// TODO: Note that this fails if nil is shadowed.
if id, ok := arg.(*ast.Ident); ok && id.Name == "nil" {
return false
}
return p.hasPointer(f, t, true)
}
// hasPointer is used by needsPointerCheck. If top is true it returns
// whether t is or contains a pointer that might point to a pointer.
// If top is false it returns whether t is or contains a pointer.
// f may be nil.
func (p *Package) hasPointer(f *File, t ast.Expr, top bool) bool {
switch t := t.(type) {
case *ast.ArrayType:
if t.Len == nil {
if !top {
return true
}
return p.hasPointer(f, t.Elt, false)
}
return p.hasPointer(f, t.Elt, top)
case *ast.StructType:
for _, field := range t.Fields.List {
if p.hasPointer(f, field.Type, top) {
return true
}
}
return false
case *ast.StarExpr: // Pointer type.
if !top {
return true
}
// Check whether this is a pointer to a C union (or class)
// type that contains a pointer.
if unionWithPointer[t.X] {
return true
}
return p.hasPointer(f, t.X, false)
case *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType:
return true
case *ast.Ident:
// TODO: Handle types defined within function.
for _, d := range p.Decl {
gd, ok := d.(*ast.GenDecl)
if !ok || gd.Tok != token.TYPE {
continue
}
for _, spec := range gd.Specs {
ts, ok := spec.(*ast.TypeSpec)
if !ok {
continue
}
if ts.Name.Name == t.Name {
return p.hasPointer(f, ts.Type, top)
}
}
}
if def := typedef[t.Name]; def != nil {
return p.hasPointer(f, def.Go, top)
}
if t.Name == "string" {
return !top
}
if t.Name == "error" {
return true
}
if goTypes[t.Name] != nil {
return false
}
// We can't figure out the type. Conservative
// approach is to assume it has a pointer.
return true
case *ast.SelectorExpr:
if l, ok := t.X.(*ast.Ident); !ok || l.Name != "C" {
// Type defined in a different package.
// Conservative approach is to assume it has a
// pointer.
return true
}
if f == nil {
// Conservative approach: assume pointer.
return true
}
name := f.Name[t.Sel.Name]
if name != nil && name.Kind == "type" && name.Type != nil && name.Type.Go != nil {
return p.hasPointer(f, name.Type.Go, top)
}
// We can't figure out the type. Conservative
// approach is to assume it has a pointer.
return true
default:
error_(t.Pos(), "could not understand type %s", gofmt(t))
return true
}
}
// checkAddrArgs tries to add arguments to the call of
// _cgoCheckPointer when the argument is an address expression. We
// pass true to mean that the argument is an address operation of
// something other than a slice index, which means that it's only
// necessary to check the specific element pointed to, not the entire
// object. This is for &s.f, where f is a field in a struct. We can
// pass a slice or array, meaning that we should check the entire
// slice or array but need not check any other part of the object.
// This is for &s.a[i], where we need to check all of a. However, we
// only pass the slice or array if we can refer to it without side
// effects.
func (p *Package) checkAddrArgs(f *File, args []ast.Expr, x ast.Expr) []ast.Expr {
// Strip type conversions.
for {
c, ok := x.(*ast.CallExpr)
if !ok || len(c.Args) != 1 || !p.isType(c.Fun) {
break
}
x = c.Args[0]
}
u, ok := x.(*ast.UnaryExpr)
if !ok || u.Op != token.AND {
return args
}
index, ok := u.X.(*ast.IndexExpr)
if !ok {
// This is the address of something that is not an
// index expression. We only need to examine the
// single value to which it points.
// TODO: what if true is shadowed?
return append(args, ast.NewIdent("true"))
}
if !p.hasSideEffects(f, index.X) {
// Examine the entire slice.
return append(args, index.X)
}
// Treat the pointer as unknown.
return args
}
// hasSideEffects returns whether the expression x has any side
// effects. x is an expression, not a statement, so the only side
// effect is a function call.
func (p *Package) hasSideEffects(f *File, x ast.Expr) bool {
found := false
f.walk(x, "expr",
func(f *File, x interface{}, context string) {
switch x.(type) {
case *ast.CallExpr:
found = true
}
})
return found
}
// isType returns whether the expression is definitely a type.
// This is conservative--it returns false for an unknown identifier.
func (p *Package) isType(t ast.Expr) bool {
switch t := t.(type) {
case *ast.SelectorExpr:
id, ok := t.X.(*ast.Ident)
if !ok {
return false
}
if id.Name == "unsafe" && t.Sel.Name == "Pointer" {
return true
}
if id.Name == "C" && typedef["_Ctype_"+t.Sel.Name] != nil {
return true
}
return false
case *ast.Ident:
// TODO: This ignores shadowing.
switch t.Name {
case "unsafe.Pointer", "bool", "byte",
"complex64", "complex128",
"error",
"float32", "float64",
"int", "int8", "int16", "int32", "int64",
"rune", "string",
"uint", "uint8", "uint16", "uint32", "uint64", "uintptr":
return true
}
case *ast.StarExpr:
return p.isType(t.X)
case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType,
*ast.MapType, *ast.ChanType:
return true
}
return false
}
// rewriteUnsafe returns a version of t with references to unsafe.Pointer
// rewritten to use _cgo_unsafe.Pointer instead.
func (p *Package) rewriteUnsafe(t ast.Expr) ast.Expr {
switch t := t.(type) {
case *ast.Ident:
// We don't see a SelectorExpr for unsafe.Pointer;
// this is created by code in this file.
if t.Name == "unsafe.Pointer" {
return ast.NewIdent("_cgo_unsafe.Pointer")
}
case *ast.ArrayType:
t1 := p.rewriteUnsafe(t.Elt)
if t1 != t.Elt {
r := *t
r.Elt = t1
return &r
}
case *ast.StructType:
changed := false
fields := *t.Fields
fields.List = nil
for _, f := range t.Fields.List {
ft := p.rewriteUnsafe(f.Type)
if ft == f.Type {
fields.List = append(fields.List, f)
} else {
fn := *f
fn.Type = ft
fields.List = append(fields.List, &fn)
changed = true
}
}
if changed {
r := *t
r.Fields = &fields
return &r
}
case *ast.StarExpr: // Pointer type.
x1 := p.rewriteUnsafe(t.X)
if x1 != t.X {
r := *t
r.X = x1
return &r
}
}
return t
}
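// For example (illustrative only): given the type [8]unsafe.Pointer,
// rewriteUnsafe returns [8]_cgo_unsafe.Pointer, allocating new AST nodes
// only along the path that changed and leaving the original type untouched.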
// rewriteRef rewrites all the C.xxx references in f.AST to refer to the
// Go equivalents, now that we have figured out the meaning of all
// the xxx. In *godefs mode, rewriteRef replaces the names
// with full definitions instead of mangled names.
func (p *Package) rewriteRef(f *File) {
// Keep a list of all the functions, to remove the ones
// only used as expressions and avoid generating bridge
// code for them.
functions := make(map[string]bool)
// Assign mangled names.
for _, n := range f.Name {
if n.Kind == "not-type" {
n.Kind = "var"
}
if n.Mangle == "" {
p.mangleName(n)
}
if n.Kind == "func" {
functions[n.Go] = false
}
}
// Now that we have all the name types filled in,
// scan through the Refs to identify the ones that
// are trying to do a ,err call. Also check that
// functions are only used in calls.
for _, r := range f.Ref {
if r.Name.IsConst() && r.Name.Const == "" {
error_(r.Pos(), "unable to find value of constant C.%s", fixGo(r.Name.Go))
}
var expr ast.Expr = ast.NewIdent(r.Name.Mangle) // default
switch r.Context {
case "call", "call2":
if r.Name.Kind != "func" {
if r.Name.Kind == "type" {
r.Context = "type"
if r.Name.Type == nil {
error_(r.Pos(), "invalid conversion to C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C)
break
}
expr = r.Name.Type.Go
break
}
error_(r.Pos(), "call of non-function C.%s", fixGo(r.Name.Go))
break
}
functions[r.Name.Go] = true
if r.Context == "call2" {
if r.Name.Go == "_CMalloc" {
error_(r.Pos(), "no two-result form for C.malloc")
break
}
// Invent new Name for the two-result function.
n := f.Name["2"+r.Name.Go]
if n == nil {
n = new(Name)
*n = *r.Name
n.AddError = true
n.Mangle = "_C2func_" + n.Go
f.Name["2"+r.Name.Go] = n
}
expr = ast.NewIdent(n.Mangle)
r.Name = n
break
}
case "expr":
if r.Name.Kind == "func" {
if builtinDefs[r.Name.C] != "" {
error_(r.Pos(), "use of builtin '%s' not in function call", fixGo(r.Name.C))
}
// Function is being used in an expression, to e.g. pass around a C function pointer.
// Create a new Name for this Ref which causes the variable to be declared in Go land.
fpName := "fp_" + r.Name.Go
name := f.Name[fpName]
if name == nil {
name = &Name{
Go: fpName,
C: r.Name.C,
Kind: "fpvar",
Type: &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("void*"), Go: ast.NewIdent("unsafe.Pointer")},
}
p.mangleName(name)
f.Name[fpName] = name
}
r.Name = name
// Rewrite into call to _Cgo_ptr to prevent assignments. The _Cgo_ptr
// function is defined in out.go and simply returns its argument. See
// issue 7757.
expr = &ast.CallExpr{
Fun: &ast.Ident{NamePos: (*r.Expr).Pos(), Name: "_Cgo_ptr"},
Args: []ast.Expr{ast.NewIdent(name.Mangle)},
}
} else if r.Name.Kind == "type" {
// Okay - might be new(T)
if r.Name.Type == nil {
error_(r.Pos(), "expression C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C)
break
}
expr = r.Name.Type.Go
} else if r.Name.Kind == "var" {
expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr}
}
case "selector":
if r.Name.Kind == "var" {
expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr}
} else {
error_(r.Pos(), "only C variables allowed in selector expression %s", fixGo(r.Name.Go))
}
case "type":
if r.Name.Kind != "type" {
error_(r.Pos(), "expression C.%s used as type", fixGo(r.Name.Go))
} else if r.Name.Type == nil {
// Use of C.enum_x, C.struct_x or C.union_x without C definition.
// GCC won't raise an error when using pointers to such unknown types.
error_(r.Pos(), "type C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C)
} else {
expr = r.Name.Type.Go
}
default:
if r.Name.Kind == "func" {
error_(r.Pos(), "must call C.%s", fixGo(r.Name.Go))
}
}
if *godefs {
// Substitute definition for mangled type name.
if id, ok := expr.(*ast.Ident); ok {
if t := typedef[id.Name]; t != nil {
expr = t.Go
}
if id.Name == r.Name.Mangle && r.Name.Const != "" {
expr = ast.NewIdent(r.Name.Const)
}
}
}
// Copy position information from old expr into new expr,
// in case expression being replaced is first on line.
// See golang.org/issue/6563.
pos := (*r.Expr).Pos()
switch x := expr.(type) {
case *ast.Ident:
expr = &ast.Ident{NamePos: pos, Name: x.Name}
}
*r.Expr = expr
}
// Remove functions only used as expressions, so their respective
// bridge functions are not generated.
for name, used := range functions {
if !used {
delete(f.Name, name)
}
}
}
// gccBaseCmd returns the start of the compiler command line.
// It uses $CC if set, or else $GCC, or else the compiler recorded
// during the initial build as defaultCC.
// defaultCC is defined in zdefaultcc.go, written by cmd/dist.
func (p *Package) gccBaseCmd() []string {
// Use $CC if set, since that's what the build uses.
if ret := strings.Fields(os.Getenv("CC")); len(ret) > 0 {
return ret
}
// Try $GCC if set, since that's what we used to use.
if ret := strings.Fields(os.Getenv("GCC")); len(ret) > 0 {
return ret
}
return strings.Fields(defaultCC)
}
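// For example (illustrative only): with CC="ccache clang" in the
// environment, gccBaseCmd returns []string{"ccache", "clang"}; with neither
// CC nor GCC set, it falls back to the defaultCC recorded by cmd/dist.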
// gccMachine returns the gcc -m flag to use, either "-m32", "-m64" or "-marm".
func (p *Package) gccMachine() []string {
switch goarch {
case "amd64":
return []string{"-m64"}
case "386":
return []string{"-m32"}
case "arm":
return []string{"-marm"} // not thumb
case "s390":
return []string{"-m31"}
case "s390x":
return []string{"-m64"}
case "mips64", "mips64le":
return []string{"-mabi=64"}
case "mips", "mipsle":
return []string{"-mabi=32"}
}
return nil
}
func gccTmp() string {
return *objDir + "_cgo_.o"
}
// gccCmd returns the gcc command line to use for compiling
// the input.
func (p *Package) gccCmd() []string {
c := append(p.gccBaseCmd(),
"-w", // no warnings
"-Wno-error", // warnings are not errors
"-o"+gccTmp(), // write object to tmp
"-gdwarf-2", // generate DWARF v2 debugging symbols
"-c", // do not link
"-xc", // input language is C
)
if p.GccIsClang {
c = append(c,
"-ferror-limit=0",
// Apple clang version 1.7 (tags/Apple/clang-77) (based on LLVM 2.9svn)
// doesn't have -Wno-unneeded-internal-declaration, so we need yet another
// flag to disable the warning. Yes, really good diagnostics, clang.
"-Wno-unknown-warning-option",
"-Wno-unneeded-internal-declaration",
"-Wno-unused-function",
"-Qunused-arguments",
// Clang embeds prototypes for some builtin functions,
// like malloc and calloc, but all size_t parameters are
// incorrectly typed unsigned long. We work around that
// by disabling the builtin functions (this is safe as
// it won't affect the actual compilation of the C code).
// See: https://golang.org/issue/6506.
"-fno-builtin",
)
}
c = append(c, p.GccOptions...)
c = append(c, p.gccMachine()...)
	c = append(c, "-") // read input from standard input
return c
}
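// As an illustration (assumed invocation; the real line depends on GOARCH,
// GccOptions, and the object directory): on amd64 the assembled command
// resembles
//
//	gcc -w -Wno-error -o<objdir>_cgo_.o -gdwarf-2 -c -xc -m64 -
//
// with the C source supplied on standard input.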
// gccDebug runs gcc -gdwarf-2 over the C program stdin and
// returns the corresponding DWARF data and, if present, debug data block.
func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int64, floats []float64, strs []string) {
runGcc(stdin, p.gccCmd())
isDebugInts := func(s string) bool {
// Some systems use leading _ to denote non-assembly symbols.
return s == "__cgodebug_ints" || s == "___cgodebug_ints"
}
isDebugFloats := func(s string) bool {
// Some systems use leading _ to denote non-assembly symbols.
return s == "__cgodebug_floats" || s == "___cgodebug_floats"
}
indexOfDebugStr := func(s string) int {
// Some systems use leading _ to denote non-assembly symbols.
if strings.HasPrefix(s, "___") {
s = s[1:]
}
if strings.HasPrefix(s, "__cgodebug_str__") {
if n, err := strconv.Atoi(s[len("__cgodebug_str__"):]); err == nil {
return n
}
}
return -1
}
indexOfDebugStrlen := func(s string) int {
// Some systems use leading _ to denote non-assembly symbols.
if strings.HasPrefix(s, "___") {
s = s[1:]
}
if strings.HasPrefix(s, "__cgodebug_strlen__") {
if n, err := strconv.Atoi(s[len("__cgodebug_strlen__"):]); err == nil {
return n
}
}
return -1
}
strs = make([]string, nnames)
strdata := make(map[int]string, nnames)
strlens := make(map[int]int, nnames)
buildStrings := func() {
for n, strlen := range strlens {
data := strdata[n]
if len(data) <= strlen {
fatalf("invalid string literal")
}
strs[n] = string(data[:strlen])
}
}
if f, err := macho.Open(gccTmp()); err == nil {
defer f.Close()
d, err := f.DWARF()
if err != nil {
fatalf("cannot load DWARF output from %s: %v", gccTmp(), err)
}
bo := f.ByteOrder
if f.Symtab != nil {
for i := range f.Symtab.Syms {
s := &f.Symtab.Syms[i]
switch {
case isDebugInts(s.Name):
// Found it. Now find data section.
if i := int(s.Sect) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
ints = make([]int64, len(data)/8)
for i := range ints {
ints[i] = int64(bo.Uint64(data[i*8:]))
}
}
}
}
case isDebugFloats(s.Name):
// Found it. Now find data section.
if i := int(s.Sect) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
floats = make([]float64, len(data)/8)
for i := range floats {
floats[i] = math.Float64frombits(bo.Uint64(data[i*8:]))
}
}
}
}
default:
if n := indexOfDebugStr(s.Name); n != -1 {
// Found it. Now find data section.
if i := int(s.Sect) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
strdata[n] = string(data)
}
}
}
break
}
if n := indexOfDebugStrlen(s.Name); n != -1 {
// Found it. Now find data section.
if i := int(s.Sect) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
strlen := bo.Uint64(data[:8])
if strlen > (1<<(uint(p.IntSize*8)-1) - 1) { // greater than MaxInt?
fatalf("string literal too big")
}
strlens[n] = int(strlen)
}
}
}
break
}
}
}
buildStrings()
}
return d, ints, floats, strs
}
if f, err := elf.Open(gccTmp()); err == nil {
defer f.Close()
d, err := f.DWARF()
if err != nil {
fatalf("cannot load DWARF output from %s: %v", gccTmp(), err)
}
bo := f.ByteOrder
symtab, err := f.Symbols()
if err == nil {
for i := range symtab {
s := &symtab[i]
switch {
case isDebugInts(s.Name):
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
ints = make([]int64, len(data)/8)
for i := range ints {
ints[i] = int64(bo.Uint64(data[i*8:]))
}
}
}
}
case isDebugFloats(s.Name):
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
floats = make([]float64, len(data)/8)
for i := range floats {
floats[i] = math.Float64frombits(bo.Uint64(data[i*8:]))
}
}
}
}
default:
if n := indexOfDebugStr(s.Name); n != -1 {
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
strdata[n] = string(data)
}
}
}
break
}
if n := indexOfDebugStrlen(s.Name); n != -1 {
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
strlen := bo.Uint64(data[:8])
if strlen > (1<<(uint(p.IntSize*8)-1) - 1) { // greater than MaxInt?
fatalf("string literal too big")
}
strlens[n] = int(strlen)
}
}
}
break
}
}
}
buildStrings()
}
return d, ints, floats, strs
}
if f, err := pe.Open(gccTmp()); err == nil {
defer f.Close()
d, err := f.DWARF()
if err != nil {
fatalf("cannot load DWARF output from %s: %v", gccTmp(), err)
}
bo := binary.LittleEndian
for _, s := range f.Symbols {
switch {
case isDebugInts(s.Name):
if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value:]
ints = make([]int64, len(data)/8)
for i := range ints {
ints[i] = int64(bo.Uint64(data[i*8:]))
}
}
}
}
case isDebugFloats(s.Name):
if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value:]
floats = make([]float64, len(data)/8)
for i := range floats {
floats[i] = math.Float64frombits(bo.Uint64(data[i*8:]))
}
}
}
}
default:
if n := indexOfDebugStr(s.Name); n != -1 {
if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value:]
strdata[n] = string(data)
}
}
}
break
}
if n := indexOfDebugStrlen(s.Name); n != -1 {
if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value:]
strlen := bo.Uint64(data[:8])
if strlen > (1<<(uint(p.IntSize*8)-1) - 1) { // greater than MaxInt?
fatalf("string literal too big")
}
strlens[n] = int(strlen)
}
}
}
break
}
}
}
buildStrings()
return d, ints, floats, strs
}
fatalf("cannot parse gcc output %s as ELF, Mach-O, PE object", gccTmp())
panic("not reached")
}
// gccDefines runs gcc -E -dM -xc - over the C program stdin
// and returns the corresponding standard output, which is the
// #defines that gcc encountered while processing the input
// and its included files.
func (p *Package) gccDefines(stdin []byte) string {
base := append(p.gccBaseCmd(), "-E", "-dM", "-xc")
base = append(base, p.gccMachine()...)
stdout, _ := runGcc(stdin, append(append(base, p.GccOptions...), "-"))
return stdout
}
// gccErrors runs gcc over the C program stdin and returns
// the errors that gcc prints. That is, this function expects
// gcc to fail.
func (p *Package) gccErrors(stdin []byte) string {
// TODO(rsc): require failure
args := p.gccCmd()
// Optimization options can confuse the error messages; remove them.
nargs := make([]string, 0, len(args))
for _, arg := range args {
if !strings.HasPrefix(arg, "-O") {
nargs = append(nargs, arg)
}
}
if *debugGcc {
fmt.Fprintf(os.Stderr, "$ %s <<EOF\n", strings.Join(nargs, " "))
os.Stderr.Write(stdin)
fmt.Fprint(os.Stderr, "EOF\n")
}
stdout, stderr, _ := run(stdin, nargs)
if *debugGcc {
os.Stderr.Write(stdout)
os.Stderr.Write(stderr)
}
return string(stderr)
}
// runGcc runs the gcc command line args with stdin on standard input.
// If the command exits with a non-zero exit status, runGcc prints
// details about what was run and exits.
// Otherwise runGcc returns the data written to standard output and standard error.
// Note that for some of the uses we expect useful data back
// on standard error, but for those uses gcc must still exit 0.
func runGcc(stdin []byte, args []string) (string, string) {
if *debugGcc {
fmt.Fprintf(os.Stderr, "$ %s <<EOF\n", strings.Join(args, " "))
os.Stderr.Write(stdin)
fmt.Fprint(os.Stderr, "EOF\n")
}
stdout, stderr, ok := run(stdin, args)
if *debugGcc {
os.Stderr.Write(stdout)
os.Stderr.Write(stderr)
}
if !ok {
os.Stderr.Write(stderr)
os.Exit(2)
}
return string(stdout), string(stderr)
}
// A typeConv is a translator from dwarf types to Go types
// with equivalent memory layout.
type typeConv struct {
// Cache of already-translated or in-progress types.
m map[dwarf.Type]*Type
// Map from types to incomplete pointers to those types.
ptrs map[dwarf.Type][]*Type
// Keys of ptrs in insertion order (deterministic worklist)
ptrKeys []dwarf.Type
// Predeclared types.
bool ast.Expr
byte ast.Expr // denotes padding
int8, int16, int32, int64 ast.Expr
uint8, uint16, uint32, uint64, uintptr ast.Expr
float32, float64 ast.Expr
complex64, complex128 ast.Expr
void ast.Expr
string ast.Expr
goVoid ast.Expr // _Ctype_void, denotes C's void
goVoidPtr ast.Expr // unsafe.Pointer or *byte
ptrSize int64
intSize int64
}
var tagGen int
var typedef = make(map[string]*Type)
var goIdent = make(map[string]*ast.Ident)
// unionWithPointer is true for a Go type that represents a C union (or class)
// that may contain a pointer. This is used for cgo pointer checking.
var unionWithPointer = make(map[ast.Expr]bool)
func (c *typeConv) Init(ptrSize, intSize int64) {
c.ptrSize = ptrSize
c.intSize = intSize
c.m = make(map[dwarf.Type]*Type)
c.ptrs = make(map[dwarf.Type][]*Type)
c.bool = c.Ident("bool")
c.byte = c.Ident("byte")
c.int8 = c.Ident("int8")
c.int16 = c.Ident("int16")
c.int32 = c.Ident("int32")
c.int64 = c.Ident("int64")
c.uint8 = c.Ident("uint8")
c.uint16 = c.Ident("uint16")
c.uint32 = c.Ident("uint32")
c.uint64 = c.Ident("uint64")
c.uintptr = c.Ident("uintptr")
c.float32 = c.Ident("float32")
c.float64 = c.Ident("float64")
c.complex64 = c.Ident("complex64")
c.complex128 = c.Ident("complex128")
c.void = c.Ident("void")
c.string = c.Ident("string")
c.goVoid = c.Ident("_Ctype_void")
// Normally cgo translates void* to unsafe.Pointer,
// but for historical reasons -godefs uses *byte instead.
if *godefs {
c.goVoidPtr = &ast.StarExpr{X: c.byte}
} else {
c.goVoidPtr = c.Ident("unsafe.Pointer")
}
}
// base strips away qualifiers and typedefs to get the underlying type
func base(dt dwarf.Type) dwarf.Type {
for {
if d, ok := dt.(*dwarf.QualType); ok {
dt = d.Type
continue
}
if d, ok := dt.(*dwarf.TypedefType); ok {
dt = d.Type
continue
}
break
}
return dt
}
// unqual strips away qualifiers from a DWARF type.
// In general we don't care about top-level qualifiers.
func unqual(dt dwarf.Type) dwarf.Type {
for {
if d, ok := dt.(*dwarf.QualType); ok {
dt = d.Type
} else {
break
}
}
return dt
}
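// For example (illustrative only): for a C declaration typedef const int T,
// base(T) resolves through both the typedef and the qualifier to the
// underlying int, while unqual(T) returns the typedef itself; unqual strips
// only top-level qualifiers such as the const in const int.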
// Map from dwarf text names to aliases we use in package "C".
var dwarfToName = map[string]string{
"long int": "long",
"long unsigned int": "ulong",
"unsigned int": "uint",
"short unsigned int": "ushort",
"unsigned short": "ushort", // Used by Clang; issue 13129.
"short int": "short",
"long long int": "longlong",
"long long unsigned int": "ulonglong",
"signed char": "schar",
"unsigned char": "uchar",
}
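// signedDelta is added to an enum's byte size when any enumerator is
// negative, letting the switch in Type dispatch on (size, signedness) pairs:
// for example, 4 selects uint32 while 4+signedDelta selects int32.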
const signedDelta = 64
// String returns the current type representation. Format arguments
// are assembled within this method so that any changes in mutable
// values are taken into account.
func (tr *TypeRepr) String() string {
if len(tr.Repr) == 0 {
return ""
}
if len(tr.FormatArgs) == 0 {
return tr.Repr
}
return fmt.Sprintf(tr.Repr, tr.FormatArgs...)
}
// Empty reports whether the result of String would be "".
func (tr *TypeRepr) Empty() bool {
return len(tr.Repr) == 0
}
// Set modifies the type representation.
// If fargs are provided, repr is used as a format for fmt.Sprintf.
// Otherwise, repr is used unprocessed as the type representation.
func (tr *TypeRepr) Set(repr string, fargs ...interface{}) {
tr.Repr = repr
tr.FormatArgs = fargs
}
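// For example (illustrative only):
//
//	tr := &TypeRepr{}
//	tr.Set("%s*", elem.C)
//	s := tr.String() // yields "int*" once elem.C renders as "int"
//
// Because String applies FormatArgs late, later mutations of elem.C are
// picked up the next time the representation is rendered.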
// FinishType completes any outstanding type mapping work.
// In particular, it resolves incomplete pointer types.
func (c *typeConv) FinishType(pos token.Pos) {
// Completing one pointer type might produce more to complete.
// Keep looping until they're all done.
for len(c.ptrKeys) > 0 {
dtype := c.ptrKeys[0]
c.ptrKeys = c.ptrKeys[1:]
// Note Type might invalidate c.ptrs[dtype].
t := c.Type(dtype, pos)
for _, ptr := range c.ptrs[dtype] {
ptr.Go.(*ast.StarExpr).X = t.Go
ptr.C.Set("%s*", t.C)
}
c.ptrs[dtype] = nil // retain the map key
}
}
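// For example (illustrative only): translating
//
//	struct list { struct list *next; };
//
// leaves next as a placeholder *ast.StarExpr while the struct itself is in
// progress; FinishType later patches the StarExpr's X to the struct's Go
// type and sets the C spelling to "struct list*", breaking the recursion.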
// Type returns a *Type with the same memory layout as
// dtype when used as the type of a variable or a struct field.
func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
if t, ok := c.m[dtype]; ok {
if t.Go == nil {
fatalf("%s: type conversion loop at %s", lineno(pos), dtype)
}
return t
}
t := new(Type)
t.Size = dtype.Size() // note: wrong for array of pointers, corrected below
t.Align = -1
t.C = &TypeRepr{Repr: dtype.Common().Name}
c.m[dtype] = t
switch dt := dtype.(type) {
default:
fatalf("%s: unexpected type: %s", lineno(pos), dtype)
case *dwarf.AddrType:
if t.Size != c.ptrSize {
fatalf("%s: unexpected: %d-byte address type - %s", lineno(pos), t.Size, dtype)
}
t.Go = c.uintptr
t.Align = t.Size
case *dwarf.ArrayType:
if dt.StrideBitSize > 0 {
// Cannot represent bit-sized elements in Go.
t.Go = c.Opaque(t.Size)
break
}
count := dt.Count
if count == -1 {
// Indicates flexible array member, which Go doesn't support.
// Translate to zero-length array instead.
count = 0
}
sub := c.Type(dt.Type, pos)
t.Align = sub.Align
t.Go = &ast.ArrayType{
Len: c.intExpr(count),
Elt: sub.Go,
}
// Recalculate t.Size now that we know sub.Size.
t.Size = count * sub.Size
t.C.Set("__typeof__(%s[%d])", sub.C, dt.Count)
case *dwarf.BoolType:
t.Go = c.bool
t.Align = 1
case *dwarf.CharType:
if t.Size != 1 {
fatalf("%s: unexpected: %d-byte char type - %s", lineno(pos), t.Size, dtype)
}
t.Go = c.int8
t.Align = 1
case *dwarf.EnumType:
if t.Align = t.Size; t.Align >= c.ptrSize {
t.Align = c.ptrSize
}
t.C.Set("enum " + dt.EnumName)
signed := 0
t.EnumValues = make(map[string]int64)
for _, ev := range dt.Val {
t.EnumValues[ev.Name] = ev.Val
if ev.Val < 0 {
signed = signedDelta
}
}
switch t.Size + int64(signed) {
default:
fatalf("%s: unexpected: %d-byte enum type - %s", lineno(pos), t.Size, dtype)
case 1:
t.Go = c.uint8
case 2:
t.Go = c.uint16
case 4:
t.Go = c.uint32
case 8:
t.Go = c.uint64
case 1 + signedDelta:
t.Go = c.int8
case 2 + signedDelta:
t.Go = c.int16
case 4 + signedDelta:
t.Go = c.int32
case 8 + signedDelta:
t.Go = c.int64
}
case *dwarf.FloatType:
switch t.Size {
default:
fatalf("%s: unexpected: %d-byte float type - %s", lineno(pos), t.Size, dtype)
case 4:
t.Go = c.float32
case 8:
t.Go = c.float64
}
if t.Align = t.Size; t.Align >= c.ptrSize {
t.Align = c.ptrSize
}
case *dwarf.ComplexType:
switch t.Size {
default:
fatalf("%s: unexpected: %d-byte complex type - %s", lineno(pos), t.Size, dtype)
case 8:
t.Go = c.complex64
case 16:
t.Go = c.complex128
}
if t.Align = t.Size / 2; t.Align >= c.ptrSize {
t.Align = c.ptrSize
}
case *dwarf.FuncType:
// No attempt at translation: would enable calls
// directly between worlds, but we need to moderate those.
t.Go = c.uintptr
t.Align = c.ptrSize
case *dwarf.IntType:
if dt.BitSize > 0 {
fatalf("%s: unexpected: %d-bit int type - %s", lineno(pos), dt.BitSize, dtype)
}
switch t.Size {
default:
fatalf("%s: unexpected: %d-byte int type - %s", lineno(pos), t.Size, dtype)
case 1:
t.Go = c.int8
case 2:
t.Go = c.int16
case 4:
t.Go = c.int32
case 8:
t.Go = c.int64
case 16:
t.Go = &ast.ArrayType{
Len: c.intExpr(t.Size),
Elt: c.uint8,
}
}
if t.Align = t.Size; t.Align >= c.ptrSize {
t.Align = c.ptrSize
}
case *dwarf.PtrType:
// Clang doesn't emit DW_AT_byte_size for pointer types.
if t.Size != c.ptrSize && t.Size != -1 {
fatalf("%s: unexpected: %d-byte pointer type - %s", lineno(pos), t.Size, dtype)
}
t.Size = c.ptrSize
t.Align = c.ptrSize
if _, ok := base(dt.Type).(*dwarf.VoidType); ok {
t.Go = c.goVoidPtr
t.C.Set("void*")
dq := dt.Type
for {
if d, ok := dq.(*dwarf.QualType); ok {
t.C.Set(d.Qual + " " + t.C.String())
dq = d.Type
} else {
break
}
}
break
}
// Placeholder initialization; completed in FinishType.
t.Go = &ast.StarExpr{}
t.C.Set("<incomplete>*")
if _, ok := c.ptrs[dt.Type]; !ok {
c.ptrKeys = append(c.ptrKeys, dt.Type)
}
c.ptrs[dt.Type] = append(c.ptrs[dt.Type], t)
case *dwarf.QualType:
t1 := c.Type(dt.Type, pos)
t.Size = t1.Size
t.Align = t1.Align
t.Go = t1.Go
if unionWithPointer[t1.Go] {
unionWithPointer[t.Go] = true
}
t.EnumValues = nil
t.Typedef = ""
t.C.Set("%s "+dt.Qual, t1.C)
return t
case *dwarf.StructType:
// Convert to Go struct, being careful about alignment.
// Have to give it a name to simulate C "struct foo" references.
tag := dt.StructName
if dt.ByteSize < 0 && tag == "" { // opaque unnamed struct - should not be possible
break
}
if tag == "" {
tag = "__" + strconv.Itoa(tagGen)
tagGen++
} else if t.C.Empty() {
t.C.Set(dt.Kind + " " + tag)
}
name := c.Ident("_Ctype_" + dt.Kind + "_" + tag)
t.Go = name // publish before recursive calls
goIdent[name.Name] = name
if dt.ByteSize < 0 {
// Size calculation in c.Struct/c.Opaque will die with size=-1 (unknown),
// so execute the basic things that the struct case would do
// other than try to determine a Go representation.
tt := *t
tt.C = &TypeRepr{"%s %s", []interface{}{dt.Kind, tag}}
tt.Go = c.Ident("struct{}")
typedef[name.Name] = &tt
break
}
switch dt.Kind {
case "class", "union":
t.Go = c.Opaque(t.Size)
if c.dwarfHasPointer(dt, pos) {
unionWithPointer[t.Go] = true
}
if t.C.Empty() {
t.C.Set("__typeof__(unsigned char[%d])", t.Size)
}
t.Align = 1 // TODO: should probably base this on field alignment.
typedef[name.Name] = t
case "struct":
g, csyntax, align := c.Struct(dt, pos)
if t.C.Empty() {
t.C.Set(csyntax)
}
t.Align = align
tt := *t
if tag != "" {
tt.C = &TypeRepr{"struct %s", []interface{}{tag}}
}
tt.Go = g
typedef[name.Name] = &tt
}
case *dwarf.TypedefType:
// Record typedef for printing.
if dt.Name == "_GoString_" {
// Special C name for Go string type.
// Knows string layout used by compilers: pointer plus length,
// which rounds up to 2 pointers after alignment.
t.Go = c.string
t.Size = c.ptrSize * 2
t.Align = c.ptrSize
break
}
if dt.Name == "_GoBytes_" {
// Special C name for Go []byte type.
// Knows slice layout used by compilers: pointer, length, cap.
t.Go = c.Ident("[]byte")
t.Size = c.ptrSize + 4 + 4
t.Align = c.ptrSize
break
}
name := c.Ident("_Ctype_" + dt.Name)
goIdent[name.Name] = name
sub := c.Type(dt.Type, pos)
t.Go = name
if unionWithPointer[sub.Go] {
unionWithPointer[t.Go] = true
}
t.Size = sub.Size
t.Align = sub.Align
oldType := typedef[name.Name]
if oldType == nil {
tt := *t
tt.Go = sub.Go
typedef[name.Name] = &tt
}
// If sub.Go.Name is "_Ctype_struct_foo" or "_Ctype_union_foo" or "_Ctype_class_foo",
// use that as the Go form for this typedef too, so that the typedef will be interchangeable
// with the base type.
// In -godefs mode, do this for all typedefs.
if isStructUnionClass(sub.Go) || *godefs {
t.Go = sub.Go
if isStructUnionClass(sub.Go) {
// Use the typedef name for C code.
typedef[sub.Go.(*ast.Ident).Name].C = t.C
}
// If we've seen this typedef before, and it
// was an anonymous struct/union/class before
// too, use the old definition.
// TODO: it would be safer to only do this if
// we verify that the types are the same.
if oldType != nil && isStructUnionClass(oldType.Go) {
t.Go = oldType.Go
}
}
case *dwarf.UcharType:
if t.Size != 1 {
fatalf("%s: unexpected: %d-byte uchar type - %s", lineno(pos), t.Size, dtype)
}
t.Go = c.uint8
t.Align = 1
case *dwarf.UintType:
if dt.BitSize > 0 {
fatalf("%s: unexpected: %d-bit uint type - %s", lineno(pos), dt.BitSize, dtype)
}
switch t.Size {
default:
fatalf("%s: unexpected: %d-byte uint type - %s", lineno(pos), t.Size, dtype)
case 1:
t.Go = c.uint8
case 2:
t.Go = c.uint16
case 4:
t.Go = c.uint32
case 8:
t.Go = c.uint64
case 16:
t.Go = &ast.ArrayType{
Len: c.intExpr(t.Size),
Elt: c.uint8,
}
}
if t.Align = t.Size; t.Align >= c.ptrSize {
t.Align = c.ptrSize
}
case *dwarf.VoidType:
t.Go = c.goVoid
t.C.Set("void")
t.Align = 1
}
switch dtype.(type) {
case *dwarf.AddrType, *dwarf.BoolType, *dwarf.CharType, *dwarf.ComplexType, *dwarf.IntType, *dwarf.FloatType, *dwarf.UcharType, *dwarf.UintType:
s := dtype.Common().Name
if s != "" {
if ss, ok := dwarfToName[s]; ok {
s = ss
}
s = strings.Join(strings.Split(s, " "), "") // strip spaces
name := c.Ident("_Ctype_" + s)
tt := *t
typedef[name.Name] = &tt
if !*godefs {
t.Go = name
}
}
}
if t.Size < 0 {
// Unsized types are [0]byte, unless they're typedefs of other types
		// or structs with tags; if so, use the name we've already defined.
t.Size = 0
switch dt := dtype.(type) {
case *dwarf.TypedefType:
// ok
case *dwarf.StructType:
if dt.StructName != "" {
break
}
t.Go = c.Opaque(0)
default:
t.Go = c.Opaque(0)
}
if t.C.Empty() {
t.C.Set("void")
}
}
if t.C.Empty() {
fatalf("%s: internal error: did not create C name for %s", lineno(pos), dtype)
}
return t
}
// isStructUnionClass reports whether the type described by the Go syntax x
// is a struct, union, or class with a tag.
func isStructUnionClass(x ast.Expr) bool {
id, ok := x.(*ast.Ident)
if !ok {
return false
}
name := id.Name
return strings.HasPrefix(name, "_Ctype_struct_") ||
strings.HasPrefix(name, "_Ctype_union_") ||
strings.HasPrefix(name, "_Ctype_class_")
}
// FuncArg returns a Go type with the same memory layout as
// dtype when used as the type of a C function argument.
func (c *typeConv) FuncArg(dtype dwarf.Type, pos token.Pos) *Type {
t := c.Type(unqual(dtype), pos)
switch dt := dtype.(type) {
case *dwarf.ArrayType:
// Arrays are passed implicitly as pointers in C.
// In Go, we must be explicit.
tr := &TypeRepr{}
tr.Set("%s*", t.C)
return &Type{
Size: c.ptrSize,
Align: c.ptrSize,
Go: &ast.StarExpr{X: t.Go},
C: tr,
}
case *dwarf.TypedefType:
// C has much more relaxed rules than Go for
// implicit type conversions. When the parameter
// is type T defined as *X, simulate a little of the
// laxness of C by making the argument *X instead of T.
if ptr, ok := base(dt.Type).(*dwarf.PtrType); ok {
// Unless the typedef happens to point to void* since
// Go has special rules around using unsafe.Pointer.
if _, void := base(ptr.Type).(*dwarf.VoidType); void {
break
}
t = c.Type(ptr, pos)
if t == nil {
return nil
}
// For a struct/union/class, remember the C spelling,
// in case it has __attribute__((unavailable)).
// See issue 2888.
if isStructUnionClass(t.Go) {
t.Typedef = dt.Name
}
}
}
return t
}
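// For example (illustrative only): if DWARF describes a parameter with the
// C array type int[4], FuncArg yields the Go argument type *[4]int32
// (assuming a 4-byte C int), mirroring C's decay of array parameters to
// pointers.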
// FuncType returns the Go type analogous to dtype.
// There is no guarantee about matching memory layout.
func (c *typeConv) FuncType(dtype *dwarf.FuncType, pos token.Pos) *FuncType {
p := make([]*Type, len(dtype.ParamType))
gp := make([]*ast.Field, len(dtype.ParamType))
for i, f := range dtype.ParamType {
// gcc's DWARF generator outputs a single DotDotDotType parameter for
// function pointers that specify no parameters (e.g. void
// (*__cgo_0)()). Treat this special case as void. This case is
// invalid according to ISO C anyway (i.e. void (*__cgo_1)(...) is not
// legal).
if _, ok := f.(*dwarf.DotDotDotType); ok && i == 0 {
p, gp = nil, nil
break
}
p[i] = c.FuncArg(f, pos)
gp[i] = &ast.Field{Type: p[i].Go}
}
var r *Type
var gr []*ast.Field
if _, ok := base(dtype.ReturnType).(*dwarf.VoidType); ok {
gr = []*ast.Field{{Type: c.goVoid}}
} else if dtype.ReturnType != nil {
r = c.Type(unqual(dtype.ReturnType), pos)
gr = []*ast.Field{{Type: r.Go}}
}
return &FuncType{
Params: p,
Result: r,
Go: &ast.FuncType{
Params: &ast.FieldList{List: gp},
Results: &ast.FieldList{List: gr},
},
}
}
// Identifier
func (c *typeConv) Ident(s string) *ast.Ident {
return ast.NewIdent(s)
}
// Opaque type of n bytes.
func (c *typeConv) Opaque(n int64) ast.Expr {
return &ast.ArrayType{
Len: c.intExpr(n),
Elt: c.byte,
}
}
// Expr for integer n.
func (c *typeConv) intExpr(n int64) ast.Expr {
return &ast.BasicLit{
Kind: token.INT,
Value: strconv.FormatInt(n, 10),
}
}
// Add padding of given size to fld.
func (c *typeConv) pad(fld []*ast.Field, sizes []int64, size int64) ([]*ast.Field, []int64) {
n := len(fld)
fld = fld[0 : n+1]
fld[n] = &ast.Field{Names: []*ast.Ident{c.Ident("_")}, Type: c.Opaque(size)}
sizes = sizes[0 : n+1]
sizes[n] = size
return fld, sizes
}
// Struct conversion: return Go and (gc) C syntax for type.
func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.StructType, csyntax string, align int64) {
// Minimum alignment for a struct is 1 byte.
align = 1
var buf bytes.Buffer
buf.WriteString("struct {")
fld := make([]*ast.Field, 0, 2*len(dt.Field)+1) // enough for padding around every field
sizes := make([]int64, 0, 2*len(dt.Field)+1)
off := int64(0)
// Rename struct fields that happen to be named Go keywords into
// _{keyword}. Create a map from C ident -> Go ident. The Go ident will
// be mangled. Any existing identifier that already has the same name on
// the C-side will cause the Go-mangled version to be prefixed with _.
// (e.g. in a struct with fields '_type' and 'type', the latter would be
// rendered as '__type' in Go).
ident := make(map[string]string)
used := make(map[string]bool)
for _, f := range dt.Field {
ident[f.Name] = f.Name
used[f.Name] = true
}
if !*godefs {
for cid, goid := range ident {
if token.Lookup(goid).IsKeyword() {
// Avoid keyword
goid = "_" + goid
// Also avoid existing fields
for _, exist := used[goid]; exist; _, exist = used[goid] {
goid = "_" + goid
}
used[goid] = true
ident[cid] = goid
}
}
}
anon := 0
for _, f := range dt.Field {
if f.ByteOffset > off {
fld, sizes = c.pad(fld, sizes, f.ByteOffset-off)
off = f.ByteOffset
}
name := f.Name
ft := f.Type
// In godefs mode, if this field is a C11
// anonymous union then treat the first field in the
// union as the field in the struct. This handles
// cases like the glibc <sys/resource.h> file; see
// issue 6677.
if *godefs {
if st, ok := f.Type.(*dwarf.StructType); ok && name == "" && st.Kind == "union" && len(st.Field) > 0 && !used[st.Field[0].Name] {
name = st.Field[0].Name
ident[name] = name
ft = st.Field[0].Type
}
}
// TODO: Handle fields that are anonymous structs by
// promoting the fields of the inner struct.
t := c.Type(ft, pos)
tgo := t.Go
size := t.Size
talign := t.Align
if f.BitSize > 0 {
if f.BitSize%8 != 0 {
continue
}
size = f.BitSize / 8
name := tgo.(*ast.Ident).String()
if strings.HasPrefix(name, "int") {
name = "int"
} else {
name = "uint"
}
tgo = ast.NewIdent(name + fmt.Sprint(f.BitSize))
talign = size
}
if talign > 0 && f.ByteOffset%talign != 0 {
// Drop misaligned fields, the same way we drop integer bit fields.
// The goal is to make available what can be made available.
// Otherwise one bad and unneeded field in an otherwise okay struct
// makes the whole program not compile. Much of the time these
// structs are in system headers that cannot be corrected.
continue
}
n := len(fld)
fld = fld[0 : n+1]
if name == "" {
name = fmt.Sprintf("anon%d", anon)
anon++
ident[name] = name
}
fld[n] = &ast.Field{Names: []*ast.Ident{c.Ident(ident[name])}, Type: tgo}
sizes = sizes[0 : n+1]
sizes[n] = size
off += size
buf.WriteString(t.C.String())
buf.WriteString(" ")
buf.WriteString(name)
buf.WriteString("; ")
if talign > align {
align = talign
}
}
if off < dt.ByteSize {
fld, sizes = c.pad(fld, sizes, dt.ByteSize-off)
off = dt.ByteSize
}
// If the last field in a non-zero-sized struct is zero-sized
// the compiler is going to pad it by one (see issue 9401).
// We can't permit that, because then the size of the Go
// struct will not be the same as the size of the C struct.
// Our only option in such a case is to remove the field,
// which means that it cannot be referenced from Go.
for off > 0 && sizes[len(sizes)-1] == 0 {
n := len(sizes)
fld = fld[0 : n-1]
sizes = sizes[0 : n-1]
}
if off != dt.ByteSize {
fatalf("%s: struct size calculation error off=%d bytesize=%d", lineno(pos), off, dt.ByteSize)
}
buf.WriteString("}")
csyntax = buf.String()
if *godefs {
godefsFields(fld)
}
expr = &ast.StructType{Fields: &ast.FieldList{List: fld}}
return
}
// dwarfHasPointer returns whether the DWARF type dt contains a pointer.
func (c *typeConv) dwarfHasPointer(dt dwarf.Type, pos token.Pos) bool {
switch dt := dt.(type) {
default:
fatalf("%s: unexpected type: %s", lineno(pos), dt)
return false
case *dwarf.AddrType, *dwarf.BoolType, *dwarf.CharType, *dwarf.EnumType,
*dwarf.FloatType, *dwarf.ComplexType, *dwarf.FuncType,
*dwarf.IntType, *dwarf.UcharType, *dwarf.UintType, *dwarf.VoidType:
return false
case *dwarf.ArrayType:
return c.dwarfHasPointer(dt.Type, pos)
case *dwarf.PtrType:
return true
case *dwarf.QualType:
return c.dwarfHasPointer(dt.Type, pos)
case *dwarf.StructType:
for _, f := range dt.Field {
if c.dwarfHasPointer(f.Type, pos) {
return true
}
}
return false
case *dwarf.TypedefType:
if dt.Name == "_GoString_" || dt.Name == "_GoBytes_" {
return true
}
return c.dwarfHasPointer(dt.Type, pos)
}
}
func upper(s string) string {
if s == "" {
return ""
}
r, size := utf8.DecodeRuneInString(s)
if r == '_' {
return "X" + s
}
return string(unicode.ToUpper(r)) + s[size:]
}
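// For example (illustrative only): upper("sec") == "Sec", upper("") == "",
// and upper("_pad") == "X_pad", since a leading underscore cannot simply be
// upper-cased into an exported Go identifier.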
// godefsFields rewrites field names for use in Go or C definitions.
// It strips leading common prefixes (like tv_ in tv_sec, tv_usec),
// converts names to upper case, and rewrites _ into Pad_cgo_n,
// so that all fields are exported.
func godefsFields(fld []*ast.Field) {
prefix := fieldPrefix(fld)
npad := 0
for _, f := range fld {
for _, n := range f.Names {
if n.Name != prefix {
n.Name = strings.TrimPrefix(n.Name, prefix)
}
if n.Name == "_" {
// Use exported name instead.
n.Name = "Pad_cgo_" + strconv.Itoa(npad)
npad++
}
n.Name = upper(n.Name)
}
}
}
// fieldPrefix returns the prefix that should be removed from all the
// field names when generating the C or Go code. For generated
// C, we leave the names as is (tv_sec, tv_usec), since that's what
// people are used to seeing in C. For generated Go code, such as
// package syscall's data structures, we drop a common prefix
// (so sec, usec, which will get turned into Sec, Usec for exporting).
func fieldPrefix(fld []*ast.Field) string {
prefix := ""
for _, f := range fld {
for _, n := range f.Names {
// Ignore field names that don't have the prefix we're
// looking for. It is common in C headers to have fields
// named, say, _pad in an otherwise prefixed header.
// If the struct has 3 fields tv_sec, tv_usec, _pad1, then we
// still want to remove the tv_ prefix.
// The check for "orig_" here handles orig_eax in the
// x86 ptrace register sets, which otherwise have all fields
// with reg_ prefixes.
if strings.HasPrefix(n.Name, "orig_") || strings.HasPrefix(n.Name, "_") {
continue
}
i := strings.Index(n.Name, "_")
if i < 0 {
continue
}
if prefix == "" {
prefix = n.Name[:i+1]
} else if prefix != n.Name[:i+1] {
return ""
}
}
}
return prefix
}
| ["\"CC\"", "\"GCC\""] | [] | ["GCC", "CC"] | [] | ["GCC", "CC"] | go | 2 | 0 |
app.py | """
First, a few callback functions are defined. Then, those functions are passed to
the Dispatcher and registered at their respective places.
Then, the bot is started and runs until we press Ctrl-C on the command line.
Usage:
Send /start to begin and /help to list the supported commands.
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
import logging
import os
from typing import Dict
from ctlpe_demo_bot.credentials import bot_token, url
from telegram import ReplyKeyboardMarkup, Update, ReplyKeyboardRemove
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
ConversationHandler,
CallbackContext,
)
PORT = int(os.environ.get('PORT', 8443))
# Enable logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
)
logger = logging.getLogger(__name__)
CHOOSING, TYPING_REPLY, TYPING_CHOICE = range(3)
reply_keyboard = [
['Age', 'Favourite colour'],
['Number of siblings', 'Something else...'],
['Done'],
]
markup = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
def facts_to_str(user_data: Dict[str, str]) -> str:
facts = list()
for key, value in user_data.items():
facts.append(f'{key} - {value}')
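    # The second join uses the already-joined facts string as the separator
    # between '\n' and '\n', wrapping the result in leading and trailing
    # newlines.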
return "\n".join(facts).join(['\n', '\n'])
def start(update: Update, _: CallbackContext) -> None:
"""Send a message when the command /start is issued."""
update.message.reply_text(
"Hi! I am a work in progress bot. My job is to help this awesome CTLPE Intake 5 group, "
"especially with deadlines. Type /deadlines to start. For list of supported commands, type /help.",
# reply_markup=markup,
)
def help_command(update: Update, _: CallbackContext) -> None:
"""Send a message when the command /help is issued."""
update.message.reply_text("These are the list of commands supported. \n\n /deadlines. \n\n "
"Hey! I am still being enhanced, more features to come...!")
def deadlines_command(update: Update, _: CallbackContext) -> None:
"""Produce a list of upcoming deadlines"""
update.message.reply_text("Polymall Module 1.10 - 4 Jun 2359 \n"
"Lesson Plan submission - end of Sem 1\n")
def main() -> None:
# Create the Updater and pass it your bot's token.
updater = Updater(bot_token)
# Get the dispatcher to register handlers
dispatcher = updater.dispatcher
# dispatcher.add_handler(conv_handler)
dispatcher.add_handler(CommandHandler("start", start))
dispatcher.add_handler(CommandHandler("help", help_command))
dispatcher.add_handler(CommandHandler("deadlines", deadlines_command))
# Start the Bot
updater.start_webhook(listen="0.0.0.0",
                          port=PORT,
url_path=bot_token,
webhook_url=url + bot_token)
    # Run the bot until you press Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_webhook() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()
| [] | [] | ["PORT"] | [] | ["PORT"] | python | 1 | 0 |
registry/storage/driver/s3-aws/s3.go | // Package s3 provides a storagedriver.StorageDriver implementation to
// store blobs in Amazon S3 cloud storage.
//
// This package leverages the official aws client library for interfacing with
// S3.
//
// Because S3 is a key-value store, the Stat call does not support last modification
// time for directories (directories are an abstraction for key-value stores).
//
// Keep in mind that S3 guarantees only read-after-write consistency for new
// objects, but no read-after-update or list-after-write consistency.
package s3
import (
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
"os"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/cenkalti/backoff/v4"
"github.com/hashicorp/go-multierror"
log "github.com/sirupsen/logrus"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/sts"
dcontext "github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/base"
"github.com/docker/distribution/registry/storage/driver/factory"
"github.com/docker/distribution/version"
)
const driverName = "s3aws"
// minChunkSize defines the minimum multipart upload chunk size
// S3 API requires multipart upload chunks to be at least 5MB
const minChunkSize = 5 << 20
// maxChunkSize defines the maximum multipart upload chunk size allowed by S3.
const maxChunkSize = 5 << 30
const defaultChunkSize = 2 * minChunkSize
const (
// defaultMultipartCopyChunkSize defines the default chunk size for all
// but the last Upload Part - Copy operation of a multipart copy.
// Empirically, 32 MB is optimal.
defaultMultipartCopyChunkSize = 32 << 20
// defaultMultipartCopyMaxConcurrency defines the default maximum number
// of concurrent Upload Part - Copy operations for a multipart copy.
defaultMultipartCopyMaxConcurrency = 100
// defaultMultipartCopyThresholdSize defines the default object size
// above which multipart copy will be used. (PUT Object - Copy is used
// for objects at or below this size.) Empirically, 32 MB is optimal.
defaultMultipartCopyThresholdSize = 32 << 20
)
// listMax is the largest amount of objects you can request from S3 in a list call
const listMax = 1000
// deleteMax is the largest amount of objects you can request to be deleted in S3 using a DeleteObjects call. This is
// currently set to 1000 as per the S3 specification https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
const deleteMax = 1000
// defaultMaxRequestsPerSecond defines the default maximum number of requests
// per second that can be made to the S3 API per driver instance. 350 is 10%
// of the requestsPerSecondUpperLimit based on the figures listed in
// https://docs.aws.amazon.com/AmazonS3/latest/dev/optimizing-performance.html
const defaultMaxRequestsPerSecond = 350
// defaultBurst is how many limiter tokens may be reserved at once. Currently,
// we only reserve one at a time via Limiter.Wait()
const defaultBurst = 1
// noStorageClass defines the value to be used if storage class is not supported by the S3 endpoint
const noStorageClass = "NONE"
// defaults related to exponential backoff
const (
// defaultMaxRetries is how many times the driver will retry failed requests.
defaultMaxRetries = 5
defaultInitialInterval = backoff.DefaultInitialInterval
defaultRandomizationFactor = backoff.DefaultRandomizationFactor
defaultMultiplier = backoff.DefaultMultiplier
defaultMaxInterval = backoff.DefaultMaxInterval
defaultMaxElapsedTime = backoff.DefaultMaxElapsedTime
)
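// For illustration (values as documented by cenkalti/backoff, assumed here):
// with these defaults, retry delays start near 500ms, grow by a factor of
// 1.5 with +/-50% jitter, cap at 60s per attempt, and give up once 15
// minutes have elapsed.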
// validRegions maps known s3 region identifiers to region descriptors
var validRegions = map[string]struct{}{}
// validObjectACLs contains known s3 object Acls
var validObjectACLs = map[string]struct{}{}
// DriverParameters is a struct that encapsulates all of the driver parameters after all values have been set.
type DriverParameters struct {
AccessKey string
SecretKey string
Bucket string
Region string
RegionEndpoint string
Encrypt bool
KeyID string
Secure bool
SkipVerify bool
V4Auth bool
ChunkSize int64
MultipartCopyChunkSize int64
MultipartCopyMaxConcurrency int64
MultipartCopyThresholdSize int64
RootDirectory string
StorageClass string
ObjectACL string
SessionToken string
PathStyle bool
MaxRequestsPerSecond int64
MaxRetries int64
ParallelWalk bool
LogLevel aws.LogLevelType
}
func init() {
partitions := endpoints.DefaultPartitions()
for _, p := range partitions {
for region := range p.Regions() {
validRegions[region] = struct{}{}
}
}
for _, objectACL := range []string{
s3.ObjectCannedACLPrivate,
s3.ObjectCannedACLPublicRead,
s3.ObjectCannedACLPublicReadWrite,
s3.ObjectCannedACLAuthenticatedRead,
s3.ObjectCannedACLAwsExecRead,
s3.ObjectCannedACLBucketOwnerRead,
s3.ObjectCannedACLBucketOwnerFullControl,
} {
validObjectACLs[objectACL] = struct{}{}
}
// Register this as the default s3 driver in addition to s3aws
factory.Register("s3", &s3DriverFactory{})
factory.Register(driverName, &s3DriverFactory{})
}
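// As a usage sketch (illustrative only; parameter values are placeholders),
// a registry resolves this driver through the factory:
//
//	d, err := factory.Create("s3aws", map[string]interface{}{
//		"region": "us-east-1",
//		"bucket": "my-bucket",
//	})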
// s3DriverFactory implements the factory.StorageDriverFactory interface
type s3DriverFactory struct{}
func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
return FromParameters(parameters)
}
type driver struct {
S3 *s3wrapper
Bucket string
ChunkSize int64
Encrypt bool
KeyID string
MultipartCopyChunkSize int64
MultipartCopyMaxConcurrency int64
MultipartCopyThresholdSize int64
RootDirectory string
StorageClass string
ObjectACL string
ParallelWalk bool
}
type baseEmbed struct {
base.Base
}
// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3
// Objects are stored at absolute keys in the provided bucket.
type Driver struct {
baseEmbed
}
func parseLogLevelParam(param interface{}) aws.LogLevelType {
logLevel := aws.LogOff
if param != nil {
switch strings.ToLower(param.(string)) {
case "logoff":
log.Info("S3 logging level set to LogOff")
case "logdebug":
log.Info("S3 logging level set to LogDebug")
logLevel = aws.LogDebug
case "logdebugwithsigning":
log.Info("S3 logging level set to LogDebugWithSigning")
logLevel = aws.LogDebugWithSigning
case "logdebugwithhttpbody":
log.Info("S3 logging level set to LogDebugWithHTTPBody")
logLevel = aws.LogDebugWithHTTPBody
case "logdebugwithrequestretries":
log.Info("S3 logging level set to LogDebugWithRequestRetries")
logLevel = aws.LogDebugWithRequestRetries
case "logdebugwithrequesterrors":
log.Info("S3 logging level set to LogDebugWithRequestErrors")
logLevel = aws.LogDebugWithRequestErrors
case "logdebugwitheventstreambody":
log.Info("S3 logging level set to LogDebugWithEventStreamBody")
logLevel = aws.LogDebugWithEventStreamBody
default:
log.Infof("unknown loglevel %q, S3 logging level set to LogOff", param)
}
}
return logLevel
}
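// For example (illustrative only; the surrounding config shape depends on
// the deployment): a storage parameter such as
//
//	loglevel: logdebugwithrequestretries
//
// enables aws.LogDebugWithRequestRetries, while unrecognized values fall
// back to aws.LogOff with an informational log message.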
// FromParameters constructs a new Driver with a given parameters map
// Required parameters:
// - accesskey
// - secretkey
// - region
// - bucket
// - encrypt
func FromParameters(parameters map[string]interface{}) (*Driver, error) {
// Providing no values for these is valid in case the user is authenticating
// with an IAM on an ec2 instance (in which case the instance credentials will
// be summoned when GetAuth is called)
accessKey := parameters["accesskey"]
if accessKey == nil {
accessKey = ""
}
secretKey := parameters["secretkey"]
if secretKey == nil {
secretKey = ""
}
regionEndpoint := parameters["regionendpoint"]
if regionEndpoint == nil {
regionEndpoint = ""
}
var result *multierror.Error
regionName := parameters["region"]
if regionName == nil || fmt.Sprint(regionName) == "" {
err := errors.New("no region parameter provided")
result = multierror.Append(result, err)
}
region := fmt.Sprint(regionName)
// Don't check the region value if a custom endpoint is provided.
if regionEndpoint == "" {
if _, ok := validRegions[region]; !ok {
err := fmt.Errorf("validating region provided: %v", region)
result = multierror.Append(result, err)
}
}
bucket := parameters["bucket"]
if bucket == nil || fmt.Sprint(bucket) == "" {
err := errors.New("no bucket parameter provided")
result = multierror.Append(result, err)
}
encryptBool := false
encrypt := parameters["encrypt"]
switch encrypt := encrypt.(type) {
case string:
b, err := strconv.ParseBool(encrypt)
if err != nil {
err := errors.New("the encrypt parameter should be a boolean")
result = multierror.Append(result, err)
}
encryptBool = b
case bool:
encryptBool = encrypt
case nil:
// do nothing
default:
err := errors.New("the encrypt parameter should be a boolean")
result = multierror.Append(result, err)
}
secureBool := true
secure := parameters["secure"]
switch secure := secure.(type) {
case string:
b, err := strconv.ParseBool(secure)
if err != nil {
err := errors.New("the secure parameter should be a boolean")
result = multierror.Append(result, err)
}
secureBool = b
case bool:
secureBool = secure
case nil:
// do nothing
default:
err := errors.New("the secure parameter should be a boolean")
result = multierror.Append(result, err)
}
skipVerifyBool := false
skipVerify := parameters["skipverify"]
switch skipVerify := skipVerify.(type) {
case string:
b, err := strconv.ParseBool(skipVerify)
if err != nil {
err := errors.New("the skipVerify parameter should be a boolean")
result = multierror.Append(result, err)
}
skipVerifyBool = b
case bool:
skipVerifyBool = skipVerify
case nil:
// do nothing
default:
err := errors.New("the skipVerify parameter should be a boolean")
result = multierror.Append(result, err)
}
v4Bool := true
v4auth := parameters["v4auth"]
switch v4auth := v4auth.(type) {
case string:
b, err := strconv.ParseBool(v4auth)
if err != nil {
err := errors.New("the v4auth parameter should be a boolean")
result = multierror.Append(result, err)
}
v4Bool = b
case bool:
v4Bool = v4auth
case nil:
// do nothing
default:
err := errors.New("the v4auth parameter should be a boolean")
result = multierror.Append(result, err)
}
keyID := parameters["keyid"]
if keyID == nil {
keyID = ""
}
chunkSize, err := getParameterAsInt64(parameters, "chunksize", defaultChunkSize, minChunkSize, maxChunkSize)
if err != nil {
err := fmt.Errorf("converting chunksize to int64: %w", err)
result = multierror.Append(result, err)
}
multipartCopyChunkSize, err := getParameterAsInt64(parameters, "multipartcopychunksize", defaultMultipartCopyChunkSize, minChunkSize, maxChunkSize)
if err != nil {
err := fmt.Errorf("converting multipartcopychunksize to valid int64: %w", err)
result = multierror.Append(result, err)
}
multipartCopyMaxConcurrency, err := getParameterAsInt64(parameters, "multipartcopymaxconcurrency", defaultMultipartCopyMaxConcurrency, 1, math.MaxInt64)
if err != nil {
err := fmt.Errorf("converting multipartcopymaxconcurrency to valid int64: %w", err)
result = multierror.Append(result, err)
}
multipartCopyThresholdSize, err := getParameterAsInt64(parameters, "multipartcopythresholdsize", defaultMultipartCopyThresholdSize, 0, maxChunkSize)
if err != nil {
err := fmt.Errorf("converting multipartcopythresholdsize to valid int64: %w", err)
result = multierror.Append(result, err)
}
rootDirectory := parameters["rootdirectory"]
if rootDirectory == nil {
rootDirectory = ""
}
storageClass := s3.StorageClassStandard
storageClassParam := parameters["storageclass"]
if storageClassParam != nil {
storageClassString, ok := storageClassParam.(string)
if !ok {
err := fmt.Errorf("the storageclass parameter must be one of %v, %v invalid",
[]string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam)
result = multierror.Append(result, err)
}
// All valid storage class parameters are UPPERCASE, so be a bit more flexible here
storageClassString = strings.ToUpper(storageClassString)
if storageClassString != noStorageClass &&
storageClassString != s3.StorageClassStandard &&
storageClassString != s3.StorageClassReducedRedundancy {
err := fmt.Errorf("the storageclass parameter must be one of %v, %v invalid",
[]string{noStorageClass, s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam)
result = multierror.Append(result, err)
}
storageClass = storageClassString
}
objectACL := s3.ObjectCannedACLPrivate
objectACLParam := parameters["objectacl"]
if objectACLParam != nil {
objectACLString, ok := objectACLParam.(string)
if !ok {
err := fmt.Errorf("object ACL parameter should be a string: %v", objectACLParam)
result = multierror.Append(result, err)
}
if _, ok = validObjectACLs[objectACLString]; !ok {
var objectACLkeys []string
for key := range validObjectACLs {
objectACLkeys = append(objectACLkeys, key)
}
err := fmt.Errorf("object ACL parameter should be one of %v: %v", objectACLkeys, objectACLParam)
result = multierror.Append(result, err)
}
objectACL = objectACLString
}
pathStyleBool := false
	// If regionEndpoint is set, default to forcing pathstyle to preserve legacy behavior.
if regionEndpoint != "" {
pathStyleBool = true
}
pathStyle := parameters["pathstyle"]
switch pathStyle := pathStyle.(type) {
case string:
b, err := strconv.ParseBool(pathStyle)
if err != nil {
err := errors.New("the pathstyle parameter should be a boolean")
result = multierror.Append(result, err)
}
pathStyleBool = b
case bool:
pathStyleBool = pathStyle
case nil:
// do nothing
default:
err := errors.New("the pathstyle parameter should be a boolean")
result = multierror.Append(result, err)
}
var parallelWalkBool bool
parallelWalk := parameters["parallelwalk"]
switch parallelWalk := parallelWalk.(type) {
case string:
b, err := strconv.ParseBool(parallelWalk)
if err != nil {
err := errors.New("the parallelwalk parameter should be a boolean")
result = multierror.Append(result, err)
}
parallelWalkBool = b
case bool:
parallelWalkBool = parallelWalk
case nil:
// do nothing
default:
err := errors.New("the parallelwalk parameter should be a boolean")
result = multierror.Append(result, err)
}
maxRequestsPerSecond, err := getParameterAsInt64(parameters, "maxrequestspersecond", defaultMaxRequestsPerSecond, 0, math.MaxInt64)
if err != nil {
err = fmt.Errorf("converting maxrequestspersecond to valid int64: %w", err)
result = multierror.Append(result, err)
}
maxRetries, err := getParameterAsInt64(parameters, "maxretries", defaultMaxRetries, 0, math.MaxInt64)
if err != nil {
err := fmt.Errorf("converting maxrequestspersecond to valid int64: %w", err)
result = multierror.Append(result, err)
}
// multierror return
if err := result.ErrorOrNil(); err != nil {
return nil, err
}
sessionToken := ""
logLevel := parseLogLevelParam(parameters["loglevel"])
params := DriverParameters{
fmt.Sprint(accessKey),
fmt.Sprint(secretKey),
fmt.Sprint(bucket),
region,
fmt.Sprint(regionEndpoint),
encryptBool,
fmt.Sprint(keyID),
secureBool,
skipVerifyBool,
v4Bool,
chunkSize,
multipartCopyChunkSize,
multipartCopyMaxConcurrency,
multipartCopyThresholdSize,
fmt.Sprint(rootDirectory),
storageClass,
objectACL,
fmt.Sprint(sessionToken),
pathStyleBool,
maxRequestsPerSecond,
maxRetries,
parallelWalkBool,
logLevel,
}
return New(params)
}
// getParameterAsInt64 converts parameters[name] to an int64 value (using
// the default if nil), verifies it lies within [min, max], and returns it.
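// For example, a parameter supplied as the string "5" parses to int64(5),
// while a parsed value outside [min, max] is rejected with an error.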
func getParameterAsInt64(parameters map[string]interface{}, name string, defaultt int64, min int64, max int64) (int64, error) {
rv := defaultt
param := parameters[name]
switch v := param.(type) {
case string:
vv, err := strconv.ParseInt(v, 0, 64)
if err != nil {
return 0, fmt.Errorf("%s parameter must be an integer, %v invalid", name, param)
}
rv = vv
case int64:
rv = v
case int, uint, int32, uint32, uint64:
rv = reflect.ValueOf(v).Convert(reflect.TypeOf(rv)).Int()
case nil:
// do nothing
default:
return 0, fmt.Errorf("converting value for %s: %#v", name, param)
}
if rv < min || rv > max {
return 0, fmt.Errorf("the %s %#v parameter should be a number between %d and %d (inclusive)", name, rv, min, max)
}
return rv, nil
}
// New constructs a new Driver with the given AWS credentials, region, encryption flag, and
// bucketName
func New(params DriverParameters) (*Driver, error) {
if !params.V4Auth &&
(params.RegionEndpoint == "" ||
strings.Contains(params.RegionEndpoint, "s3.amazonaws.com")) {
return nil, fmt.Errorf("on Amazon S3 this storage driver can only be used with v4 authentication")
}
awsConfig := aws.NewConfig().WithLogLevel(params.LogLevel)
sess, err := session.NewSession()
if err != nil {
return nil, fmt.Errorf("creating a new session: %w", err)
}
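	// Resolve credentials in order: static keys from the driver parameters,
	// then environment variables, the shared credentials file, the EC2
	// instance role, and finally STS web identity.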
creds := credentials.NewChainCredentials([]credentials.Provider{
&credentials.StaticProvider{
Value: credentials.Value{
AccessKeyID: params.AccessKey,
SecretAccessKey: params.SecretKey,
SessionToken: params.SessionToken,
},
},
&credentials.EnvProvider{},
&credentials.SharedCredentialsProvider{},
&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess)},
webIdentityProvider(sess),
})
if params.RegionEndpoint != "" {
awsConfig.WithEndpoint(params.RegionEndpoint)
}
awsConfig.WithS3ForcePathStyle(params.PathStyle)
awsConfig.WithCredentials(creds)
awsConfig.WithRegion(params.Region)
awsConfig.WithDisableSSL(!params.Secure)
if params.SkipVerify {
awsConfig.WithHTTPClient(&http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
})
}
sess, err = session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("creating a new session with aws config: %w", err)
}
userAgentHandler := request.NamedHandler{
Name: "user-agent",
Fn: request.MakeAddToUserAgentHandler("docker-distribution", version.Version, runtime.Version()),
}
sess.Handlers.Build.PushFrontNamed(userAgentHandler)
s3obj := s3.New(sess)
// enable S3 compatible signature v2 signing instead
if !params.V4Auth {
setv2Handlers(s3obj)
}
// TODO Currently multipart uploads have no timestamps, so this would be unwise
// if you initiated a new s3driver while another one is running on the same bucket.
// multis, _, err := bucket.ListMulti("", "")
// if err != nil {
// return nil, err
// }
// for _, multi := range multis {
// err := multi.Abort()
// //TODO appropriate to do this error checking?
// if err != nil {
// return nil, err
// }
// }
w := newS3Wrapper(
s3obj,
withRateLimit(params.MaxRequestsPerSecond, defaultBurst),
withExponentialBackoff(params.MaxRetries),
withBackoffNotify(func(err error, t time.Duration) {
log.WithFields(log.Fields{"error": err, "delay_s": t.Seconds()}).Info("S3: retrying after error")
}),
)
d := &driver{
S3: w,
Bucket: params.Bucket,
ChunkSize: params.ChunkSize,
Encrypt: params.Encrypt,
KeyID: params.KeyID,
MultipartCopyChunkSize: params.MultipartCopyChunkSize,
MultipartCopyMaxConcurrency: params.MultipartCopyMaxConcurrency,
MultipartCopyThresholdSize: params.MultipartCopyThresholdSize,
RootDirectory: params.RootDirectory,
StorageClass: params.StorageClass,
ObjectACL: params.ObjectACL,
ParallelWalk: params.ParallelWalk,
}
return &Driver{
baseEmbed: baseEmbed{
Base: base.Base{
StorageDriver: d,
},
},
}, nil
}
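// webIdentityProvider builds an STS web-identity credentials provider from
// the standard AWS_ROLE_ARN, AWS_WEB_IDENTITY_TOKEN_FILE and
// AWS_ROLE_SESSION_NAME environment variables.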
func webIdentityProvider(sess client.ConfigProvider) credentials.Provider {
svc := sts.New(sess)
roleARN := os.Getenv("AWS_ROLE_ARN")
tokenFilepath := os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")
roleSessionName := os.Getenv("AWS_ROLE_SESSION_NAME")
return stscreds.NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, tokenFilepath)
}
// Implement the storagedriver.StorageDriver interface
func (d *driver) Name() string {
return driverName
}
// GetContent retrieves the content stored at "path" as a []byte.
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
reader, err := d.Reader(ctx, path, 0)
if err != nil {
return nil, err
}
return ioutil.ReadAll(reader)
}
// PutContent stores the []byte content at a location designated by "path".
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
_, err := d.S3.PutObjectWithContext(
ctx,
&s3.PutObjectInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(d.s3Path(path)),
ContentType: d.getContentType(),
ACL: d.getACL(),
ServerSideEncryption: d.getEncryptionMode(),
SSEKMSKeyId: d.getSSEKMSKeyID(),
StorageClass: d.getStorageClass(),
Body: bytes.NewReader(contents),
})
return parseError(path, err)
}
// Reader retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
resp, err := d.S3.GetObjectWithContext(
ctx,
&s3.GetObjectInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(d.s3Path(path)),
Range: aws.String("bytes=" + strconv.FormatInt(offset, 10) + "-"),
})
if err != nil {
if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "InvalidRange" {
return ioutil.NopCloser(bytes.NewReader(nil)), nil
}
return nil, parseError(path, err)
}
return resp.Body, nil
}
// Writer returns a FileWriter which will store the content written to it
// at the location designated by "path" after the call to Commit.
func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
key := d.s3Path(path)
if !append {
// TODO (brianbland): cancel other uploads at this path
resp, err := d.S3.CreateMultipartUploadWithContext(
ctx,
&s3.CreateMultipartUploadInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(key),
ContentType: d.getContentType(),
ACL: d.getACL(),
ServerSideEncryption: d.getEncryptionMode(),
SSEKMSKeyId: d.getSSEKMSKeyID(),
StorageClass: d.getStorageClass(),
})
if err != nil {
return nil, err
}
return d.newWriter(key, *resp.UploadId, nil), nil
}
resp, err := d.S3.ListMultipartUploadsWithContext(
ctx,
&s3.ListMultipartUploadsInput{
Bucket: aws.String(d.Bucket),
Prefix: aws.String(key),
})
if err != nil {
return nil, parseError(path, err)
}
for _, multi := range resp.Uploads {
if key != *multi.Key {
continue
}
resp, err := d.S3.ListPartsWithContext(
ctx,
&s3.ListPartsInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(key),
UploadId: multi.UploadId,
})
if err != nil {
return nil, parseError(path, err)
}
var multiSize int64
for _, part := range resp.Parts {
multiSize += *part.Size
}
return d.newWriter(key, *multi.UploadId, resp.Parts), nil
}
return nil, storagedriver.PathNotFoundError{Path: path}
}
// Stat retrieves the FileInfo for the given path, including the current size
// in bytes and the creation time.
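// It lists the bucket with the path as prefix and MaxKeys=1: if the only
// match is not the path itself, the path is treated as a directory.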
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
resp, err := d.S3.ListObjectsV2WithContext(
ctx,
&s3.ListObjectsV2Input{
Bucket: aws.String(d.Bucket),
Prefix: aws.String(d.s3Path(path)),
MaxKeys: aws.Int64(1),
})
if err != nil {
return nil, err
}
fi := storagedriver.FileInfoFields{
Path: path,
}
if len(resp.Contents) == 1 {
if *resp.Contents[0].Key != d.s3Path(path) {
fi.IsDir = true
} else {
fi.IsDir = false
fi.Size = *resp.Contents[0].Size
fi.ModTime = *resp.Contents[0].LastModified
}
} else if len(resp.CommonPrefixes) == 1 {
fi.IsDir = true
} else {
return nil, storagedriver.PathNotFoundError{Path: path}
}
return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
}
// List returns a list of the objects that are direct descendants of the given path.
func (d *driver) List(ctx context.Context, opath string) ([]string, error) {
path := opath
if path != "/" && path[len(path)-1] != '/' {
path = path + "/"
}
// This is to cover for the cases when the rootDirectory of the driver is either "" or "/".
// In those cases, there is no root prefix to replace and we must actually add a "/" to all
// results in order to keep them as valid paths as recognized by storagedriver.PathRegexp
prefix := ""
if d.s3Path("") == "" {
prefix = "/"
}
resp, err := d.S3.ListObjectsV2WithContext(
ctx,
&s3.ListObjectsV2Input{
Bucket: aws.String(d.Bucket),
Prefix: aws.String(d.s3Path(path)),
Delimiter: aws.String("/"),
MaxKeys: aws.Int64(listMax),
})
if err != nil {
return nil, parseError(opath, err)
}
files := []string{}
directories := []string{}
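	// Page through the listing with the continuation token, collecting keys
	// as files and common prefixes as directories.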
for {
for _, key := range resp.Contents {
files = append(files, strings.Replace(*key.Key, d.s3Path(""), prefix, 1))
}
for _, commonPrefix := range resp.CommonPrefixes {
commonPrefix := *commonPrefix.Prefix
directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1))
}
if *resp.IsTruncated {
resp, err = d.S3.ListObjectsV2WithContext(
ctx,
&s3.ListObjectsV2Input{
Bucket: aws.String(d.Bucket),
Prefix: aws.String(d.s3Path(path)),
Delimiter: aws.String("/"),
MaxKeys: aws.Int64(listMax),
ContinuationToken: resp.NextContinuationToken,
})
if err != nil {
return nil, err
}
} else {
break
}
}
if opath != "/" {
if len(files) == 0 && len(directories) == 0 {
// Treat empty response as missing directory, since we don't actually
// have directories in s3.
return nil, storagedriver.PathNotFoundError{Path: opath}
}
}
return append(files, directories...), nil
}
// Move moves an object stored at sourcePath to destPath, removing the original
// object.
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
/* This is terrible, but aws doesn't have an actual move. */
if err := d.copy(ctx, sourcePath, destPath); err != nil {
return err
}
return d.Delete(ctx, sourcePath)
}
// copy copies an object stored at sourcePath to destPath.
func (d *driver) copy(ctx context.Context, sourcePath string, destPath string) error {
// S3 can copy objects up to 5 GB in size with a single PUT Object - Copy
// operation. For larger objects, the multipart upload API must be used.
//
// Empirically, multipart copy is fastest with 32 MB parts and is faster
// than PUT Object - Copy for objects larger than 32 MB.
fileInfo, err := d.Stat(ctx, sourcePath)
if err != nil {
return parseError(sourcePath, err)
}
if fileInfo.Size() <= d.MultipartCopyThresholdSize {
_, err = d.S3.CopyObjectWithContext(
ctx,
&s3.CopyObjectInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(d.s3Path(destPath)),
ContentType: d.getContentType(),
ACL: d.getACL(),
ServerSideEncryption: d.getEncryptionMode(),
SSEKMSKeyId: d.getSSEKMSKeyID(),
StorageClass: d.getStorageClass(),
CopySource: aws.String(d.Bucket + "/" + d.s3Path(sourcePath)),
})
if err != nil {
return parseError(sourcePath, err)
}
return nil
}
createResp, err := d.S3.CreateMultipartUploadWithContext(
ctx,
&s3.CreateMultipartUploadInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(d.s3Path(destPath)),
ContentType: d.getContentType(),
ACL: d.getACL(),
SSEKMSKeyId: d.getSSEKMSKeyID(),
ServerSideEncryption: d.getEncryptionMode(),
StorageClass: d.getStorageClass(),
})
if err != nil {
return err
}
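	// Ceiling division: numParts = ceil(size / chunkSize), e.g. a 33 MB
	// object with 32 MB chunks needs two parts.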
numParts := (fileInfo.Size() + d.MultipartCopyChunkSize - 1) / d.MultipartCopyChunkSize
completedParts := make([]*s3.CompletedPart, numParts)
errChan := make(chan error, numParts)
// Reduce the client/server exposure to long lived connections regardless of
// how many requests per second are allowed.
limiter := make(chan struct{}, d.MultipartCopyMaxConcurrency)
for i := range completedParts {
i := int64(i)
go func() {
limiter <- struct{}{}
firstByte := i * d.MultipartCopyChunkSize
lastByte := firstByte + d.MultipartCopyChunkSize - 1
if lastByte >= fileInfo.Size() {
lastByte = fileInfo.Size() - 1
}
uploadResp, err := d.S3.UploadPartCopyWithContext(
ctx,
&s3.UploadPartCopyInput{
Bucket: aws.String(d.Bucket),
CopySource: aws.String(d.Bucket + "/" + d.s3Path(sourcePath)),
Key: aws.String(d.s3Path(destPath)),
PartNumber: aws.Int64(i + 1),
UploadId: createResp.UploadId,
CopySourceRange: aws.String(fmt.Sprintf("bytes=%d-%d", firstByte, lastByte)),
})
if err == nil {
completedParts[i] = &s3.CompletedPart{
ETag: uploadResp.CopyPartResult.ETag,
PartNumber: aws.Int64(i + 1),
}
}
errChan <- err
<-limiter
}()
}
for range completedParts {
err := <-errChan
if err != nil {
return err
}
}
_, err = d.S3.CompleteMultipartUploadWithContext(
ctx,
&s3.CompleteMultipartUploadInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(d.s3Path(destPath)),
UploadId: createResp.UploadId,
MultipartUpload: &s3.CompletedMultipartUpload{Parts: completedParts},
})
return err
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
// Delete recursively deletes all objects stored at "path" and its subpaths.
// We must be careful since S3 does not guarantee read after delete consistency
func (d *driver) Delete(ctx context.Context, path string) error {
s3Objects := make([]*s3.ObjectIdentifier, 0, listMax)
s3Path := d.s3Path(path)
listObjectsV2Input := &s3.ListObjectsV2Input{
Bucket: aws.String(d.Bucket),
Prefix: aws.String(s3Path),
}
ListLoop:
for {
// list all the objects
resp, err := d.S3.ListObjectsV2WithContext(ctx, listObjectsV2Input)
// resp.Contents can only be empty on the first call
// if there were no more results to return after the first call, resp.IsTruncated would have been false
// and the loop would be exited without recalling ListObjects
if err != nil || len(resp.Contents) == 0 {
return storagedriver.PathNotFoundError{Path: path}
}
for _, key := range resp.Contents {
// Stop if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab").
if len(*key.Key) > len(s3Path) && (*key.Key)[len(s3Path)] != '/' {
break ListLoop
}
s3Objects = append(s3Objects, &s3.ObjectIdentifier{
Key: key.Key,
})
}
// resp.Contents must have at least one element or we would have returned not found
listObjectsV2Input.StartAfter = resp.Contents[len(resp.Contents)-1].Key
// from the s3 api docs, IsTruncated "specifies whether (true) or not (false) all of the results were returned"
// if everything has been returned, break
if resp.IsTruncated == nil || !*resp.IsTruncated {
break
}
}
// need to chunk objects into groups of deleteMax per s3 restrictions
total := len(s3Objects)
for i := 0; i < total; i += deleteMax {
_, err := d.S3.DeleteObjectsWithContext(
ctx,
&s3.DeleteObjectsInput{
Bucket: aws.String(d.Bucket),
Delete: &s3.Delete{
Objects: s3Objects[i:min(i+deleteMax, total)],
Quiet: aws.Bool(false),
},
})
if err != nil {
return err
}
}
return nil
}
// DeleteFiles deletes a set of files using the S3 bulk delete feature, with up to deleteMax files per request. If
// deleting more than deleteMax files, DeleteFiles will split files in deleteMax requests automatically. A separate
// goroutine is created for each request. Contrary to Delete, which is a generic method to delete any kind of object,
// DeleteFiles does not send a ListObjects request before DeleteObjects. Returns the number of successfully deleted
// files and any errors. This method is idempotent, no error is returned if a file does not exist.
func (d *driver) DeleteFiles(ctx context.Context, paths []string) (int, error) {
s3Objects := make([]*s3.ObjectIdentifier, 0, len(paths))
for i := range paths {
p := d.s3Path(paths[i])
s3Objects = append(s3Objects, &s3.ObjectIdentifier{Key: &p})
}
// collect errors from concurrent DeleteObjects requests
var errs error
errCh := make(chan error)
errDone := make(chan struct{})
go func() {
for err := range errCh {
errs = multierror.Append(errs, err)
}
errDone <- struct{}{}
}()
// count the number of successfully deleted files across concurrent DeleteObjects requests
count := 0
countCh := make(chan int)
countDone := make(chan struct{})
go func() {
for n := range countCh {
count += n
}
countDone <- struct{}{}
}()
// chunk files into batches of deleteMax (as per S3 restrictions), creating a goroutine per batch
var wg sync.WaitGroup
total := len(s3Objects)
for i := 0; i < total; i += deleteMax {
wg.Add(1)
go func(i int) {
defer wg.Done()
resp, err := d.S3.DeleteObjectsWithContext(
ctx,
&s3.DeleteObjectsInput{
Bucket: aws.String(d.Bucket),
Delete: &s3.Delete{
Objects: s3Objects[i:min(i+deleteMax, total)],
Quiet: aws.Bool(false),
},
})
if err != nil {
errCh <- err
return
}
// count successfully deleted files
countCh <- len(resp.Deleted)
// even if err is nil (200 OK response) it's not guaranteed that all files have been successfully deleted,
// we need to check the []*s3.Error slice within the S3 response and make sure it's empty
if len(resp.Errors) > 0 {
// parse s3.Error errors and return a single storagedriver.MultiError
var errs error
for _, s3e := range resp.Errors {
err := fmt.Errorf("deleting file '%s': '%s'", *s3e.Key, *s3e.Message)
errs = multierror.Append(errs, err)
}
errCh <- errs
}
}(i)
}
wg.Wait()
close(errCh)
<-errDone
close(countCh)
<-countDone
return count, errs
}
// URLFor returns a URL which may be used to retrieve the content stored at the given path.
// May return an UnsupportedMethodErr in certain StorageDriver implementations.
func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
methodString := "GET"
method, ok := options["method"]
if ok {
methodString, ok = method.(string)
if !ok || (methodString != "GET" && methodString != "HEAD") {
return "", storagedriver.ErrUnsupportedMethod{}
}
}
expiresIn := 20 * time.Minute
expires, ok := options["expiry"]
if ok {
et, ok := expires.(time.Time)
if ok {
			expiresIn = time.Until(et)
}
}
var req *request.Request
switch methodString {
case "GET":
req, _ = d.S3.GetObjectRequest(&s3.GetObjectInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(d.s3Path(path)),
})
case "HEAD":
req, _ = d.S3.HeadObjectRequest(&s3.HeadObjectInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(d.s3Path(path)),
})
default:
panic("unreachable")
}
return req.Presign(expiresIn)
}
// Walk traverses a filesystem defined within driver, starting
// from the given path, calling f on each file
func (d *driver) Walk(ctx context.Context, from string, f storagedriver.WalkFn) error {
path := from
if !strings.HasSuffix(path, "/") {
path = path + "/"
}
prefix := ""
if d.s3Path("") == "" {
prefix = "/"
}
var objectCount int64
if err := d.doWalk(ctx, &objectCount, d.s3Path(path), prefix, f); err != nil {
return err
}
// S3 doesn't have the concept of empty directories, so it'll return path not found if there are no objects
if objectCount == 0 {
return storagedriver.PathNotFoundError{Path: from}
}
return nil
}
// WalkParallel traverses a filesystem defined within driver, starting
// from the given path, calling f on each file.
func (d *driver) WalkParallel(ctx context.Context, from string, f storagedriver.WalkFn) error {
// If the ParallelWalk feature flag is not set, fall back to standard sequential walk.
if !d.ParallelWalk {
return d.Walk(ctx, from, f)
}
path := from
if !strings.HasSuffix(path, "/") {
path = path + "/"
}
prefix := ""
if d.s3Path("") == "" {
prefix = "/"
}
var objectCount int64
var retError error
countChan := make(chan int64)
countDone := make(chan struct{})
errors := make(chan error)
errDone := make(chan struct{})
quit := make(chan struct{})
	// Consume object counts from each doWalkParallel call asynchronously to avoid blocking.
go func() {
for i := range countChan {
objectCount += i
}
countDone <- struct{}{}
}()
// If we encounter an error from any goroutine called from within doWalkParallel,
// return early from any new goroutines and return that error.
go func() {
var closed bool
// Consume all errors to prevent goroutines from blocking and to
// report errors from goroutines that were already in progress.
for err := range errors {
// Signal goroutines to quit only once on the first error.
if !closed {
close(quit)
closed = true
}
if err != nil {
retError = multierror.Append(retError, err)
}
}
errDone <- struct{}{}
}()
	// doWalkParallel spawns and manages its own goroutines, but it also calls
// itself recursively. Passing in a WaitGroup allows us to wait for the
// entire walk to complete without blocking on each doWalkParallel call.
var wg sync.WaitGroup
d.doWalkParallel(ctx, &wg, countChan, quit, errors, d.s3Path(path), prefix, f)
wg.Wait()
// Ensure that all object counts have been totaled before continuing.
close(countChan)
close(errors)
<-countDone
<-errDone
// S3 doesn't have the concept of empty directories, so it'll return path not found if there are no objects
if objectCount == 0 {
return storagedriver.PathNotFoundError{Path: from}
}
return retError
}
func (d *driver) TransferTo(ctx context.Context, destDriver storagedriver.StorageDriver, src, dest string) error {
return storagedriver.ErrUnsupportedMethod{}
}
type walkInfoContainer struct {
storagedriver.FileInfoFields
prefix *string
}
// Path provides the full path of the target of this file info.
func (wi walkInfoContainer) Path() string {
return wi.FileInfoFields.Path
}
// Size returns current length in bytes of the file. The return value can
// be used to write to the end of the file at path. The value is
// meaningless if IsDir returns true.
func (wi walkInfoContainer) Size() int64 {
return wi.FileInfoFields.Size
}
// ModTime returns the modification time for the file. For backends that
// don't have a modification time, the creation time should be returned.
func (wi walkInfoContainer) ModTime() time.Time {
return wi.FileInfoFields.ModTime
}
// IsDir returns true if the path is a directory.
func (wi walkInfoContainer) IsDir() bool {
return wi.FileInfoFields.IsDir
}
func (d *driver) doWalk(parentCtx context.Context, objectCount *int64, path, prefix string, f storagedriver.WalkFn) error {
var retError error
listObjectsInput := &s3.ListObjectsV2Input{
Bucket: aws.String(d.Bucket),
Prefix: aws.String(path),
Delimiter: aws.String("/"),
MaxKeys: aws.Int64(listMax),
}
ctx, done := dcontext.WithTrace(parentCtx)
defer done("s3aws.ListObjectsV2Pages(%s)", path)
listObjectErr := d.S3.ListObjectsV2PagesWithContext(ctx, listObjectsInput, func(objects *s3.ListObjectsV2Output, lastPage bool) bool {
var count int64
// KeyCount was introduced with version 2 of the GET Bucket operation in S3.
		// Some S3 implementations don't support V2 yet, so we fall back to manual
// calculation of the key count if required
if objects.KeyCount != nil {
count = *objects.KeyCount
} else {
count = int64(len(objects.Contents) + len(objects.CommonPrefixes))
}
*objectCount += count
walkInfos := make([]walkInfoContainer, 0, count)
for _, dir := range objects.CommonPrefixes {
commonPrefix := *dir.Prefix
walkInfos = append(walkInfos, walkInfoContainer{
prefix: dir.Prefix,
FileInfoFields: storagedriver.FileInfoFields{
IsDir: true,
Path: strings.Replace(commonPrefix[:len(commonPrefix)-1], d.s3Path(""), prefix, 1),
},
})
}
for _, file := range objects.Contents {
walkInfos = append(walkInfos, walkInfoContainer{
FileInfoFields: storagedriver.FileInfoFields{
IsDir: false,
Size: *file.Size,
ModTime: *file.LastModified,
Path: strings.Replace(*file.Key, d.s3Path(""), prefix, 1),
},
})
}
sort.SliceStable(walkInfos, func(i, j int) bool { return walkInfos[i].FileInfoFields.Path < walkInfos[j].FileInfoFields.Path })
for _, walkInfo := range walkInfos {
err := f(walkInfo)
if err == storagedriver.ErrSkipDir {
if walkInfo.IsDir() {
continue
} else {
break
}
} else if err != nil {
retError = err
return false
}
if walkInfo.IsDir() {
if err := d.doWalk(ctx, objectCount, *walkInfo.prefix, prefix, f); err != nil {
retError = err
return false
}
}
}
return true
})
if retError != nil {
return retError
}
if listObjectErr != nil {
return listObjectErr
}
return nil
}
func (d *driver) doWalkParallel(parentCtx context.Context, wg *sync.WaitGroup, countChan chan<- int64, quit <-chan struct{}, errors chan<- error, path, prefix string, f storagedriver.WalkFn) {
listObjectsInput := &s3.ListObjectsV2Input{
Bucket: aws.String(d.Bucket),
Prefix: aws.String(path),
Delimiter: aws.String("/"),
MaxKeys: aws.Int64(listMax),
}
ctx, done := dcontext.WithTrace(parentCtx)
defer done("s3aws.ListObjectsV2Pages(%s)", path)
listObjectErr := d.S3.ListObjectsV2PagesWithContext(ctx, listObjectsInput, func(objects *s3.ListObjectsV2Output, lastPage bool) bool {
select {
		// The walk was canceled; return to stop requests for pages and prevent goroutines from leaking.
case <-quit:
return false
default:
var count int64
// KeyCount was introduced with version 2 of the GET Bucket operation in S3.
			// Some S3 implementations don't support V2 yet, so we fall back to manual
// calculation of the key count if required
if objects.KeyCount != nil {
count = *objects.KeyCount
} else {
count = int64(len(objects.Contents) + len(objects.CommonPrefixes))
}
countChan <- count
walkInfos := make([]walkInfoContainer, 0, count)
for _, dir := range objects.CommonPrefixes {
commonPrefix := *dir.Prefix
walkInfos = append(walkInfos, walkInfoContainer{
prefix: dir.Prefix,
FileInfoFields: storagedriver.FileInfoFields{
IsDir: true,
Path: strings.Replace(commonPrefix[:len(commonPrefix)-1], d.s3Path(""), prefix, 1),
},
})
}
for _, file := range objects.Contents {
walkInfos = append(walkInfos, walkInfoContainer{
FileInfoFields: storagedriver.FileInfoFields{
IsDir: false,
Size: *file.Size,
ModTime: *file.LastModified,
Path: strings.Replace(*file.Key, d.s3Path(""), prefix, 1),
},
})
}
for _, walkInfo := range walkInfos {
wg.Add(1)
wInfo := walkInfo
go func() {
defer wg.Done()
err := f(wInfo)
if err == storagedriver.ErrSkipDir && wInfo.IsDir() {
return
}
if err != nil {
errors <- err
}
if wInfo.IsDir() {
d.doWalkParallel(ctx, wg, countChan, quit, errors, *wInfo.prefix, prefix, f)
}
}()
}
}
return true
})
if listObjectErr != nil {
errors <- listObjectErr
}
}
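// s3Path prefixes a storage driver path with the configured root directory,
// e.g. RootDirectory "/registry" and path "/docker" yield the bucket key
// "registry/docker".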
func (d *driver) s3Path(path string) string {
return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
}
// S3BucketKey returns the s3 bucket key for the given storage driver path.
func (d *Driver) S3BucketKey(path string) string {
return d.StorageDriver.(*driver).s3Path(path)
}
func parseError(path string, err error) error {
if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "NoSuchKey" {
return storagedriver.PathNotFoundError{Path: path}
}
return err
}
func (d *driver) getEncryptionMode() *string {
if !d.Encrypt {
return nil
}
if d.KeyID == "" {
return aws.String("AES256")
}
return aws.String("aws:kms")
}
func (d *driver) getSSEKMSKeyID() *string {
if d.KeyID != "" {
return aws.String(d.KeyID)
}
return nil
}
func (d *driver) getContentType() *string {
return aws.String("application/octet-stream")
}
func (d *driver) getACL() *string {
return aws.String(d.ObjectACL)
}
func (d *driver) getStorageClass() *string {
if d.StorageClass == noStorageClass {
return nil
}
return aws.String(d.StorageClass)
}
// writer attempts to upload parts to S3 in a buffered fashion where the last
// part is at least as large as the chunksize, so the multipart upload could be
// cleanly resumed in the future. This is violated if Close is called after less
// than a full chunk is written.
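// Internally, readyPart buffers the part to upload next and pendingPart
// buffers the data after it; Write only flushes once both are full, so every
// part uploaded mid-stream is exactly ChunkSize bytes.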
type writer struct {
driver *driver
key string
uploadID string
parts []*s3.Part
size int64
readyPart []byte
pendingPart []byte
closed bool
committed bool
canceled bool
}
func (d *driver) newWriter(key, uploadID string, parts []*s3.Part) storagedriver.FileWriter {
var size int64
for _, part := range parts {
size += *part.Size
}
return &writer{
driver: d,
key: key,
uploadID: uploadID,
parts: parts,
size: size,
}
}
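// completedParts implements sort.Interface so parts can be ordered by part
// number before CompleteMultipartUpload, which requires ascending order.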
type completedParts []*s3.CompletedPart
func (a completedParts) Len() int { return len(a) }
func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
func (w *writer) Write(p []byte) (int, error) {
ctx := context.Background()
if w.closed {
return 0, fmt.Errorf("already closed")
} else if w.committed {
return 0, fmt.Errorf("already committed")
} else if w.canceled {
return 0, fmt.Errorf("already canceled")
}
// If the last written part is smaller than minChunkSize, we need to make a
// new multipart upload :sadface:
if len(w.parts) > 0 && int(*w.parts[len(w.parts)-1].Size) < minChunkSize {
var completedUploadedParts completedParts
for _, part := range w.parts {
completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
sort.Sort(completedUploadedParts)
_, err := w.driver.S3.CompleteMultipartUploadWithContext(
ctx,
&s3.CompleteMultipartUploadInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
UploadId: aws.String(w.uploadID),
MultipartUpload: &s3.CompletedMultipartUpload{
Parts: completedUploadedParts,
},
})
if err != nil {
w.driver.S3.AbortMultipartUploadWithContext(
ctx,
&s3.AbortMultipartUploadInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
UploadId: aws.String(w.uploadID),
})
return 0, err
}
resp, err := w.driver.S3.CreateMultipartUploadWithContext(
ctx,
&s3.CreateMultipartUploadInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
ContentType: w.driver.getContentType(),
ACL: w.driver.getACL(),
ServerSideEncryption: w.driver.getEncryptionMode(),
StorageClass: w.driver.getStorageClass(),
})
if err != nil {
return 0, err
}
w.uploadID = *resp.UploadId
// If the entire written file is smaller than minChunkSize, we need to make
// a new part from scratch :double sad face:
if w.size < minChunkSize {
resp, err := w.driver.S3.GetObjectWithContext(
ctx,
&s3.GetObjectInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
})
if err != nil {
return 0, err
}
defer resp.Body.Close()
w.parts = nil
w.readyPart, err = ioutil.ReadAll(resp.Body)
if err != nil {
return 0, err
}
} else {
// Otherwise we can use the old file as the new first part
copyPartResp, err := w.driver.S3.UploadPartCopyWithContext(
ctx,
&s3.UploadPartCopyInput{
Bucket: aws.String(w.driver.Bucket),
CopySource: aws.String(w.driver.Bucket + "/" + w.key),
Key: aws.String(w.key),
PartNumber: aws.Int64(1),
UploadId: resp.UploadId,
})
if err != nil {
return 0, err
}
w.parts = []*s3.Part{
{
ETag: copyPartResp.CopyPartResult.ETag,
PartNumber: aws.Int64(1),
Size: aws.Int64(w.size),
},
}
}
}
var n int
for len(p) > 0 {
// If no parts are ready to write, fill up the first part
if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 {
if len(p) >= neededBytes {
w.readyPart = append(w.readyPart, p[:neededBytes]...)
n += neededBytes
p = p[neededBytes:]
} else {
w.readyPart = append(w.readyPart, p...)
n += len(p)
p = nil
}
}
if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 {
if len(p) >= neededBytes {
w.pendingPart = append(w.pendingPart, p[:neededBytes]...)
n += neededBytes
p = p[neededBytes:]
err := w.flushPart()
if err != nil {
w.size += int64(n)
return n, err
}
} else {
w.pendingPart = append(w.pendingPart, p...)
n += len(p)
p = nil
}
}
}
w.size += int64(n)
return n, nil
}
func (w *writer) Size() int64 {
return w.size
}
func (w *writer) Close() error {
if w.closed {
return fmt.Errorf("already closed")
}
w.closed = true
return w.flushPart()
}
func (w *writer) Cancel() error {
if w.closed {
return fmt.Errorf("already closed")
} else if w.committed {
return fmt.Errorf("already committed")
}
w.canceled = true
_, err := w.driver.S3.AbortMultipartUploadWithContext(
context.Background(),
&s3.AbortMultipartUploadInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
UploadId: aws.String(w.uploadID),
})
return err
}
func (w *writer) Commit() error {
ctx := context.Background()
if w.closed {
return fmt.Errorf("already closed")
} else if w.committed {
return fmt.Errorf("already committed")
} else if w.canceled {
return fmt.Errorf("already canceled")
}
err := w.flushPart()
if err != nil {
return err
}
w.committed = true
var completedUploadedParts completedParts
for _, part := range w.parts {
completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
sort.Sort(completedUploadedParts)
_, err = w.driver.S3.CompleteMultipartUploadWithContext(
ctx,
&s3.CompleteMultipartUploadInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
UploadId: aws.String(w.uploadID),
MultipartUpload: &s3.CompletedMultipartUpload{
Parts: completedUploadedParts,
},
})
if err != nil {
w.driver.S3.AbortMultipartUploadWithContext(
ctx,
&s3.AbortMultipartUploadInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
UploadId: aws.String(w.uploadID),
})
return err
}
return nil
}
// flushPart flushes buffers to write a part to S3.
// Only called by Write (with both buffers full) and Close/Commit (always)
func (w *writer) flushPart() error {
if len(w.readyPart) == 0 && len(w.pendingPart) == 0 {
// nothing to write
return nil
}
if len(w.pendingPart) < int(w.driver.ChunkSize) {
// closing with a small pending part
// combine ready and pending to avoid writing a small part
w.readyPart = append(w.readyPart, w.pendingPart...)
w.pendingPart = nil
}
partNumber := aws.Int64(int64(len(w.parts) + 1))
resp, err := w.driver.S3.UploadPartWithContext(
context.Background(),
&s3.UploadPartInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
PartNumber: partNumber,
UploadId: aws.String(w.uploadID),
Body: bytes.NewReader(w.readyPart),
})
if err != nil {
return err
}
w.parts = append(w.parts, &s3.Part{
ETag: resp.ETag,
PartNumber: partNumber,
Size: aws.Int64(int64(len(w.readyPart))),
})
w.readyPart = w.pendingPart
w.pendingPart = nil
return nil
}
| [
"\"AWS_ROLE_ARN\"",
"\"AWS_WEB_IDENTITY_TOKEN_FILE\"",
"\"AWS_ROLE_SESSION_NAME\""
] | [] | [
"AWS_ROLE_ARN",
"AWS_ROLE_SESSION_NAME",
"AWS_WEB_IDENTITY_TOKEN_FILE"
] | [] | ["AWS_ROLE_ARN", "AWS_ROLE_SESSION_NAME", "AWS_WEB_IDENTITY_TOKEN_FILE"] | go | 3 | 0 | |
generate_projects_and_tasks.py | from lib.jobcan import JobcanInput
import pandas as pd
from argparse import ArgumentParser
import os
parser = ArgumentParser()
parser.add_argument('--chrome_driver_path', help='chrome driver path')
parser.add_argument('--client_id', help='client_id')
parser.add_argument('--email', help='email')
parser.add_argument('--password', help='password')
args = parser.parse_args()
# Get credentials
CHROMEDRIVER_PATH = args.chrome_driver_path if args.chrome_driver_path else os.environ.get('JOBCAN_CHROMEDRIVER_PATH')
CLIENT_ID = args.client_id if args.client_id else os.environ.get('JOBCAN_CLIENT_ID')
EMAIL = args.email if args.email else os.environ.get('JOBCAN_EMAIL')
PASSWORD = args.password if args.password else os.environ.get('JOBCAN_PASSWORD')
# Retrieve projects and tasks list
jobcan_cli = JobcanInput(CHROMEDRIVER_PATH, client_id=CLIENT_ID, email=EMAIL, password=PASSWORD)
jobcan_cli.login()
jobcan_cli.open_man_hour_manage()
jobcan_cli.select_date(open=True)
jobcan_cli.add_blank_record()
projects_and_tasks = jobcan_cli.get_projects_and_tasks()
jobcan_cli.quit()
print(projects_and_tasks)
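# projects_and_tasks maps each project name to a list of its task names,
# e.g. {"ProjectA": ["Design", "Review"], "ProjectB": ["QA"]}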
# create csv file: collect the rows first, then build the DataFrame once
# (appending row-by-row is slow, and DataFrame.append was removed in pandas 2.0)
rows = [(project, task)
        for project, tasks in projects_and_tasks.items()
        for task in tasks]
df = pd.DataFrame(rows, columns=["project", "task"])
df.to_csv("project_task.csv", index=False)
| [] | [] | [
"JOBCAN_CLIENT_ID",
"JOBCAN_EMAIL",
"JOBCAN_PASSWORD",
"JOBCAN_CHROMEDRIVER_PATH"
] | [] | ["JOBCAN_CLIENT_ID", "JOBCAN_EMAIL", "JOBCAN_PASSWORD", "JOBCAN_CHROMEDRIVER_PATH"] | python | 4 | 0 | |
algutil/pipette_server/pipetteClient.py | import string
import random
import datetime
import time
import os
import sys
import platform
import getpass
class PipetteClient:
def __init__(self, pipelineOutDir,
pipelineName='NamelessPipeline',
defaultCaching='False',
defaultCleanUpJobFilesOnFail='False',
defaultCleanUpPipelineJobsOnFail='True',
defaultExecutionEngine='mockPrint',
pipelinePriority=50,
injectionMap={},
communicationDirBase=None
):
# Set the communication dir in a platform specific manner.
# Must be modified in a new environment.
if communicationDirBase == None:
try:
username = getpass.getuser()
except KeyError:
username = 'unknown'
platform_name = platform.system()
if platform_name == 'Windows':
communicationDirBase = os.path.join('u:\\','pipette',username)
elif platform_name == 'Linux':
home = os.environ['HOME']
communicationDirBase = os.path.join(home,'pipette_queue')
else:
raise Exception('unknown platform type')
self._communication_dir_base = communicationDirBase
self._launch_subdir = 'launch'
self._status_subdir = 'status'
if 'ipette' not in pipelineOutDir:
raise Exception('"Pipette" or "pipette" must be somewhere in pipelineOutDir, for safety against rmtree')
self._outfile_set = set()
self._last_timestamp=''
self._token_counter = 0
pipelineTimestamp_val = self._get_timestamp()
token = self._get_unique_token(pipelineTimestamp_val)
pipelineId_val = token + '.' + pipelineName
self._pipelineOutDir = pipelineOutDir
self._pipelineName = pipelineName
self._pipelineTimestamp = pipelineTimestamp_val
self._pipelineId = pipelineId_val
self._defaultCaching = defaultCaching
self._defaultCleanUpJobFilesOnFail = defaultCleanUpJobFilesOnFail
self._defaultCleanUpPipelineJobsOnFail = defaultCleanUpPipelineJobsOnFail
self._defaultExecutionEngine = defaultExecutionEngine
self._pipelinePriority = pipelinePriority
self._injectionMap = injectionMap
self._dispense_buffer = []
self._launched = False
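    # _substitute_macros expands $MODULEOUTDIR, $PIPELINEOUTDIR and any $KEY
    # from the injection map, e.g. '$PIPELINEOUTDIR/logs' becomes
    # '<pipelineOutDir>/logs'.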
def _substitute_macros(self,in_str,module_outdir):
out_str = in_str
if module_outdir != None:
out_str = out_str.replace('$MODULEOUTDIR',module_outdir)
out_str = out_str.replace('$PIPELINEOUTDIR',self._pipelineOutDir)
for key in self._injectionMap:
out_str = out_str.replace('$'+key,self._injectionMap[key])
return out_str
def _get_unique_token(self,timestamp_arg=None):
#create unique string that sorts in order of creation
#get a timestamp string, ordered years to seconds
if timestamp_arg==None:
ts = self._get_timestamp()
else:
ts = timestamp_arg
# set token counter to make timestamp unique within this instance of the pipette client.
if ts == self._last_timestamp:
self._token_counter = self._token_counter+1
else:
self._token_counter = 0
self._last_timestamp = ts
token_counter_str = str(self._token_counter).zfill(3)
uniq_hash = self._get_uniq_hash()
token = ts + '.' + token_counter_str + '.' + uniq_hash
return token
def _get_uniq_hash(self):
#set random 6 character string, for easier human readability of the id's.
uniq_hash = ''
for i in range(6):
uniq_hash = uniq_hash + random.choice(string.ascii_letters)
return uniq_hash
def _get_timestamp(self):
t=datetime.datetime.now()
timestamp = str(t.year) + '_' + str(t.month).zfill(2) + '_' + str(t.day).zfill(2) + '__' + \
str(t.hour).zfill(2) + '_' + str(t.minute).zfill(2) + '_' + str(t.second).zfill(2)
return timestamp
def _get_timestamp_delta(self,ts_begin,ts_end):
        begin_secs = self._timestamp_to_seconds(ts_begin)
        end_secs = self._timestamp_to_seconds(ts_end)
duration = end_secs - begin_secs
return duration
def _timestamp_to_seconds(self,timestamp):
(year,month,day,junk,hour,minute,second)= timestamp.split('_')
(year,month,day,junk,hour,minute,second) = (
int(year),int(month),int(day),junk,int(hour),int(minute),int(second))
dt = datetime.datetime(year,month,day,hour,minute,second)
days = dt.toordinal()
days = days - 733000 # keep the seconds to a manageable size...
secs = days*24*60*60 + hour*60*60 + minute*60 + second
return secs
def dispense(self, moduleSubDir, cmdStr, resources, jobName= "NamelessJob", inputFiles=[],
filesToBeOutput=[], filesToBeDeleted=[],
caching='PipelineDefault',cleanUpJobFilesOnFail='PipelineDefault',
cleanUpPipelineJobsOnFail='PipelineDefault',
executionEngine='PipelineDefault'):
if self._launched == True:
            raise Exception ('pipeline already launched, so a new pipeline must be instantiated to continue')
# Create unique jobId.
token = self._get_unique_token()
moduleSubDir_thunked = moduleSubDir.replace(os.path.sep,'_')
jobId = token + '.' + self._pipelineName + '.' + jobName + '.' + moduleSubDir_thunked
module_outdir = os.path.join(self._pipelineOutDir,moduleSubDir)
# Expand macros in all args except in jobName and outSubDir.
inputFiles_sub = []
for fn in inputFiles:
fn_substituted = self._substitute_macros(fn,module_outdir)
inputFiles_sub.append(fn_substituted)
filesToBeOutput_sub = []
for fn in filesToBeOutput:
fn_substituted = self._substitute_macros(fn,module_outdir)
filesToBeOutput_sub.append(fn_substituted)
filesToBeDeleted_sub = []
for fn in filesToBeDeleted:
fn_substituted = self._substitute_macros(fn,module_outdir)
filesToBeDeleted_sub.append(fn_substituted)
resources_sub={}
for key in resources:
key_sub = self._substitute_macros(key,module_outdir)
value_sub = self._substitute_macros(str(resources[key]),module_outdir)
resources_sub[key_sub]=value_sub
# Do NOT replace JOBOUTDIR in cmdStr, that must be done later
cmdStr_sub = self._substitute_macros(cmdStr,None)
caching_sub = self._substitute_macros(caching,module_outdir)
cleanUpJobFilesOnFail_sub = self._substitute_macros(cleanUpJobFilesOnFail,module_outdir)
cleanUpPipelineJobsOnFail_sub = self._substitute_macros(cleanUpPipelineJobsOnFail,module_outdir)
executionEngine_sub = self._substitute_macros(executionEngine,module_outdir)
# Substitute pipeline default values
if caching_sub == 'PipelineDefault':
caching_sub = self._defaultCaching
if cleanUpJobFilesOnFail_sub == 'PipelineDefault':
cleanUpJobFilesOnFail_sub = self._defaultCleanUpJobFilesOnFail
if cleanUpPipelineJobsOnFail_sub == 'PipelineDefault':
cleanUpPipelineJobsOnFail_sub = self._defaultCleanUpPipelineJobsOnFail
if executionEngine_sub == 'PipelineDefault':
executionEngine_sub = self._defaultExecutionEngine
# validate scalar arguments
if caching_sub not in ['True','False']:
raise Exception('caching must resolve to True or False. raw: '+caching+' substituted:'+caching_sub)
if cleanUpJobFilesOnFail_sub not in ['True','False']:
raise Exception('cleanUpJobFilesOnFail must resolve to True or False. raw: '+cleanUpJobFilesOnFail+' substituted:'+cleanUpJobFilesOnFail_sub)
if cleanUpPipelineJobsOnFail_sub not in ['True','False']:
raise Exception('cleanUpPipelineJobsOnFail must resolve to True or False. raw: '+cleanUpPipelineJobsOnFail+' substituted:'+cleanUpPipelineJobsOnFail_sub)
if 'maxmem' not in resources_sub or 'maxtime' not in resources_sub:
raise Exception('maxmem and maxtime resources must be specified')
# validate input files.
# For ones which are expected earlier in pipeline, verify than some previous module is making it.
# For ones which come from outside the pipeline, verify that they already exist.
pipeline_infiles = []
for infile in inputFiles_sub:
if infile.startswith(self._pipelineOutDir):
if infile in self._outfile_set:
pipeline_infiles.append(infile)
#ok - file has already been listed as output file earlier in pipeline
else:
raise Exception('Invalid input file - expected to be created by pipeline, but no previous module has listed it as an output file\n' + \
infile + '\n'+
'files present: ' + str(self._outfile_set) + '\n')
else:
if os.path.exists(infile):
pass
#ok - file outside of pipeline, and already exists
else:
raise Exception('Invalid input file - file outside of pipeline, and does not yet exist\n' +\
infile + '\n')
# record the deleted files, for subsequent jobs
for fn in filesToBeDeleted_sub:
self._outfile_set.remove(fn)
# record the output files, for subsequent jobs
for fn in filesToBeOutput_sub:
self._outfile_set.add(fn)
self._dispense_buffer.append("formatVersion-\t3\n")
self._dispense_buffer.append('moduleSubDirName-\t'+moduleSubDir_thunked+'\n')
self._dispense_buffer.append('jobId-\t' + jobId + '\n')
self._dispense_buffer.append('jobName-\t' + jobName + '\n')
self._dispense_buffer.append('pipelineId-\t' + self._pipelineId + '\n')
self._dispense_buffer.append('pipelineName-\t' + self._pipelineName + '\n')
self._dispense_buffer.append('pipelineTimestamp-\t' + self._pipelineTimestamp + '\n')
self._dispense_buffer.append('pipelinePriority-\t' + str(self._pipelinePriority) + '\n')
self._dispense_buffer.append('cmdStr-\t' + cmdStr_sub + '\n')
# Guaranteed to be at least two keys in the resources dict
res_str = ''
for key in resources_sub:
res_str = res_str + '\t' + key + '\t' + resources_sub[key]
self._dispense_buffer.append('resources:'+res_str+'\n')
self._dispense_buffer.append('pipelineOutDir-\t' + self._pipelineOutDir + '\n')
self._dispense_buffer.append('moduleOutDir-\t' + module_outdir + '\n')
infiles_tabs = '\t'.join(pipeline_infiles) + '\n'
self._dispense_buffer.append('inFiles=\t'+infiles_tabs)
outfiles_tabs = '\t'.join(filesToBeOutput_sub) + '\n'
self._dispense_buffer.append('outFiles=\t'+outfiles_tabs)
deletefiles_tabs = '\t'.join(filesToBeDeleted_sub) + '\n'
self._dispense_buffer.append('deleteFiles=\t' + deletefiles_tabs)
self._dispense_buffer.append('caching-\t'+caching_sub+'\n')
self._dispense_buffer.append('cleanUpJobFilesOnFail-\t'+cleanUpJobFilesOnFail_sub+'\n')
self._dispense_buffer.append('cleanUpPipelineJobsOnFail-\t'+cleanUpPipelineJobsOnFail_sub+'\n')
self._dispense_buffer.append('executionEngine-\t'+executionEngine_sub+'\n')
self._dispense_buffer.append('EOM-\t\n')
return filesToBeOutput_sub
def go(self):
self._dispense_buffer.append("EOF-\t\n")
# write dispense file for pipetteServer
dispense_dir = os.path.join(self._communication_dir_base,self._launch_subdir)
if not os.path.exists(dispense_dir):
try:
os.makedirs(dispense_dir)
except:
pass
fn_base = self._pipelineId + ".launch.txt"
fn = os.path.join(dispense_dir,fn_base)
fid = open(fn,"wt")
fid.writelines(self._dispense_buffer)
fid.close()
self._launched = True
def wait(self,timeout=None):
polling_interval = 60
preliminary_passes = 3
return_status = self.status()
# Allow some time for pipeline to initially launch
if return_status == 'Error':
for i in range(preliminary_passes):
time.sleep(polling_interval)
return_status = self.status()
if return_status != 'Error':
break
# Wait until pipeline finishes or until time is up
while return_status == 'InProgress':
time.sleep(polling_interval)
if timeout!=None:
current_timestamp = self._get_timestamp()
pipeline_duration = self._get_timestamp_delta(self._pipelineTimestamp,current_timestamp)
if pipeline_duration > timeout:
break
return_status = self.status()
return return_status
def status(self):
status_filename = os.path.join(self._communication_dir_base,self._status_subdir,'pipeline_only.status.txt')
fid = open(status_filename,'rt')
return_status = 'Error'
        for line in fid:
            line_list = line.split()
pipelineStatus = line_list[0]
pipelineId = line_list[1]
if pipelineId == self._pipelineId:
return_status = pipelineStatus
break
fid.close()
return return_status
| [] | [] | [
"HOME"
] | [] | ["HOME"] | python | 1 | 0 | |
742run.py | import sys
import argparse
import subprocess
import os
import shutil
import glob
irene_list = ["blackscholes", "bodytrack", "canneal", "dedup"]
maxwell_list = ["facesim", "ferret", "fluidanimate", "freqmine"]
jen_list = ["raytrace", "streamcluster", "swaptions", "vips", "x264"]
suite = {
"Maxwell":"intrate",
"Irene":"core4fprate",
"Jen":"fprate",
"Last": "intspeed"
}
suite_benchmarks = {
"intrate" : ["xalancbmk_r", "deepsjeng_r", "leela_r", "xz_r"],
"intspeed" : ["mcf_s", "omnetpp_s","xalancbmk_s", "deepsjeng_s",
"leela_s", "xz_s"],
"fpspeed" : ["cactuBSSN_s", "lbm_s", "wrf_s", "cam4_s", "pop2_s",
"imagick_s", "nab_s"],
"fprate" : ["namd_r", "parest_r", "povray_r",
"lbm_r", "wrf_r"],
"core4fprate" : ["blender_r", "cam4_r", "imagick_r", "nab_r"]
}
parsec_path = "/home/ulsi/18742/parsec-3.0"
spec_dir = "/home/ulsi/18742/spec"
gem5_path="/home/ulsi/18742/InvisiSpec-1.0"
parser = argparse.ArgumentParser(description='Run benchmarks.')
parser.add_argument('--arm', action="store_true",
help="For running an ARM benchmark. Assumes you have ARM set up for GEM5")
parser.add_argument('--output', '-o', action="store_true",
help="Will output a compressed log file named after exe if set")
parser.add_argument('--fs', action="store_true",
help="If you want to use full system instead of syscall emulation");
parser.add_argument('--exe', default="attack_code/spectre_full.out",
help="The program you want to benchmark")
parser.add_argument('--flags', default="",
help="Debug flags you want set - use one string, comma separated")
parser.add_argument('--setupparsec', default="",
help="Usage: '--setup <Jen, Irene, Maxwell> (choose your name)'")
parser.add_argument('--setupspec', default="",
help="Usage: '--setup <Jen, Irene, Maxwell> (choose your name)'")
parser.add_argument('--runparsec', default="",
help="""Usage: '--runparsec <Jen, Irene, Maxwell> (choose your name).
Assumes the correct setup has been run already.'""")
parser.add_argument('--runspec', default="",
help="""Usage: '--runparsec <Jen, Irene, Maxwell> (choose your name).
Assumes the correct setup has been run already.'""")
parser.add_argument('--cpu', default="DerivO3CPU",
help="The CPU model for GEM5. Default iS Deriv03CPU")
parser.add_argument('--start', default="",
help="CPU ticks to start logging at")
def setup_command_line(args):
arch = "X86"
flags = ""
output = ""
start = ""
cpu = "DerivO3CPU"
extra = ""
if args.fs:
config="fs"
exe = "--script="
extra = "--kernel=vmlinux --disk-image=amd64-linux.img"
else:
exe = "--cmd="
config = "se"
if args.arm:
arch = "ARM"
if args.exe:
exe += args.exe
else:
exe +="spectre_full.out"
if args.output:
output = "--debug-file=" + exe.split("/")[-1].split(".")[0]+".out.gz"
if args.flags:
flags = "--debug-flags=%s"%(args.flags)
if args.start:
start = "--debug-start=%s"(args.start)
s = """build/{arch}/gem5.opt {flags} {output} {start} \
configs/example/{config}.py \
{exe} --cpu-type={cpu} --caches --l1d_size=64kB --l1i_size=16kB \
--needsTSO=0 --scheme=UnsafeBaseline {extra}""".format(
arch=arch, config=config, exe=exe, flags=flags, output=output,
cpu=cpu, start=start, extra=extra)
return s
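# With the argument defaults, setup_command_line produces a command roughly like:
#   build/X86/gem5.opt  configs/example/se.py --cmd=attack_code/spectre_full.out \
#   --cpu-type=DerivO3CPU --caches --l1d_size=64kB --l1i_size=16kB \
#   --needsTSO=0 --scheme=UnsafeBaseline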
def setup_parsec():
os.environ["M5_PATH"] = "/home/ulsi/18742/InvisiSpec-1.0/x86-system"
gcc_bldconf = os.path.join(parsec_path, "config", "gcc.bldconf")
ret_bldconf = os.path.join(parsec_path, "config", "ret.bldconf")
# Add "-case-values-threshold 1" so the case statements don't get optimized
with open(gcc_bldconf, 'r') as file :
filedata = file.read()
# Replace the target string
filedata = filedata.replace('-fprefetch-loop-arrays ',
'-fprefetch-loop-arrays -case-values-threshold 1 ')
# Write the file out again
with open(gcc_bldconf, 'w') as file:
file.write(filedata)
# Create the ret_bldconf by copying gcc bldconf
shutil.copyfile(gcc_bldconf, ret_bldconf)
# Add the -mindirect-branch=thunk flag
with open(ret_bldconf, 'r') as file :
filedata = file.read()
# Replace the target string
filedata = filedata.replace('-fprefetch-loop-arrays ',
'-fprefetch-loop-arrays \
-mindirect-branch=thunk \
--param case-values-threshold 1')
# Write the file out again
with open(ret_bldconf, 'w') as file:
file.write(filedata)
# Set up the config files
pkg_dir = os.path.join(parsec_path, "pkgs")
# For all the apps and dependencies, we need to copy local gcc.bldconf
# files to ret.bldconf
for dirs in os.listdir(pkg_dir):
app_dir = os.path.join(pkg_dir, dirs)
for apps in os.listdir(app_dir):
cfg_dir = os.path.join(app_dir, apps)
current_cfg = os.path.join(cfg_dir, "parsec", "gcc.bldconf")
new_cfg = os.path.join(cfg_dir, "parsec", "ret.bldconf")
if os.path.exists(current_cfg):
shutil.copyfile(current_cfg, new_cfg)
def build_parsec(list):
os.chdir(parsec_path)
for workload in list:
subprocess.call(["bin/parsecmgmt", "-a", "build", "-c", "ret", "-p",
workload])
subprocess.call(["bin/parsecmgmt", "-a", "build", "-c", "gcc", "-p",
workload])
def setup_spec(person):
# doing extremely secure things with sudo password to mount the SPEC CD
command_line = "echo 18664 | sudo -S \
mount -t iso9660 /dev/cdrom /media/cdrom"
subprocess.call(command_line, shell=True)
# update gcc while we're doing this
command_line = "sudo apt -y update && \
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test && \
sudo apt -y install gcc-9 g++-9"
subprocess.call(command_line, shell = True)
# Install SPEC
command_line = "sudo /media/cdrom/install.sh -d \
{dir} -f".format(dir = spec_dir)
subprocess.call(command_line, shell = True)
orig_bldconf = "{dir}/config/Example-gcc-linux-x86.cfg".format(
dir = spec_dir)
gcc_bldconf = "{dir}/config/baseline.cfg".format(dir = spec_dir)
ret_bldconf = "{dir}/config/retpoline.cfg".format(dir = spec_dir)
shutil.copyfile(orig_bldconf, gcc_bldconf)
with open(gcc_bldconf, 'r') as file :
filedata = file.read()
# Update label
filedata = filedata.replace("label mytest ",
'label baseline')
# Update number of cores to build with
filedata = filedata.replace("build_ncpus 8", "build_ncpus 2")
filedata = filedata.replace("CC = $(SPECLANG)gcc ",
"CC = $(SPECLANG)gcc-9 ")
filedata = filedata.replace("CXX = $(SPECLANG)g++",
"CXX = $(SPECLANG)g++-9")
filedata = filedata.replace("FC = $(SPECLANG)fortran",
"FC = $(SPECLANG)fortran-9")
filedata = filedata.replace("gcc_dir /opt/rh/devtoolset-7/root/usr",
"gcc_dir /usr")
# Add -case-values-threshold 1 to not optimize out indirect jumps
# (do we want this?)
filedata = filedata.replace("-O3 -march=native ",
"-O3 -march=native --param case-values-threshold=1 ")
# Write the file out again
with open(gcc_bldconf, 'w') as file:
file.write(filedata)
shutil.copyfile(gcc_bldconf, ret_bldconf)
with open(ret_bldconf, 'r') as file :
filedata = file.read()
# Update label and add flags
filedata = filedata.replace("label baseline ",
'label ret')
filedata = filedata.replace("-O3 -march=native ",
"-O3 -march=native -mindirect-branch=thunk ")
# Write the file out again
with open(ret_bldconf, 'w') as file:
file.write(filedata)
# Source the shrc and test the build
subprocess.call("cd {dir} && chmod +x shrc && ./shrc \
&& runcpu --config=baseline.cfg \
--action=runsetup --threads=1 \
--size=ref \
{suite}".format(suite=suite[person],
dir = spec_dir), shell=True)
# Source the shrc and test the build
subprocess.call("cd {dir} && ./shrc \
&& runcpu --config=ret.cfg \
--action=runsetup --threads=1 \
--size=ref \
{suite}".format(suite=suite[person], dir=spec_dir),
shell=True)
def run_spec(user):
benchmarks = suite_benchmarks[suite[user]]
rate_speed = "rate"
if "speed" in suite[user]:
rate_speed = "speed"
base_dir = "run_base_ref{rs}_baseline-m64.0000".format(rs=rate_speed)
ret_dir = "run_base_ref{rs}_ret-m64.0000".format(rs=rate_speed)
for benchmark in benchmarks:
bench_top_dir = glob.glob(
"{spec_dir}/benchspec/CPU/*.{benchmark}/run".format(
spec_dir=spec_dir, benchmark=benchmark))
if not bench_top_dir:
            print("ERROR: Could not locate benchmark top level directory "
                  "for {}".format(benchmark))
continue
bench_top_dir = bench_top_dir[0]
bench_base_dir = os.path.join(bench_top_dir, base_dir)
bench_ret_dir = os.path.join(bench_top_dir, ret_dir)
print("Benchmark baseline: {}".format(bench_base_dir))
print("Benchmark retpoline: {}".format(bench_ret_dir))
specinvoke = subprocess.check_output(
"{spec_dir}/bin/specinvoke -n {bench_dir}/speccmds.cmd | \
grep -v '#'".format(
spec_dir=spec_dir, bench_dir=bench_base_dir), shell=True)
print(specinvoke)
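        # specinvoke prints the raw shell command; keep only its first line
        # and drop everything from the first '<' or '>' redirection onward.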
specinvoke = specinvoke.split("\n")[0]
specinvoke = specinvoke.split()
idx1 = specinvoke.index(">") if ">" in specinvoke else len(specinvoke)
idx2 = specinvoke.index("<") if "<" in specinvoke else len(specinvoke)
bench_bin = specinvoke[0]
bench_opts = specinvoke[1:min(idx1, idx2)]
print("\n--- Running simulation: {} {} ---".format(
bench_bin, " ".join(bench_opts)))
# From the exp_script
run_cmd = ("{gem5_path}/build/X86/gem5.opt " +
"{gem5_path}/configs/example/se.py --output=gem5_run.log " +
"--cmd={bench_bin} --options=\'{bench_opts}\' " +
"--num-cpus=1 --mem-size=2GB " +
"--l1d_assoc=8 --l2_assoc=16 --l1i_assoc=4 " +
"--cpu-type=DerivO3CPU --needsTSO=0 --scheme=UnsafeBaseline " +
"--caches --maxinsts=2000000000 ").format(
gem5_path=gem5_path,
bench_bin=bench_bin, bench_opts=" ".join(bench_opts))
print("\n--- GEM5 run_cmd: {} ---".format(run_cmd))
try:
print("\n--- GEM5 running baseline simulation: \
{} > {} ---\n".format(
bench_base_dir, os.path.join(bench_base_dir, "gem5_run.log")))
subprocess.call("cd {} && {}".format(bench_base_dir,
run_cmd), shell=True)
except subprocess.CalledProcessError as e:
print("ERROR: GEM5 baseline simulation returned errcode {}".format(
e.returncode))
continue
# Run retpoline compiled code
specinvoke = subprocess.check_output(
"{spec_dir}/bin/specinvoke -n \
{bench_dir}/speccmds.cmd | grep -v '#'".format(
spec_dir=spec_dir, bench_dir=bench_ret_dir), shell=True)
specinvoke = specinvoke.split("\n")[0]
specinvoke = specinvoke.split()
idx1 = specinvoke.index(">") if ">" in specinvoke else len(specinvoke)
idx2 = specinvoke.index("<") if "<" in specinvoke else len(specinvoke)
bench_bin = specinvoke[0]
bench_opts = specinvoke[1:min(idx1, idx2)]
print("\n--- Running simulation: {} \
{} ---".format(bench_bin, " ".join(bench_opts)))
# From the exp_script
run_cmd = ("{gem5_path}/build/X86/gem5.opt " +
"{gem5_path}/configs/example/se.py --output=gem5_run.log " +
"--cmd={bench_bin} --options=\'{bench_opts}\' " +
"--num-cpus=1 --mem-size=2GB " +
"--l1d_assoc=8 --l2_assoc=16 --l1i_assoc=4 " +
"--cpu-type=DerivO3CPU --needsTSO=0 --scheme=UnsafeBaseline " +
"--caches --maxinsts=2000000000 ").format(
gem5_path=gem5_path,
bench_bin=bench_bin, bench_opts=" ".join(bench_opts))
print("\n--- GEM5 run_cmd: {} ---".format(run_cmd))
try:
print("\n--- GEM5 running ret simulation: {} > {} ---\n".format(
bench_ret_dir, os.path.join(bench_base_dir, "gem5_run.log")))
subprocess.call("cd {} && {}".format(bench_ret_dir,
run_cmd), shell=True)
except subprocess.CalledProcessError as e:
print("ERROR: GEM5 ret simulation returned errcode {}".format(
e.returncode))
continue
def run_parsec(workload_list):
arch = "X86"
flags = ""
output = ""
start = ""
cpu = "DerivO3CPU"
extra = "--kernel=vmlinux --disk-image=amd64-linux.img"
if args.arm:
arch = "ARM"
if args.flags:
flags = "--debug-flags=%s"%(args.flags)
if args.start:
start = "--debug-start=%s"(args.start)
for workload in workload_list:
# Set up and run the normal gcc version
script_name = workload + "_gcc"
if args.output:
output = "--debug-file=" + script_name +".out.gz"
s = """build/{arch}/gem5.opt {flags} {output} {start} \
configs/example/fs.py {extra} \
--script={exe} --cpu-type={cpu} --caches --l1d_size=64kB \
--l1i_size=16kB --needsTSO=0 --scheme=UnsafeBaseline \
""".format(
arch=arch, exe=script_name, flags=flags,
output=output, cpu=cpu, start=start, extra=extra)
subprocess.call(s.split())
print("\nDone running %s \n", script_name)
# Move the stats file so that running other files doesn't clobber it
old_stats_file = "/home/ulsi/18742/InvisiSpec-1.0/m5out/stats.txt"
new_stats_file = "/home/ulsi/18742/InvisiSpec-1.0/m5out/" + \
"{sname}_stats.txt".format(sname = script_name)
shutil.copyfile(old_stats_file, new_stats_file)
# Set up and run the retpoline compiled version
script_name = workload + "_ret"
if args.output:
output = "--debug-file=" + script_name +".out.gz"
s = """build/{arch}/gem5.opt {flags} {output} {start} \
configs/example/fs.py {extra} \
--script=runparsec/{exe} --cpu-type={cpu} --caches \
--l1d_size=64kB \
--l1i_size=16kB --needsTSO=0 --scheme=UnsafeBaseline \
""".format(
arch=arch, exe=script_name, flags=flags,
output=output, cpu=cpu, start=start, extra=extra)
subprocess.call(s.split())
print("\nDone running %s \n", script_name)
# Just used this to copy the gcc shell scripts so a ret version existed too
def copy_gcc_ret():
workloads = jen_list + irene_list + maxwell_list
for workload in workloads:
gcc_file = os.path.join("/home/ulsi/18742/InvisiSpec-1.0/runparsec",
workload + "_gcc")
ret_file = os.path.join("/home/ulsi/18742/InvisiSpec-1.0/runparsec",
workload + "_ret")
if (not os.path.exists(ret_file)):
shutil.copyfile(gcc_file, ret_file)
# Replace the "gcc" with "ret"
with open(ret_file, 'r') as file :
filedata = file.read()
# Replace the target string
filedata = filedata.replace('gcc', 'ret')
# Write the file out again
with open(ret_file, 'w') as file:
file.write(filedata)
# Make it executable
os.chmod(ret_file, 0o777)
if __name__ == "__main__":
os.environ["M5_PATH"] = "/home/ulsi/18742/InvisiSpec-1.0/x86-system"
args = parser.parse_args()
if (args.setupspec != ""):
setup_spec(args.setupspec)
if (args.runspec != ""):
run_spec(args.runspec)
if (args.setupparsec == "Jen"):
setup_parsec()
build_parsec(jen_list)
if (args.setupparsec == "Irene"):
setup_parsec()
build_parsec(irene_list)
if (args.setupparsec == "Maxwell"):
setup_parsec()
build_parsec(maxwell_list)
if (args.runparsec == "Jen"):
run_parsec(jen_list)
if (args.runparsec == "Irene"):
run_parsec(irene_list)
if (args.runparsec == "Maxwell"):
run_parsec(maxwell_list)
elif (args.runparsec == ""
and args.runspec == ""
and args.setupparsec == ""
and args.setupspec == ""):
command_line = setup_command_line(args).split()
print(command_line)
subprocess.call(command_line)
| [] | [] | [
"M5_PATH"
] | [] | ["M5_PATH"] | python | 1 | 0 | |
components/isceobj/TopsProc/runDenseOffsets.py | #
# Author: Joshua Cohen
# Copyright 2016
# Based on Piyush Agram's denseOffsets.py script
#
import os
import isce
import isceobj
import logging
from isceobj.Util.decorators import use_api
logger = logging.getLogger('isce.insar.DenseOffsets')
def runDenseOffsets(self):
'''
Run CPU / GPU version depending on user choice and availability.
'''
if not self.doDenseOffsets:
print('Dense offsets not requested. Skipping ....')
return
hasGPU = self.useGPU and self._insar.hasGPU()
if hasGPU:
runDenseOffsetsGPU(self)
else:
runDenseOffsetsCPU(self)
@use_api
def runDenseOffsetsCPU(self):
'''
Estimate dense offset field between merged master bursts and slave bursts.
'''
from mroipac.ampcor.DenseAmpcor import DenseAmpcor
os.environ['VRT_SHARED_SOURCE'] = "0"
print('\n============================================================')
print('Configuring DenseAmpcor object for processing...\n')
### Determine appropriate filenames
mf = 'master.slc'
sf = 'slave.slc'
if not ((self.numberRangeLooks == 1) and (self.numberAzimuthLooks==1)):
mf += '.full'
sf += '.full'
master = os.path.join(self._insar.mergedDirname, mf)
slave = os.path.join(self._insar.mergedDirname, sf)
####For this module currently, we need to create an actual file on disk
for infile in [master,slave]:
if os.path.isfile(infile):
continue
cmd = 'gdal_translate -of ENVI {0}.vrt {0}'.format(infile)
status = os.system(cmd)
if status:
raise Exception('{0} could not be executed'.format(cmd))
### Load the master object
m = isceobj.createSlcImage()
m.load(master + '.xml')
m.setAccessMode('READ')
# m.createImage()
### Load the slave object
s = isceobj.createSlcImage()
s.load(slave + '.xml')
s.setAccessMode('READ')
# s.createImage()
width = m.getWidth()
length = m.getLength()
objOffset = DenseAmpcor(name='dense')
objOffset.configure()
# objOffset.numberThreads = 1
### Configure dense Ampcor object
print('\nMaster frame: %s' % (mf))
print('Slave frame: %s' % (sf))
print('Main window size width: %d' % (self.winwidth))
print('Main window size height: %d' % (self.winhgt))
print('Search window size width: %d' % (self.srcwidth))
print('Search window size height: %d' % (self.srchgt))
print('Skip sample across: %d' % (self.skipwidth))
print('Skip sample down: %d' % (self.skiphgt))
print('Field margin: %d' % (self.margin))
print('Oversampling factor: %d' % (self.oversample))
print('Gross offset across: %d' % (self.rgshift))
print('Gross offset down: %d\n' % (self.azshift))
objOffset.setWindowSizeWidth(self.winwidth)
objOffset.setWindowSizeHeight(self.winhgt)
objOffset.setSearchWindowSizeWidth(self.srcwidth)
objOffset.setSearchWindowSizeHeight(self.srchgt)
objOffset.skipSampleAcross = self.skipwidth
objOffset.skipSampleDown = self.skiphgt
objOffset.oversamplingFactor = self.oversample
objOffset.setAcrossGrossOffset(self.rgshift)
objOffset.setDownGrossOffset(self.azshift)
objOffset.setFirstPRF(1.0)
objOffset.setSecondPRF(1.0)
if m.dataType.startswith('C'):
objOffset.setImageDataType1('mag')
else:
objOffset.setImageDataType1('real')
if s.dataType.startswith('C'):
objOffset.setImageDataType2('mag')
else:
objOffset.setImageDataType2('real')
objOffset.offsetImageName = os.path.join(self._insar.mergedDirname, self._insar.offsetfile)
objOffset.snrImageName = os.path.join(self._insar.mergedDirname, self._insar.snrfile)
print('Output dense offsets file name: %s' % (objOffset.offsetImageName))
print('Output SNR file name: %s' % (objOffset.snrImageName))
print('\n======================================')
print('Running dense ampcor...')
print('======================================\n')
objOffset.denseampcor(m, s) ### Where the magic happens...
### Store params for later
self._insar.offset_width = objOffset.offsetCols
self._insar.offset_length = objOffset.offsetLines
self._insar.offset_top = objOffset.locationDown[0][0]
self._insar.offset_left = objOffset.locationAcross[0][0]
def runDenseOffsetsGPU(self):
'''
Estimate dense offset field between merged master bursts and slave bursts.
'''
from contrib.PyCuAmpcor import PyCuAmpcor
print('\n============================================================')
print('Configuring PyCuAmpcor object for processing...\n')
### Determine appropriate filenames
mf = 'master.slc'
sf = 'slave.slc'
if not ((self.numberRangeLooks == 1) and (self.numberAzimuthLooks==1)):
mf += '.full'
sf += '.full'
master = os.path.join(self._insar.mergedDirname, mf)
slave = os.path.join(self._insar.mergedDirname, sf)
####For this module currently, we need to create an actual file on disk
for infile in [master,slave]:
if os.path.isfile(infile):
continue
cmd = 'gdal_translate -of ENVI {0}.vrt {0}'.format(infile)
status = os.system(cmd)
if status:
raise Exception('{0} could not be executed'.format(cmd))
### Load the master object
m = isceobj.createSlcImage()
m.load(master + '.xml')
m.setAccessMode('READ')
# m.createImage()
### Load the slave object
s = isceobj.createSlcImage()
s.load(slave + '.xml')
s.setAccessMode('READ')
# s.createImage()
width = m.getWidth()
length = m.getLength()
objOffset = PyCuAmpcor.PyCuAmpcor()
objOffset.algorithm = 0
objOffset.deviceID = -1
objOffset.nStreams = 2
objOffset.derampMethod = 0
objOffset.masterImageName = master
objOffset.masterImageHeight = length
objOffset.masterImageWidth = width
objOffset.slaveImageName = slave
objOffset.slaveImageHeight = length
objOffset.slaveImageWidth = width
objOffset.numberWindowDown = (length-100-self.winhgt)//self.skiphgt
objOffset.numberWindowAcross = (width-100-self.winwidth)//self.skipwidth
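# Window counts leave a 100-pixel border (50 on each side, matching the
# masterStartPixel*Static = 50 offsets below) so search windows stay in-bounds.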
objOffset.windowSizeHeight = self.winhgt
objOffset.windowSizeWidth = self.winwidth
objOffset.halfSearchRangeDown = self.srchgt
objOffset.halfSearchRangeAcross = self.srcwidth
objOffset.masterStartPixelDownStatic = 50
objOffset.masterStartPixelAcrossStatic = 50
objOffset.skipSampleDown = self.skiphgt
objOffset.skipSampleAcross = self.skipwidth
objOffset.corrSufaceOverSamplingMethod = 0
objOffset.corrSurfaceOverSamplingFactor = self.oversample
# generic control
objOffset.numberWindowDownInChunk = 10
objOffset.numberWindowAcrossInChunk = 10
objOffset.mmapSize = 16
objOffset.setupParams()
objOffset.setConstantGrossOffset(self.azshift,self.rgshift)
# objOffset.numberThreads = 1
### Configure dense Ampcor object
print('\nMaster frame: %s' % (mf))
print('Slave frame: %s' % (sf))
print('Main window size width: %d' % (self.winwidth))
print('Main window size height: %d' % (self.winhgt))
print('Search window size width: %d' % (self.srcwidth))
print('Search window size height: %d' % (self.srchgt))
print('Skip sample across: %d' % (self.skipwidth))
print('Skip sample down: %d' % (self.skiphgt))
print('Field margin: %d' % (self.margin))
print('Oversampling factor: %d' % (self.oversample))
print('Gross offset across: %d' % (self.rgshift))
print('Gross offset down: %d\n' % (self.azshift))
#Modify BIL in filename to BIP if needed and store for future use
prefix, ext = os.path.splitext(self._insar.offsetfile)
if ext == '.bil':
ext = '.bip'
self._insar.offsetfile = prefix + ext
objOffset.offsetImageName = os.path.join(self._insar.mergedDirname, self._insar.offsetfile)
objOffset.snrImageName = os.path.join(self._insar.mergedDirname, self._insar.snrfile)
print('Output dense offsets file name: %s' % (objOffset.offsetImageName))
print('Output SNR file name: %s' % (objOffset.snrImageName))
print('\n======================================')
print('Running dense ampcor...')
print('======================================\n')
objOffset.checkPixelInImageRange()
objOffset.runAmpcor()
#objOffset.denseampcor(m, s) ### Where the magic happens...
### Store params for later
self._insar.offset_width = objOffset.numberWindowAcross
self._insar.offset_length = objOffset.numberWindowDown
self._insar.offset_top = 50
self._insar.offset_left = 50
outImg = isceobj.createImage()
outImg.setDataType('FLOAT')
outImg.setFilename(objOffset.offsetImageName.decode('utf-8'))
outImg.setBands(2)
outImg.scheme = 'BIP'
outImg.setWidth(objOffset.numberWindowAcross)
outImg.setLength(objOffset.numberWindowDown)
outImg.setAccessMode('read')
outImg.renderHdr()
snrImg = isceobj.createImage()
snrImg.setFilename( objOffset.snrImageName.decode('utf8'))
snrImg.setDataType('FLOAT')
snrImg.setBands(1)
snrImg.setWidth(objOffset.numberWindowAcross)
snrImg.setLength(objOffset.numberWindowDown)
snrImg.setAccessMode('read')
snrImg.renderHdr()
if __name__ == '__main__':
'''
Default routine to plug master.slc.full/slave.slc.full into
Dense Offsets Ampcor module.
'''
main()
| [] | [] | [
"VRT_SHARED_SOURCE"
] | [] | ["VRT_SHARED_SOURCE"] | python | 1 | 0 | |
FreshShop/wsgi.py | """
WSGI config for FreshShop project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FreshShop.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
qa/rpc-tests/util.py | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The PIVX developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
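# The pid-derived offset keeps concurrent test runs from colliding on ports.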
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "zcore.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
zcored and zcore-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run zcored:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "zcored"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "zcore-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in zcore.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
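# e.g. (illustrative) '[::1]:19001' -> ['-rpcconnect=::1', '-rpcport=19001'];
# 'localhost' -> ['-rpcconnect=localhost']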
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a zcored and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "zcored"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "zcore-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple zcoreds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
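# Illustrative numbers: amount_in=10, amount_out=2, fee=0.01 gives change 7.99,
# which exceeds 2*(2.01), so it is split: ~3.995 to a fresh change address and
# the remainder (~3.995) back to the sender.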
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using it's output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
| [] | [] | [
"BITCOINCLI",
"BITCOIND"
] | [] | ["BITCOINCLI", "BITCOIND"] | python | 2 | 0 | |
flink-yarn-tests/src/test/java/org/apache/flink/yarn/YARNSessionCapacitySchedulerITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.yarn;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.client.cli.CliFrontend;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.GlobalConfiguration;
import org.apache.flink.configuration.JobManagerOptions;
import org.apache.flink.runtime.rest.RestClient;
import org.apache.flink.runtime.rest.handler.legacy.messages.ClusterOverviewWithVersion;
import org.apache.flink.runtime.rest.messages.ClusterConfigurationInfo;
import org.apache.flink.runtime.rest.messages.ClusterConfigurationInfoEntry;
import org.apache.flink.runtime.rest.messages.ClusterConfigurationInfoHeaders;
import org.apache.flink.runtime.rest.messages.ClusterOverviewHeaders;
import org.apache.flink.runtime.rest.messages.taskmanager.TaskManagerInfo;
import org.apache.flink.runtime.rest.messages.taskmanager.TaskManagersHeaders;
import org.apache.flink.runtime.rest.messages.taskmanager.TaskManagersInfo;
import org.apache.flink.runtime.testutils.CommonTestUtils;
import org.apache.flink.test.testdata.WordCountData;
import org.apache.flink.testutils.logging.LoggerAuditingExtension;
import org.apache.flink.yarn.cli.FlinkYarnSessionCli;
import org.apache.flink.yarn.configuration.YarnConfigOptions;
import org.apache.flink.yarn.util.TestUtils;
import org.apache.flink.shaded.guava30.com.google.common.net.HostAndPort;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import static org.apache.flink.core.testutils.FlinkAssertions.anyCauseMatches;
import static org.apache.flink.util.Preconditions.checkState;
import static org.apache.flink.yarn.util.TestUtils.getTestJarPath;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.assertj.core.api.Fail.fail;
/**
* This test starts a MiniYARNCluster with a CapacityScheduler. It has, by default, a queue called
* "default". The configuration here adds another queue: "qa-team".
*/
class YARNSessionCapacitySchedulerITCase extends YarnTestBase {
private static final Logger LOG =
LoggerFactory.getLogger(YARNSessionCapacitySchedulerITCase.class);
/** RestClient to query Flink cluster. */
private static RestClient restClient;
/**
* ExecutorService for {@link RestClient}.
*
* @see #restClient
*/
private static ExecutorService restClientExecutor;
/** Toggles checking for prohibited strings in logs after the test has run. */
private boolean checkForProhibitedLogContents = true;
@RegisterExtension
private final LoggerAuditingExtension cliLoggerAuditingExtension =
new LoggerAuditingExtension(CliFrontend.class, Level.INFO);
@RegisterExtension
private final LoggerAuditingExtension yarLoggerAuditingExtension =
new LoggerAuditingExtension(YarnClusterDescriptor.class, Level.WARN);
@BeforeAll
static void setup() throws Exception {
YARN_CONFIGURATION.setClass(
YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
YARN_CONFIGURATION.set("yarn.scheduler.capacity.root.queues", "default,qa-team");
YARN_CONFIGURATION.setInt("yarn.scheduler.capacity.root.default.capacity", 40);
YARN_CONFIGURATION.setInt("yarn.scheduler.capacity.root.qa-team.capacity", 60);
YARN_CONFIGURATION.set(
YarnTestBase.TEST_CLUSTER_NAME_KEY, "flink-yarn-tests-capacityscheduler");
startYARNWithConfig(YARN_CONFIGURATION);
restClientExecutor = Executors.newSingleThreadExecutor();
restClient = new RestClient(new Configuration(), restClientExecutor);
}
@AfterAll
static void tearDown() throws Exception {
try {
YarnTestBase.teardown();
} finally {
if (restClient != null) {
restClient.shutdown(Time.seconds(5));
}
if (restClientExecutor != null) {
restClientExecutor.shutdownNow();
}
}
}
/**
* Tests that a session cluster, that uses the resources from the <i>qa-team</i> queue, can be
* started from the command line.
*/
@Test
void testStartYarnSessionClusterInQaTeamQueue() throws Exception {
runTest(
() ->
runWithArgs(
new String[] {
"-j",
flinkUberjar.getAbsolutePath(),
"-t",
flinkLibFolder.getAbsolutePath(),
"-jm",
"768m",
"-tm",
"1024m",
"-qu",
"qa-team"
},
"JobManager Web Interface:",
null,
RunTypes.YARN_SESSION,
0));
}
/**
* Tests a per-job YARN cluster.
*
* <p>This also tests the prefixed CliFrontend options for the YARN case. We also test whether the
* requested parallelism of 2 is passed through. The parallelism is requested at the YARN client
* (-ys).
*/
@Test
void perJobYarnCluster() throws Exception {
runTest(
() -> {
LOG.info("Starting perJobYarnCluster()");
File exampleJarLocation = getTestJarPath("BatchWordCount.jar");
runWithArgs(
new String[] {
"run",
"-m",
"yarn-cluster",
"-yj",
flinkUberjar.getAbsolutePath(),
"-yt",
flinkLibFolder.getAbsolutePath(),
"-ys",
"2", // test that the job is executed with a DOP of 2
"-yjm",
"768m",
"-ytm",
"1024m",
exampleJarLocation.getAbsolutePath()
},
/* test succeeded after this string */
"Program execution finished",
/* prohibited strings: (to verify the parallelism) */
// (we should see "DataSink (...) (1/2)" and "DataSink (...) (2/2)"
// instead)
new String[] {"DataSink \\(.*\\) \\(1/1\\) switched to FINISHED"},
RunTypes.CLI_FRONTEND,
0,
cliLoggerAuditingExtension::getMessages);
LOG.info("Finished perJobYarnCluster()");
});
}
/**
* Test per-job yarn cluster and memory calculations for off-heap use (see FLINK-7400) with the
* same job as {@link #perJobYarnCluster()}.
*
* <p>This ensures that with (any) pre-allocated off-heap memory by us, there is some off-heap
* memory remaining for Flink's libraries. Creating task managers will thus fail if no off-heap
* memory remains.
*/
@Test
void perJobYarnClusterOffHeap() throws Exception {
runTest(
() -> {
LOG.info("Starting perJobYarnCluster()");
File exampleJarLocation = getTestJarPath("BatchWordCount.jar");
// set memory constraints (otherwise this is the same test as
// perJobYarnCluster() above)
final long taskManagerMemoryMB = 1024;
runWithArgs(
new String[] {
"run",
"-m",
"yarn-cluster",
"-yj",
flinkUberjar.getAbsolutePath(),
"-yt",
flinkLibFolder.getAbsolutePath(),
"-ys",
"2", // test that the job is executed with a DOP of 2
"-yjm",
"768m",
"-ytm",
taskManagerMemoryMB + "m",
exampleJarLocation.getAbsolutePath()
},
/* test succeeded after this string */
"Program execution finished",
/* prohibited strings: (to verify the parallelism) */
// (we should see "DataSink (...) (1/2)" and "DataSink (...) (2/2)"
// instead)
new String[] {"DataSink \\(.*\\) \\(1/1\\) switched to FINISHED"},
RunTypes.CLI_FRONTEND,
0,
cliLoggerAuditingExtension::getMessages);
LOG.info("Finished perJobYarnCluster()");
});
}
/**
* Starts a session cluster on YARN, and submits a streaming job.
*
* <p>Tests
*
* <ul>
* <li>if a custom YARN application name can be set from the command line,
* <li>if the number of TaskManager slots can be set from the command line,
* <li>if dynamic properties from the command line are set,
* <li>if the vcores are set correctly (FLINK-2213),
* <li>if jobmanager hostname/port are shown in web interface (FLINK-1902)
* </ul>
*
* <p><b>Hint: </b> If you think it is a good idea to add more assertions to this test, think
* again!
*/
@Test
void
testVCoresAreSetCorrectlyAndJobManagerHostnameAreShownInWebInterfaceAndDynamicPropertiesAndYarnApplicationNameAndTaskManagerSlots()
throws Exception {
runTest(
() -> {
checkForProhibitedLogContents = false;
final Runner yarnSessionClusterRunner =
startWithArgs(
new String[] {
"-j",
flinkUberjar.getAbsolutePath(),
"-t",
flinkLibFolder.getAbsolutePath(),
"-jm",
"768m",
"-tm",
"1024m",
"-s",
"3", // set the slots 3 to check if the vCores are set
// properly!
"-nm",
"customName",
"-Dfancy-configuration-value=veryFancy",
"-D" + YarnConfigOptions.VCORES.key() + "=2"
},
"JobManager Web Interface:",
RunTypes.YARN_SESSION);
try {
final String logs = outContent.toString();
final HostAndPort hostAndPort = parseJobManagerHostname(logs);
final String host = hostAndPort.getHost();
final int port = hostAndPort.getPort();
LOG.info("Extracted hostname:port: {}:{}", host, port);
submitJob("WindowJoin.jar");
//
// Assert that custom YARN application name "customName" is set
//
final ApplicationReport applicationReport = getOnlyApplicationReport();
assertThat(applicationReport.getName()).isEqualTo("customName");
//
// Assert the number of TaskManager slots are set
//
waitForTaskManagerRegistration(host, port);
assertNumberOfSlotsPerTask(host, port, 3);
final Map<String, String> flinkConfig = getFlinkConfig(host, port);
//
// Assert dynamic properties
//
assertThat(flinkConfig)
.containsEntry("fancy-configuration-value", "veryFancy")
//
// FLINK-2213: assert that vcores are set
//
.containsEntry(YarnConfigOptions.VCORES.key(), "2")
//
// FLINK-1902: check if jobmanager hostname is shown in web
// interface
//
.containsEntry(JobManagerOptions.ADDRESS.key(), host);
} finally {
yarnSessionClusterRunner.sendStop();
yarnSessionClusterRunner.join();
}
});
}
private static HostAndPort parseJobManagerHostname(final String logs) {
final Pattern p =
Pattern.compile("JobManager Web Interface: http://([a-zA-Z0-9.-]+):([0-9]+)");
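// Matches log lines such as (illustrative):
// "JobManager Web Interface: http://localhost:38421"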
final Matcher matches = p.matcher(logs);
String hostname = null;
String port = null;
while (matches.find()) {
hostname = matches.group(1).toLowerCase();
port = matches.group(2);
}
checkState(hostname != null, "hostname not found in log");
checkState(port != null, "port not found in log");
return HostAndPort.fromParts(hostname, Integer.parseInt(port));
}
private void submitJob(final String jobFileName) throws IOException, InterruptedException {
Runner jobRunner =
startWithArgs(
new String[] {
"run", "--detached", getTestJarPath(jobFileName).getAbsolutePath()
},
"Job has been submitted with JobID",
RunTypes.CLI_FRONTEND);
jobRunner.join();
}
private static void waitForTaskManagerRegistration(final String host, final int port)
throws Exception {
CommonTestUtils.waitUntilCondition(() -> getNumberOfTaskManagers(host, port) > 0);
}
private static void assertNumberOfSlotsPerTask(
final String host, final int port, final int slotsNumber) throws Exception {
try {
CommonTestUtils.waitUntilCondition(
() -> getNumberOfSlotsPerTaskManager(host, port) == slotsNumber);
} catch (final TimeoutException e) {
final int currentNumberOfSlots = getNumberOfSlotsPerTaskManager(host, port);
fail(
String.format(
"Expected slots per TM to be %d, was: %d",
slotsNumber, currentNumberOfSlots));
}
}
private static int getNumberOfTaskManagers(final String host, final int port) throws Exception {
final ClusterOverviewWithVersion clusterOverviewWithVersion =
restClient
.sendRequest(host, port, ClusterOverviewHeaders.getInstance())
.get(30_000, TimeUnit.MILLISECONDS);
return clusterOverviewWithVersion.getNumTaskManagersConnected();
}
private static int getNumberOfSlotsPerTaskManager(final String host, final int port)
throws Exception {
final TaskManagersInfo taskManagersInfo =
restClient.sendRequest(host, port, TaskManagersHeaders.getInstance()).get();
return taskManagersInfo.getTaskManagerInfos().stream()
.map(TaskManagerInfo::getNumberSlots)
.findFirst()
.orElse(0);
}
private static Map<String, String> getFlinkConfig(final String host, final int port)
throws Exception {
final ClusterConfigurationInfo clusterConfigurationInfoEntries =
restClient
.sendRequest(host, port, ClusterConfigurationInfoHeaders.getInstance())
.get();
return clusterConfigurationInfoEntries.stream()
.collect(
Collectors.toMap(
ClusterConfigurationInfoEntry::getKey,
ClusterConfigurationInfoEntry::getValue));
}
/**
* Test deployment to a non-existing queue & ensure that the system logs a WARN message for the
* user. (Users had unexpected behavior of Flink on YARN because they mistyped the target queue.
* With an error message, we can help users identify the issue.)
*/
@Test
void testNonexistingQueueWARNmessage() throws Exception {
runTest(
() -> {
LOG.info("Starting testNonexistingQueueWARNmessage()");
assertThatThrownBy(
() ->
runWithArgs(
new String[] {
"-j",
flinkUberjar.getAbsolutePath(),
"-t",
flinkLibFolder.getAbsolutePath(),
"-jm",
"768m",
"-tm",
"1024m",
"-qu",
"doesntExist"
},
"to unknown queue: doesntExist",
null,
RunTypes.YARN_SESSION,
1))
.isInstanceOf(Exception.class)
.satisfies(anyCauseMatches("to unknown queue: doesntExist"));
assertThat(yarLoggerAuditingExtension.getMessages())
.anySatisfy(
s ->
assertThat(s)
.contains(
"The specified queue 'doesntExist' does not exist. Available queues"));
LOG.info("Finished testNonexistingQueueWARNmessage()");
});
}
/**
* Test per-job yarn cluster with the parallelism set at the CliFrontend instead of the YARN
* client.
*/
@Test
void perJobYarnClusterWithParallelism() throws Exception {
runTest(
() -> {
LOG.info("Starting perJobYarnClusterWithParallelism()");
File exampleJarLocation = getTestJarPath("BatchWordCount.jar");
runWithArgs(
new String[] {
"run",
"-p",
"2", // test that the job is executed with a DOP of 2
"-m",
"yarn-cluster",
"-yj",
flinkUberjar.getAbsolutePath(),
"-yt",
flinkLibFolder.getAbsolutePath(),
"-ys",
"2",
"-yjm",
"768m",
"-ytm",
"1024m",
exampleJarLocation.getAbsolutePath()
},
/* test succeeded after this string */
"Program execution finished",
/* prohibited strings: (we want to see "DataSink (...) (2/2) switched to FINISHED") */
new String[] {"DataSink \\(.*\\) \\(1/1\\) switched to FINISHED"},
RunTypes.CLI_FRONTEND,
0,
cliLoggerAuditingExtension::getMessages);
LOG.info("Finished perJobYarnClusterWithParallelism()");
});
}
/** Test a fire-and-forget job submission to a YARN cluster. */
@Timeout(value = 60)
@Test
void testDetachedPerJobYarnCluster() throws Exception {
runTest(
() -> {
LOG.info("Starting testDetachedPerJobYarnCluster()");
File exampleJarLocation = getTestJarPath("BatchWordCount.jar");
testDetachedPerJobYarnClusterInternal(exampleJarLocation.getAbsolutePath());
LOG.info("Finished testDetachedPerJobYarnCluster()");
});
}
/** Test a fire-and-forget job submission to a YARN cluster. */
@Timeout(value = 60)
@Test
void testDetachedPerJobYarnClusterWithStreamingJob() throws Exception {
runTest(
() -> {
LOG.info("Starting testDetachedPerJobYarnClusterWithStreamingJob()");
File exampleJarLocation = getTestJarPath("StreamingWordCount.jar");
testDetachedPerJobYarnClusterInternal(exampleJarLocation.getAbsolutePath());
LOG.info("Finished testDetachedPerJobYarnClusterWithStreamingJob()");
});
}
private void testDetachedPerJobYarnClusterInternal(String job) throws Exception {
YarnClient yc = YarnClient.createYarnClient();
yc.init(YARN_CONFIGURATION);
yc.start();
// get temporary folder for writing output of wordcount example
File tmpOutFolder = tmp;
// get temporary file for reading input data for wordcount example
File tmpInFile = tmpOutFolder.toPath().resolve(UUID.randomUUID().toString()).toFile();
tmpInFile.createNewFile();
try {
FileUtils.writeStringToFile(tmpInFile, WordCountData.TEXT, Charset.defaultCharset());
} catch (IOException e) {
throw new RuntimeException(e);
}
Runner runner =
startWithArgs(
new String[] {
"run",
"-m",
"yarn-cluster",
"-yj",
flinkUberjar.getAbsolutePath(),
"-yt",
flinkLibFolder.getAbsolutePath(),
"-yjm",
"768m",
"-yD",
YarnConfigOptions.APPLICATION_TAGS.key() + "=test-tag",
"-ytm",
"1024m",
"-ys",
"2", // test requesting slots from YARN.
"-p",
"2",
"--detached",
job,
"--input",
tmpInFile.getAbsoluteFile().toString(),
"--output",
tmpOutFolder.getAbsoluteFile().toString()
},
"Job has been submitted with JobID",
RunTypes.CLI_FRONTEND);
// it should usually be 2, but on slow machines, the number varies
assertThat(getRunningContainers()).isLessThanOrEqualTo(2);
// give the runner some time to detach
for (int attempt = 0; runner.isAlive() && attempt < 5; attempt++) {
try {
Thread.sleep(500);
} catch (InterruptedException ignored) {
}
}
assertThat(runner.isAlive()).isFalse();
LOG.info("CLI Frontend has returned, so the job is running");
// find out the application id and wait until it has finished.
try {
List<ApplicationReport> apps =
getApplicationReportWithRetryOnNPE(
yc, EnumSet.of(YarnApplicationState.RUNNING));
ApplicationId tmpAppId;
if (apps.size() == 1) {
// Better method to find the right appId. But sometimes the app is shutting down
// very fast
// Only one running
tmpAppId = apps.get(0).getApplicationId();
LOG.info("waiting for the job with appId {} to finish", tmpAppId);
// wait until the app has finished
while (getApplicationReportWithRetryOnNPE(
yc, EnumSet.of(YarnApplicationState.RUNNING))
.size()
> 0) {
sleep(500);
}
} else {
// get appId by finding the latest finished appid
apps = getApplicationReportWithRetryOnNPE(yc);
Collections.sort(
apps,
(o1, o2) -> o1.getApplicationId().compareTo(o2.getApplicationId()) * -1);
tmpAppId = apps.get(0).getApplicationId();
LOG.info(
"Selected {} as the last appId from {}",
tmpAppId,
Arrays.toString(apps.toArray()));
}
final ApplicationId id = tmpAppId;
// now it has finished.
// check the output files.
File[] listOfOutputFiles = tmpOutFolder.listFiles();
assertThat(listOfOutputFiles).isNotNull();
LOG.info("The job has finished. TaskManager output files found in {}", tmpOutFolder);
// read all output files in output folder to one output string
StringBuilder content = new StringBuilder();
for (File f : listOfOutputFiles) {
if (f.isFile()) {
content.append(FileUtils.readFileToString(f, Charset.defaultCharset()))
.append("\n");
}
}
// check if the heap size for the TaskManager was set correctly
File jobmanagerLog =
TestUtils.findFile(
"..",
(dir, name) ->
name.contains("jobmanager.log")
&& dir.getAbsolutePath().contains(id.toString()));
assertThat(jobmanagerLog).isNotNull();
content =
new StringBuilder(
FileUtils.readFileToString(jobmanagerLog, Charset.defaultCharset()));
assertThat(content.toString())
.contains("Starting TaskManagers")
.contains(" (2/2) (attempt #0) with attempt id ");
// make sure the detached app is really finished.
LOG.info("Checking again that app has finished");
ApplicationReport rep;
do {
sleep(500);
rep = yc.getApplicationReport(id);
LOG.info("Got report {}", rep);
} while (rep.getYarnApplicationState() == YarnApplicationState.RUNNING);
verifyApplicationTags(rep);
} finally {
// cleanup the yarn-properties file
String confDirPath = System.getenv("FLINK_CONF_DIR");
File configDirectory = new File(confDirPath);
LOG.info(
"testDetachedPerJobYarnClusterInternal: Using configuration directory "
+ configDirectory.getAbsolutePath());
// load the configuration
LOG.info("testDetachedPerJobYarnClusterInternal: Trying to load configuration file");
Configuration configuration =
GlobalConfiguration.loadConfiguration(configDirectory.getAbsolutePath());
try {
File yarnPropertiesFile =
FlinkYarnSessionCli.getYarnPropertiesLocation(
configuration.getValue(YarnConfigOptions.PROPERTIES_FILE_LOCATION));
if (yarnPropertiesFile.exists()) {
LOG.info(
"testDetachedPerJobYarnClusterInternal: Cleaning up temporary Yarn address reference: {}",
yarnPropertiesFile.getAbsolutePath());
yarnPropertiesFile.delete();
}
} catch (Exception e) {
LOG.warn(
"testDetachedPerJobYarnClusterInternal: Exception while deleting the JobManager address file",
e);
}
try {
LOG.info("testDetachedPerJobYarnClusterInternal: Closing the yarn client");
yc.stop();
} catch (Exception e) {
LOG.warn(
"testDetachedPerJobYarnClusterInternal: Exception while close the yarn client",
e);
}
}
}
/**
* Ensures that the YARN application tags were set properly.
*
* <p>Since YARN application tags were only added in Hadoop 2.4, but Flink still supports Hadoop
* 2.3, reflection is required to invoke the methods. If the method does not exist, this test
* passes.
*/
private void verifyApplicationTags(final ApplicationReport report)
throws InvocationTargetException, IllegalAccessException {
final Method applicationTagsMethod;
Class<ApplicationReport> clazz = ApplicationReport.class;
try {
// this method is only supported by Hadoop 2.4.0 onwards
applicationTagsMethod = clazz.getMethod("getApplicationTags");
} catch (NoSuchMethodException e) {
// only verify the tags if the method exists
return;
}
@SuppressWarnings("unchecked")
Set<String> applicationTags = (Set<String>) applicationTagsMethod.invoke(report);
assertThat(applicationTags).containsOnly("test-tag");
}
@AfterEach
void checkForProhibitedLogContents() {
if (checkForProhibitedLogContents) {
ensureNoProhibitedStringInLogFiles(PROHIBITED_STRINGS, WHITELISTED_STRINGS);
}
}
}
| [
"\"FLINK_CONF_DIR\""
] | [] | [
"FLINK_CONF_DIR"
] | [] | ["FLINK_CONF_DIR"] | java | 1 | 0 | |
view-generator-framework/src/main/java/org/hypertrace/core/viewgenerator/service/ViewGeneratorLauncher.java | package org.hypertrace.core.viewgenerator.service;
import static org.hypertrace.core.viewgenerator.service.ViewGeneratorConstants.INPUT_TOPIC_CONFIG_KEY;
import static org.hypertrace.core.viewgenerator.service.ViewGeneratorConstants.OUTPUT_TOPIC_CONFIG_KEY;
import static org.hypertrace.core.viewgenerator.service.ViewGeneratorConstants.VIEW_GENERATOR_CLASS_CONFIG_KEY;
import com.typesafe.config.Config;
import java.util.Map;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Produced;
import org.hypertrace.core.kafkastreams.framework.KafkaStreamsApp;
import org.hypertrace.core.serviceframework.config.ConfigClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class ViewGeneratorLauncher extends KafkaStreamsApp {
private static final Logger logger = LoggerFactory.getLogger(ViewGeneratorLauncher.class);
private static final String DEFAULT_VIEW_GEN_JOB_CONFIG_KEY = "view-gen-job-config-key";
private String viewGenName;
public ViewGeneratorLauncher(ConfigClient configClient) {
super(configClient);
}
public String getViewGenName() {
return viewGenName;
}
public void setViewGenName(String viewGenName) {
this.viewGenName = viewGenName;
}
@Override
public StreamsBuilder buildTopology(Map<String, Object> properties, StreamsBuilder streamsBuilder,
Map<String, KStream<?, ?>> inputStreams) {
Config jobConfig = getJobConfig(properties);
String viewGeneratorClassName = jobConfig.getString(VIEW_GENERATOR_CLASS_CONFIG_KEY);
InputToViewMapper viewMapper;
try {
viewMapper = new InputToViewMapper(viewGeneratorClassName);
} catch (Exception e) {
throw new RuntimeException(e);
}
String inputTopic = jobConfig.getString(INPUT_TOPIC_CONFIG_KEY);
String outputTopic = jobConfig.getString(OUTPUT_TOPIC_CONFIG_KEY);
KStream<?, ?> inputStream = inputStreams.get(inputTopic);
if (inputStream == null) {
inputStream = streamsBuilder.stream(inputTopic, Consumed.with(Serdes.String(), null));
inputStreams.put(inputTopic, inputStream);
}
// This environment property helps in overriding the producer value serde. For hypertrace
// quickstart deployment, this helps in using GenericAvroSerde for pinot views.
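// For example (hypothetical setting):
// PRODUCER_VALUE_SERDE=io.confluent.kafka.streams.serdes.avro.GenericAvroSerde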
Serde producerValueSerde = null;
String envProducerValueSerdeClassName = System.getenv("PRODUCER_VALUE_SERDE");
if (envProducerValueSerdeClassName != null) {
try {
logger.info("Using producer value serde: {}", envProducerValueSerdeClassName);
Class clazz = Class.forName(envProducerValueSerdeClassName);
producerValueSerde = (Serde) clazz.getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
inputStream.flatMapValues(viewMapper)
.to(outputTopic, Produced.with(Serdes.String(), producerValueSerde));
return streamsBuilder;
}
@Override
public String getJobConfigKey() {
String jobConfigKey = getViewGenName();
return jobConfigKey != null ? jobConfigKey : DEFAULT_VIEW_GEN_JOB_CONFIG_KEY;
}
private Config getJobConfig(Map<String, Object> properties) {
return (Config) properties.get(getJobConfigKey());
}
}
| [
"\"PRODUCER_VALUE_SERDE\""
] | [] | [
"PRODUCER_VALUE_SERDE"
] | [] | ["PRODUCER_VALUE_SERDE"] | java | 1 | 0 | |
monolens/util.py | import os
import sys
from PySide6 import QtGui
import numpy as np
import numba as nb
DEBUG = int(os.environ.get("DEBUG", "0"))
if sys.byteorder == "little":
argb = (3, 2, 1, 0)
else:
argb = (0, 1, 2, 3)
# matrix values from colorblind package
cb_lms = np.array(
[
# Protanopia (red weakness)
[[0, 0.90822864, 0.008192], [0, 1, 0], [0, 0, 1]],
# Deuteranopia (green weakness)
[[1, 0, 0], [1.10104433, 0, -0.00901975], [0, 0, 1]],
# Tritanopia (blue weakness)
[[1, 0, 0], [0, 1, 0], [-0.15773032, 1.19465634, 0]],
],
)
rgb2lms = np.array(
[
[0.3904725, 0.54990437, 0.00890159],
[0.07092586, 0.96310739, 0.00135809],
[0.02314268, 0.12801221, 0.93605194],
],
)
lms2rgb = np.linalg.inv(rgb2lms)
cb_full = [np.linalg.multi_dot((lms2rgb, cbi, rgb2lms)) for cbi in cb_lms]
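# Each composite matrix acts directly on RGB: convert RGB -> LMS, apply the
# deficiency projection, then convert LMS -> RGB.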
@nb.njit(cache=True)
def clip(x, xmin, xmax):
if x < xmin:
return xmin
return min(x, xmax)
class QImageArrayInterface:
__slots__ = ("__array_interface__",)
def __init__(self, image):
format = image.format()
assert format == QtGui.QImage.Format_RGB32
self.__array_interface__ = {
"shape": (image.width() * image.height(), 4),
"typestr": "|u1",
"data": image.bits(),
"version": 3,
}
def qimage_array_view(image):
return np.asarray(QImageArrayInterface(image))
@nb.njit(parallel=True, cache=True)
def _grayscale(d, s, argb):
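# The weighted sum below uses the Rec. 601 luma coefficients (0.299/0.587/0.114).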
a, r, g, b = argb
for i in nb.prange(len(s)):
sr = s[i, r]
sg = s[i, g]
sb = s[i, b]
c = clip(0.299 * sr + 0.587 * sg + 0.114 * sb, 0, 255)
d[i, a] = 255
d[i, r] = c
d[i, g] = c
d[i, b] = c
def grayscale(dest, source):
s = qimage_array_view(source)
d = qimage_array_view(dest)
_grayscale(d, s, argb)
@nb.njit(parallel=True, cache=True)
def _colorblindness(d, s, cb, argb):
a, r, g, b = argb
for i in nb.prange(len(s)):
sr = s[i, r]
sg = s[i, g]
sb = s[i, b]
dr = cb[0, 0] * sr + cb[0, 1] * sg + cb[0, 2] * sb
dg = cb[1, 0] * sr + cb[1, 1] * sg + cb[1, 2] * sb
db = cb[2, 0] * sr + cb[2, 1] * sg + cb[2, 2] * sb
d[i, a] = 255
d[i, r] = clip(dr, 0, 255)
d[i, g] = clip(dg, 0, 255)
d[i, b] = clip(db, 0, 255)
def colorblindness(dest, source, type):
s = qimage_array_view(source)
d = qimage_array_view(dest)
cb = cb_full[type]
_colorblindness(d, s, cb, argb)
| [] | [] | [
"DEBUG"
] | [] | ["DEBUG"] | python | 1 | 0 | |
djangotutorial/settings.py | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# Basic config
DEBUG = True
ALLOWED_HOSTS = ['0.0.0.0',
'127.0.0.1',
'localhost',
'127.0.0.1:8000',
'example.com',]
WSGI_APPLICATION = 'djangotutorial.wsgi.application'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
]
# Added middleware
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# URLs
ROOT_URLCONF = 'djangotutorial.urls'
# Database
DATABASES = {
'default': {
'ENGINE': os.environ.get('DB_ENGINE'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASSWORD'),
'HOST': os.environ.get('DB_HOST'),
'PORT': os.environ.get('DB_PORT'),
}
}
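# Illustrative environment values (not part of this project's config):
# DB_ENGINE=django.db.backends.postgresql, DB_HOST=localhost, DB_PORT=5432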
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',},
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',},
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',},
]
# Base template folder & templating engine
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
"DIRS": [os.path.join(BASE_DIR, "djangotutorial/templates/")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
"django.template.context_processors.static",
"django.template.context_processors.media",
],
},
},
]
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, "static")
]
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'EST'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Etc.
APPEND_SLASH = True
| [] | [] | [
"DB_ENGINE",
"DB_HOST",
"DB_PORT",
"DB_NAME",
"SECRET_KEY",
"DB_USER"
] | [] | ["DB_ENGINE", "DB_HOST", "DB_PORT", "DB_NAME", "SECRET_KEY", "DB_USER"] | python | 6 | 0 | |
gcp/run.py | # coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts actor or learner job depending on the GCP node type."""
import concurrent.futures
import json
import os
import subprocess
import sys
from absl import app
from absl import flags
from absl import logging
flags.DEFINE_string('environment', 'football', 'Environment to run.')
flags.DEFINE_string('agent', 'vtrace', 'Agent to run.')
flags.DEFINE_integer('workers', 1, 'Number of workers.')
flags.DEFINE_integer('actors_per_worker', 1,
'Number of actors to run on a single worker.')
FLAGS = flags.FLAGS
def get_py_main():
return os.path.join('/seed_rl', FLAGS.environment,
FLAGS.agent + '_main.py')
def run_learner(executor, config):
"""Runs learner job using executor."""
_, master_port = config.get('cluster').get('master')[0].split(':', 1)
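# TF_CONFIG's cluster spec gives the master as 'host:port'; the learner binds
# that port on all interfaces ('[::]') below.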
args = [
'python', get_py_main(),
'--run_mode=learner',
'--server_address=[::]:{}'.format(master_port),
'--num_envs={}'.format(FLAGS.workers * FLAGS.actors_per_worker)
]
if '--' in sys.argv:
args.extend(sys.argv[sys.argv.index('--') + 1:])
return executor.submit(subprocess.check_call, args)
def run_actor(executor, config, actor_id):
"""Runs actor job using executor."""
master_addr = config.get('cluster').get('master')[0]
args = [
'python', get_py_main(),
'--run_mode=actor',
'--server_address={}'.format(master_addr),
'--num_envs={}'.format(FLAGS.workers * FLAGS.actors_per_worker)
]
worker_index = config.get('task').get('index')
args.append('--task={}'.format(worker_index * FLAGS.actors_per_worker +
actor_id))
if '--' in sys.argv:
args.extend(sys.argv[sys.argv.index('--') + 1:])
return executor.submit(subprocess.check_call, args)
def main(_):
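# TF_CONFIG is set by the GCP training service. A representative (illustrative)
# value: {"cluster": {"master": ["host:port"], "worker": [...]},
# "task": {"type": "worker", "index": 0}}.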
tf_config = os.environ.get('TF_CONFIG', None)
logging.info(tf_config)
config = json.loads(tf_config)
job_type = config.get('task', {}).get('type')
os.environ['PYTHONPATH'] = '/'
os.environ['LD_LIBRARY_PATH'] = os.environ[
'LD_LIBRARY_PATH'] + ':/root/.mujoco/mjpro150/bin'
executor = concurrent.futures.ThreadPoolExecutor(
max_workers=FLAGS.actors_per_worker)
futures = []
if job_type == 'master':
futures.append(run_learner(executor, config))
else:
assert job_type == 'worker', 'Unexpected task type: {}'.format(job_type)
for actor_id in range(FLAGS.actors_per_worker):
futures.append(run_actor(executor, config, actor_id))
for f in futures:
f.result()
if __name__ == '__main__':
app.run(main)
| [] | [] | [
"LD_LIBRARY_PATH",
"TF_CONFIG",
"PYTHONPATH"
] | [] | ["LD_LIBRARY_PATH", "TF_CONFIG", "PYTHONPATH"] | python | 3 | 0 | |
pkg/runner/run_context_test.go | package runner
import (
"context"
"fmt"
"os"
"regexp"
"runtime"
"sort"
"strings"
"testing"
"github.com/nektos/act/pkg/model"
log "github.com/sirupsen/logrus"
assert "github.com/stretchr/testify/assert"
yaml "gopkg.in/yaml.v3"
)
func TestRunContext_EvalBool(t *testing.T) {
var yml yaml.Node
err := yml.Encode(map[string][]interface{}{
"os": {"Linux", "Windows"},
"foo": {"bar", "baz"},
})
assert.NoError(t, err)
rc := &RunContext{
Config: &Config{
Workdir: ".",
},
Env: map[string]string{
"SOMETHING_TRUE": "true",
"SOMETHING_FALSE": "false",
"SOME_TEXT": "text",
},
Run: &model.Run{
JobID: "job1",
Workflow: &model.Workflow{
Name: "test-workflow",
Jobs: map[string]*model.Job{
"job1": {
Strategy: &model.Strategy{
RawMatrix: yml,
},
},
},
},
},
Matrix: map[string]interface{}{
"os": "Linux",
"foo": "bar",
},
StepResults: map[string]*model.StepResult{
"id1": {
Conclusion: model.StepStatusSuccess,
Outcome: model.StepStatusFailure,
Outputs: map[string]string{
"foo": "bar",
},
},
},
}
rc.ExprEval = rc.NewExpressionEvaluator()
tables := []struct {
in string
out bool
wantErr bool
}{
// The basic ones
{in: "failure()", out: false},
{in: "success()", out: true},
{in: "cancelled()", out: false},
{in: "always()", out: true},
// TODO: move to sc.NewExpressionEvaluator(), because "steps" context is not available here
// {in: "steps.id1.conclusion == 'success'", out: true},
// {in: "steps.id1.conclusion != 'success'", out: false},
// {in: "steps.id1.outcome == 'failure'", out: true},
// {in: "steps.id1.outcome != 'failure'", out: false},
{in: "true", out: true},
{in: "false", out: false},
// TODO: This does not throw an error, because the evaluator does not know if the expression is inside ${{ }} or not
// {in: "!true", wantErr: true},
// {in: "!false", wantErr: true},
{in: "1 != 0", out: true},
{in: "1 != 1", out: false},
{in: "${{ 1 != 0 }}", out: true},
{in: "${{ 1 != 1 }}", out: false},
{in: "1 == 0", out: false},
{in: "1 == 1", out: true},
{in: "1 > 2", out: false},
{in: "1 < 2", out: true},
// And or
{in: "true && false", out: false},
{in: "true && 1 < 2", out: true},
{in: "false || 1 < 2", out: true},
{in: "false || false", out: false},
// None boolable
{in: "env.UNKNOWN == 'true'", out: false},
{in: "env.UNKNOWN", out: false},
// Inline expressions
{in: "env.SOME_TEXT", out: true},
{in: "env.SOME_TEXT == 'text'", out: true},
{in: "env.SOMETHING_TRUE == 'true'", out: true},
{in: "env.SOMETHING_FALSE == 'true'", out: false},
{in: "env.SOMETHING_TRUE", out: true},
{in: "env.SOMETHING_FALSE", out: true},
// TODO: This does not throw an error, because the evaluator does not know if the expression is inside ${{ }} or not
// {in: "!env.SOMETHING_TRUE", wantErr: true},
// {in: "!env.SOMETHING_FALSE", wantErr: true},
{in: "${{ !env.SOMETHING_TRUE }}", out: false},
{in: "${{ !env.SOMETHING_FALSE }}", out: false},
{in: "${{ ! env.SOMETHING_TRUE }}", out: false},
{in: "${{ ! env.SOMETHING_FALSE }}", out: false},
{in: "${{ env.SOMETHING_TRUE }}", out: true},
{in: "${{ env.SOMETHING_FALSE }}", out: true},
{in: "${{ !env.SOMETHING_TRUE }}", out: false},
{in: "${{ !env.SOMETHING_FALSE }}", out: false},
{in: "${{ !env.SOMETHING_TRUE && true }}", out: false},
{in: "${{ !env.SOMETHING_FALSE && true }}", out: false},
{in: "${{ !env.SOMETHING_TRUE || true }}", out: true},
{in: "${{ !env.SOMETHING_FALSE || false }}", out: false},
{in: "${{ env.SOMETHING_TRUE && true }}", out: true},
{in: "${{ env.SOMETHING_FALSE || true }}", out: true},
{in: "${{ env.SOMETHING_FALSE || false }}", out: true},
// TODO: This does not throw an error, because the evaluator does not know if the expression is inside ${{ }} or not
// {in: "!env.SOMETHING_TRUE || true", wantErr: true},
{in: "${{ env.SOMETHING_TRUE == 'true'}}", out: true},
{in: "${{ env.SOMETHING_FALSE == 'true'}}", out: false},
{in: "${{ env.SOMETHING_FALSE == 'false'}}", out: true},
{in: "${{ env.SOMETHING_FALSE }} && ${{ env.SOMETHING_TRUE }}", out: true},
// All together now
{in: "false || env.SOMETHING_TRUE == 'true'", out: true},
{in: "true || env.SOMETHING_FALSE == 'true'", out: true},
{in: "true && env.SOMETHING_TRUE == 'true'", out: true},
{in: "false && env.SOMETHING_TRUE == 'true'", out: false},
{in: "env.SOMETHING_FALSE == 'true' && env.SOMETHING_TRUE == 'true'", out: false},
{in: "env.SOMETHING_FALSE == 'true' && true", out: false},
{in: "${{ env.SOMETHING_FALSE == 'true' }} && true", out: true},
{in: "true && ${{ env.SOMETHING_FALSE == 'true' }}", out: true},
// Check github context
{in: "github.actor == 'nektos/act'", out: true},
{in: "github.actor == 'unknown'", out: false},
// The special ACT flag
{in: "${{ env.ACT }}", out: true},
{in: "${{ !env.ACT }}", out: false},
// Invalid expressions should be reported
{in: "INVALID_EXPRESSION", wantErr: true},
}
updateTestIfWorkflow(t, tables, rc)
for _, table := range tables {
table := table
t.Run(table.in, func(t *testing.T) {
assertObject := assert.New(t)
b, err := EvalBool(rc.ExprEval, table.in)
if table.wantErr {
assertObject.Error(err)
}
assertObject.Equal(table.out, b, fmt.Sprintf("Expected %s to be %v, was %v", table.in, table.out, b))
})
}
}
func updateTestIfWorkflow(t *testing.T, tables []struct {
in string
out bool
wantErr bool
}, rc *RunContext) {
var envs string
keys := make([]string, 0, len(rc.Env))
for k := range rc.Env {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
envs += fmt.Sprintf(" %s: %s\n", k, rc.Env[k])
}
// editorconfig-checker-disable
workflow := fmt.Sprintf(`
name: "Test what expressions result in true and false on GitHub"
on: push
env:
%s
jobs:
test-ifs-and-buts:
runs-on: ubuntu-latest
steps:
`, envs)
// editorconfig-checker-enable
for i, table := range tables {
if table.wantErr || strings.HasPrefix(table.in, "github.actor") {
continue
}
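// Rewrite "${{ ... }}" to "€{{ ... }}" in the step name so GitHub prints the
// expression literally instead of evaluating it inside the name.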
expressionPattern := regexp.MustCompile(`\${{\s*(.+?)\s*}}`)
expr := expressionPattern.ReplaceAllStringFunc(table.in, func(match string) string {
return fmt.Sprintf("€{{ %s }}", expressionPattern.ReplaceAllString(match, "$1"))
})
echo := fmt.Sprintf(`run: echo "%s should be false, but was evaluated to true;" exit 1;`, table.in)
name := fmt.Sprintf(`"❌ I should not run, expr: %s"`, expr)
if table.out {
echo = `run: echo OK`
name = fmt.Sprintf(`"✅ I should run, expr: %s"`, expr)
}
workflow += fmt.Sprintf("\n - name: %s\n id: step%d\n if: %s\n %s\n", name, i, table.in, echo)
if table.out {
workflow += fmt.Sprintf("\n - name: \"Double checking expr: %s\"\n if: steps.step%d.conclusion == 'skipped'\n run: echo \"%s should have been true, but wasn't\"\n", expr, i, table.in)
}
}
file, err := os.Create("../../.github/workflows/test-if.yml")
if err != nil {
t.Fatal(err)
}
_, err = file.WriteString(workflow)
if err != nil {
t.Fatal(err)
}
}
func TestRunContext_GetBindsAndMounts(t *testing.T) {
rctemplate := &RunContext{
Name: "TestRCName",
Run: &model.Run{
Workflow: &model.Workflow{
Name: "TestWorkflowName",
},
},
Config: &Config{
BindWorkdir: false,
},
}
tests := []struct {
windowsPath bool
name string
rc *RunContext
wantbind string
wantmount string
}{
{false, "/mnt/linux", rctemplate, "/mnt/linux", "/mnt/linux"},
{false, "/mnt/path with spaces/linux", rctemplate, "/mnt/path with spaces/linux", "/mnt/path with spaces/linux"},
{true, "C:\\Users\\TestPath\\MyTestPath", rctemplate, "/mnt/c/Users/TestPath/MyTestPath", "/mnt/c/Users/TestPath/MyTestPath"},
{true, "C:\\Users\\Test Path with Spaces\\MyTestPath", rctemplate, "/mnt/c/Users/Test Path with Spaces/MyTestPath", "/mnt/c/Users/Test Path with Spaces/MyTestPath"},
{true, "/LinuxPathOnWindowsShouldFail", rctemplate, "", ""},
}
isWindows := runtime.GOOS == "windows"
for _, testcase := range tests {
// pin for scopelint
testcase := testcase
for _, bindWorkDir := range []bool{true, false} {
// pin for scopelint
bindWorkDir := bindWorkDir
testBindSuffix := ""
if bindWorkDir {
testBindSuffix = "Bind"
}
// Only run windows path tests on windows and non-windows on non-windows
if (isWindows && testcase.windowsPath) || (!isWindows && !testcase.windowsPath) {
t.Run((testcase.name + testBindSuffix), func(t *testing.T) {
config := testcase.rc.Config
config.Workdir = testcase.name
config.BindWorkdir = bindWorkDir
gotbind, gotmount := rctemplate.GetBindsAndMounts()
// Name binds/mounts are either/or
if config.BindWorkdir {
fullBind := testcase.name + ":" + testcase.wantbind
if runtime.GOOS == "darwin" {
fullBind += ":delegated"
}
assert.Contains(t, gotbind, fullBind)
} else {
mountkey := testcase.rc.jobContainerName()
assert.EqualValues(t, testcase.wantmount, gotmount[mountkey])
}
})
}
}
}
t.Run("ContainerVolumeMountTest", func(t *testing.T) {
tests := []struct {
name string
volumes []string
wantbind string
wantmount map[string]string
}{
{"BindAnonymousVolume", []string{"/volume"}, "/volume", map[string]string{}},
{"BindHostFile", []string{"/path/to/file/on/host:/volume"}, "/path/to/file/on/host:/volume", map[string]string{}},
{"MountExistingVolume", []string{"volume-id:/volume"}, "", map[string]string{"volume-id": "/volume"}},
}
for _, testcase := range tests {
t.Run(testcase.name, func(t *testing.T) {
job := &model.Job{}
err := job.RawContainer.Encode(map[string][]string{
"volumes": testcase.volumes,
})
assert.NoError(t, err)
rc := rctemplate.Clone()
rc.Run.JobID = "job1"
rc.Run.Workflow.Jobs = map[string]*model.Job{"job1": job}
gotbind, gotmount := rc.GetBindsAndMounts()
if len(testcase.wantbind) > 0 {
assert.Contains(t, gotbind, testcase.wantbind)
}
for k, v := range testcase.wantmount {
assert.Contains(t, gotmount, k)
assert.Equal(t, gotmount[k], v)
}
})
}
})
}
func TestGetGitHubContext(t *testing.T) {
log.SetLevel(log.DebugLevel)
cwd, err := os.Getwd()
assert.Nil(t, err)
rc := &RunContext{
Config: &Config{
EventName: "push",
Workdir: cwd,
},
Run: &model.Run{
Workflow: &model.Workflow{
Name: "GitHubContextTest",
},
},
Name: "GitHubContextTest",
CurrentStep: "step",
Matrix: map[string]interface{}{},
Env: map[string]string{},
ExtraPath: []string{},
StepResults: map[string]*model.StepResult{},
OutputMappings: map[MappableOutput]MappableOutput{},
}
ghc := rc.getGithubContext()
log.Debugf("%v", ghc)
actor := "nektos/act"
if a := os.Getenv("ACT_ACTOR"); a != "" {
actor = a
}
repo := "nektos/act"
if r := os.Getenv("ACT_REPOSITORY"); r != "" {
repo = r
}
owner := "nektos"
if o := os.Getenv("ACT_OWNER"); o != "" {
owner = o
}
assert.Equal(t, ghc.RunID, "1")
assert.Equal(t, ghc.Workspace, rc.Config.containerPath(cwd))
assert.Equal(t, ghc.RunNumber, "1")
assert.Equal(t, ghc.RetentionDays, "0")
assert.Equal(t, ghc.Actor, actor)
assert.Equal(t, ghc.Repository, repo)
assert.Equal(t, ghc.RepositoryOwner, owner)
assert.Equal(t, ghc.RunnerPerflog, "/dev/null")
assert.Equal(t, ghc.EventPath, ActPath+"/workflow/event.json")
assert.Equal(t, ghc.Token, rc.Config.Secrets["GITHUB_TOKEN"])
}
func createIfTestRunContext(jobs map[string]*model.Job) *RunContext {
rc := &RunContext{
Config: &Config{
Workdir: ".",
Platforms: map[string]string{
"ubuntu-latest": "ubuntu-latest",
},
},
Env: map[string]string{},
Run: &model.Run{
JobID: "job1",
Workflow: &model.Workflow{
Name: "test-workflow",
Jobs: jobs,
},
},
}
rc.ExprEval = rc.NewExpressionEvaluator()
return rc
}
func createJob(t *testing.T, input string, result string) *model.Job {
var job *model.Job
err := yaml.Unmarshal([]byte(input), &job)
assert.NoError(t, err)
job.Result = result
return job
}
func TestRunContextIsEnabled(t *testing.T) {
log.SetLevel(log.DebugLevel)
assertObject := assert.New(t)
// success()
rc := createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: ubuntu-latest
if: success()`, ""),
})
assertObject.True(rc.isEnabled(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: ubuntu-latest`, "failure"),
"job2": createJob(t, `runs-on: ubuntu-latest
needs: [job1]
if: success()`, ""),
})
rc.Run.JobID = "job2"
assertObject.False(rc.isEnabled(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: ubuntu-latest`, "success"),
"job2": createJob(t, `runs-on: ubuntu-latest
needs: [job1]
if: success()`, ""),
})
rc.Run.JobID = "job2"
assertObject.True(rc.isEnabled(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: ubuntu-latest`, "failure"),
"job2": createJob(t, `runs-on: ubuntu-latest
if: success()`, ""),
})
rc.Run.JobID = "job2"
assertObject.True(rc.isEnabled(context.Background()))
// failure()
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: ubuntu-latest
if: failure()`, ""),
})
assertObject.False(rc.isEnabled(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: ubuntu-latest`, "failure"),
"job2": createJob(t, `runs-on: ubuntu-latest
needs: [job1]
if: failure()`, ""),
})
rc.Run.JobID = "job2"
assertObject.True(rc.isEnabled(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: ubuntu-latest`, "success"),
"job2": createJob(t, `runs-on: ubuntu-latest
needs: [job1]
if: failure()`, ""),
})
rc.Run.JobID = "job2"
assertObject.False(rc.isEnabled(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: ubuntu-latest`, "failure"),
"job2": createJob(t, `runs-on: ubuntu-latest
if: failure()`, ""),
})
rc.Run.JobID = "job2"
assertObject.False(rc.isEnabled(context.Background()))
// always()
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: ubuntu-latest
if: always()`, ""),
})
assertObject.True(rc.isEnabled(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: ubuntu-latest`, "failure"),
"job2": createJob(t, `runs-on: ubuntu-latest
needs: [job1]
if: always()`, ""),
})
rc.Run.JobID = "job2"
assertObject.True(rc.isEnabled(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: ubuntu-latest`, "success"),
"job2": createJob(t, `runs-on: ubuntu-latest
needs: [job1]
if: always()`, ""),
})
rc.Run.JobID = "job2"
assertObject.True(rc.isEnabled(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: ubuntu-latest`, "success"),
"job2": createJob(t, `runs-on: ubuntu-latest
if: always()`, ""),
})
rc.Run.JobID = "job2"
assertObject.True(rc.isEnabled(context.Background()))
}
func TestRunContextGetEnv(t *testing.T) {
tests := []struct {
description string
rc *RunContext
targetEnv string
want string
}{
{
description: "Env from Config should overwrite",
rc: &RunContext{
Config: &Config{
Env: map[string]string{"OVERWRITTEN": "true"},
},
Run: &model.Run{
Workflow: &model.Workflow{
Jobs: map[string]*model.Job{"test": {Name: "test"}},
Env: map[string]string{"OVERWRITTEN": "false"},
},
JobID: "test",
},
},
targetEnv: "OVERWRITTEN",
want: "true",
},
{
description: "No overwrite occurs",
rc: &RunContext{
Config: &Config{
Env: map[string]string{"SOME_OTHER_VAR": "true"},
},
Run: &model.Run{
Workflow: &model.Workflow{
Jobs: map[string]*model.Job{"test": {Name: "test"}},
Env: map[string]string{"OVERWRITTEN": "false"},
},
JobID: "test",
},
},
targetEnv: "OVERWRITTEN",
want: "false",
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
envMap := test.rc.GetEnv()
assert.EqualValues(t, test.want, envMap[test.targetEnv])
})
}
}
| [
"\"ACT_ACTOR\"",
"\"ACT_REPOSITORY\"",
"\"ACT_OWNER\""
] | [] | [
"ACT_REPOSITORY",
"ACT_OWNER",
"ACT_ACTOR"
] | [] | ["ACT_REPOSITORY", "ACT_OWNER", "ACT_ACTOR"] | go | 3 | 0 | |
monitoring/server_checker.py | #!/usr/bin/env python
import socket
import paramiko
from datetime import datetime
from email.mime.text import MIMEText
import smtplib
import atexit
import ssl
import time
import os
# #### VARIABLES #### #
# list of servers to check with the following items in the
# definitions per-server: ('hostname', 'ssl or plain', portnumber)
host_ip = os.environ['HOST_IP']
host_username = os.environ['USERNAME']
print(host_ip)
SERVER_LIST = [
('192.168.70.10', 'plain', 1337, 'gryffindor_user_3'),
('192.168.70.3', 'plain', 1337, 'gryffindor_user_2'),
('192.168.70.5', 'plain', 1337, 'gryffindor_user_1'),
]
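# Exclude this host's own entry so the checker does not probe itself.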
SERVER_LIST.remove((host_ip, 'plain', 1337, host_username))
# Globally define these lists as 'empty' for population later.
SRV_DOWN = []
SRV_UP = []
# Email handling items - email addresses
ADMIN_NOTIFY_LIST = ['[email protected]','[email protected]','[email protected]','[email protected]','[email protected]','[email protected]']
FROM_ADDRESS = '[email protected]'
# Valid Priorities for Mail
LOW = 1
NORMAL = 2
HIGH = 3
# Begin Execution Here
def exit():
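# NOTE: this helper only logs a message; it shadows the builtin exit() and
# does not terminate the process.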
print("%s Server Status Checker Now Exiting." % (current_timestamp()))
def current_timestamp():
return datetime.now().strftime("[%Y-%m-%d %H:%M:%S]")
def send_server_status_report():
# Init priority - should be NORMAL for most cases, so init it to that.
priority = NORMAL
# Init the send_mail flag. Ideally, we would be sending mail if this function is
# called, but we need to make sure that there are cases where it's not necessary
# such as when there are no offline servers.
send_mail = True
if len(SRV_UP) == 0:
up_str = "Servers online: None! ***THIS IS REALLY BAD!!!***"
priority = HIGH
else:
up_str = ""
#up_str = "Servers online: " + ", ".join(SRV_UP)
if len(SRV_DOWN) == 0:
down_str = "Servers down: None!"
send_mail = False
else:
down_str = "Servers down: " + ", ".join(SRV_DOWN) + " ***CHECK IF SERVERS LISTED ARE REALLY DOWN!***"
priority = HIGH
if len(SRV_UP) == len(SERVER_LIST) and len(SRV_DOWN) == 0:
priority = LOW
if send_mail:
body = """Server Status Report - %s
%s
%s""" % (current_timestamp(), down_str, up_str)
# craft msg base
msg = MIMEText(body)
msg['Subject'] = "Server Status Report from " + host_ip + " - %s" % (current_timestamp())
msg['From'] = FROM_ADDRESS
msg['Sender'] = FROM_ADDRESS # This is sort of important...
if priority == LOW:
# ThunderBird "Lowest", works with Exchange.
msg['X-Priority'] = '5'
elif priority == NORMAL:
# Plain old "Normal". Works with Exchange.
msg['X-Priority'] = 'Normal'
elif priority == HIGH:
# ThunderBird "Highest", works with Exchange.
msg['X-Priority'] = '1'
# Initialize SMTP session variable so it has the correct scope
# within this function, rather than just inside the 'try' statement.
smtp = None
try:
# SMTP is important, so configure it via Google Mail.
smtp = smtplib.SMTP('smtp.gmail.com', 587)
smtp.starttls()
smtp.ehlo()
smtp.login(FROM_ADDRESS, 'gryffindor')
except Exception as e:
print("Could not correctly establish SMTP connection with Google, error was: %s" % (e.__str__()))
exit()
# Bail out: without a working SMTP session there is nothing more to send.
return
for destaddr in ADMIN_NOTIFY_LIST:
# Delete any previous value first; assigning to a message header appends
# rather than replaces, so only one address should show up in 'To'.
del msg['To']
msg['To'] = destaddr
try:
# Actually send the email.
smtp.sendmail(FROM_ADDRESS, destaddr, msg.as_string())
print("%s Status email sent to [%s]." % (current_timestamp(), destaddr))
except Exception as e:
print("Could not send message, error was: %s" % (e.__str__()))
continue
# No more emails, so close the SMTP connection!
smtp.close()
else:
print("%s All's good, do nothing." % (current_timestamp()))
def main():
for (srv, mechanism, port, username) in sorted(SERVER_LIST):
# [ 'serverhost' , 'ssl' or 'plain' ]
print(srv, ", ", mechanism, ", ", port, ", ", username)
try:
if mechanism == 'plain':
# Use a plain text connector for this.
print("%s Using Plain for [%s]..." % (current_timestamp(), srv))
socket.create_connection(("%s" % srv, port), timeout=10)
elif mechanism == 'ssl':
# We're going to use an SSL connector for this.
print("%s Using SSL for [%s]..." % (current_timestamp(), srv))
ssl.wrap_socket(socket.create_connection(("%s" % srv, port), timeout=10))
else:
print("%s Invalid mechanism defined for [%s], skipping..." % (current_timestamp(), srv))
continue
SRV_UP.append(srv)
print ("%s %s: UP" % (current_timestamp(), srv))
except socket.timeout:
SRV_DOWN.append(srv)
print ("%s %s: DOWN" % (current_timestamp(), srv))
continue
except Exception as err:
print ("An error occurred: %s" % (err.__str__()))
SRV_DOWN.append(srv)
exit()
# Skip the SSH check; this server already failed the socket test.
continue
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(srv, username=username, password="gryffindor", port=port, timeout=30)
print ("%s: SSH CONNECTED" % (current_timestamp()))
except Exception as e:
SRV_DOWN.append(srv)
print ("%s %s: DOWN" % (current_timestamp(), srv))
continue
send_server_status_report() # Create email to send the status notices.
exit() # Exit when done
if __name__ == "__main__":
print("%s Server Status Checker Running...." % (current_timestamp()))
while True:
# reset these global variables on every run
SRV_DOWN = []
SRV_UP = []
main()
time.sleep(180)
| [] | [] | [
"HOST_IP",
"USERNAME"
] | [] | ["HOST_IP", "USERNAME"] | python | 2 | 0 | |
cmd/webhook.go | package main
import (
"crypto/sha256"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"github.com/ghodss/yaml"
"github.com/golang/glog"
av1 "k8s.io/api/admission/v1"
"strconv"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
uv1 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/hashicorp/vault/api"
"os"
"bytes"
yamlv2 "gopkg.in/yaml.v2"
)
var (
runtimeScheme = runtime.NewScheme()
codecs = serializer.NewCodecFactory(runtimeScheme)
deserializer = codecs.UniversalDeserializer()
// (https://github.com/kubernetes/kubernetes/issues/57982)
defaulter = runtime.ObjectDefaulter(runtimeScheme)
token = os.Getenv("TOKEN")
role = os.Getenv("ROLE")
vault_addr = os.Getenv("VAULT_ADDR")
insecure, _ = strconv.ParseBool(os.Getenv("VAULT_SKIP_VERIFY"))
vaultConfigPath = os.Getenv("VAULT_CONFIG_PATH")
)
var ignoredNamespaces = []string{
metav1.NamespaceSystem,
metav1.NamespacePublic,
}
const (
admissionWebhookAnnotationInjectKey = "vault-manifest/inject"
admissionWebhookAnnotationStatusKey = "vault-manifest/status"
admissionWebhookAnnotationPrefixKey = "vault-manifest-inject-secret"
)
type VaultConfig struct {
Address string `yaml:"address"`
Token string `yaml:"token"`
Insecure bool `yaml:"insecure"`
}
type WebhookServer struct {
sidecarConfig *Config
server *http.Server
}
type Config struct {
Containers []corev1.Container `yaml:"containers"`
Volumes []corev1.Volume `yaml:"volumes"`
}
type vaultTokenRequest struct {
Role string `json:"role"`
Jwt string `json:"jwt"`
}
type vaultTokenResponse struct {
Auth struct {
Token string `json:"client_token"`
Accessor string `json:"accessor"`
Policies string `json:"policies"`
Metadata interface{} `json:"metadata"`
Lease_duration int64 `json:"lease_duration"`
Renewable bool `json:"renewable"`
} `json:"auth"`
}
// Webhook Server parameters
type WhSvrParameters struct {
port int // webhook server port
certFile string // path to the x509 certificate for https
keyFile string // path to the x509 private key matching `CertFile`
sidecarCfgFile string // path to sidecar injector configuration file
}
type patchOperation struct {
Op string `json:"op"`
Path string `json:"path"`
Value interface{} `json:"value,omitempty"`
}
func getVaultSecret(client *api.Client, vaultPath string, key string) (string, error) {
renewVaultTokenLease()
secret, err := client.Logical().Read(vaultPath)
if err != nil {
glog.Errorf("Error fetching secret :%v",err)
return "", err
}
if secret == nil {
glog.Errorf("Error fetching secret from the specified path")
return "", err
}
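// KV version 2 secrets nest the user payload under a "data" key, so the
// actual key/value pairs live in secret.Data["data"].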
m, ok := secret.Data["data"].(map[string]interface{})
if !ok {
return "", err
}
return m[key].(string), nil
}
func initVaultClient() (*api.Client, error){
config := &api.Config{
Address: vault_addr,
}
tlsConfig := &api.TLSConfig{
Insecure: insecure,
}
config.ConfigureTLS(tlsConfig)
client, err := api.NewClient(config)
if err != nil {
glog.Errorf("Error creating vault client : %v", err)
return nil, err
}
client.SetToken(token)
return client, nil
}
func getVaultConfig(){
config := &VaultConfig{}
if vaultConfigPath == "" {
glog.Infof("No vaultconfig file defined using env vars")
} else {
// Open config file
file, err := os.Open(vaultConfigPath)
if err != nil {
glog.Infof("Unable to locate vault config file, using env vars")
return
}
defer file.Close()
// Init new YAML decode
d := yamlv2.NewDecoder(file)
// Start YAML decoding from file
if err := d.Decode(&config); err != nil {
glog.Infof("Vault configuration file not valid")
return
} else {
if config.Token != "" {
token = config.Token//os.Getenv("TOKEN")
}
if config.Address != "" {
vault_addr = config.Address//os.Getenv("VAULT_ADDR")
}
if config.Insecure == true {
insecure = config.Insecure//os.Getenv("VAULT_SKIP_VERIFY")
}
}
}
if token == "" {
glog.Infof("Unable to fetch token, trying kube auth method in vault")
content, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
if err != nil {
glog.Errorf("No service token assigned, unable to continue: %s", err)
}
// Convert []byte to string and print to screen
kube_token := string(content)
token, err = getVaultToken(kube_token)
if err != nil {
glog.Errorf("Unable to fetch vault token, this probably is not going to end well !!")
}
}
}
func init() {
//_ = corev1.AddToScheme(runtimeScheme)
_ = admissionregistrationv1.AddToScheme(runtimeScheme)
// defaulting with webhooks:
// https://github.com/kubernetes/kubernetes/issues/57982
//_ = v1.AddToScheme(runtimeScheme)
}
func loadConfig(configFile string) (*Config, error) {
data, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
glog.Infof("New configuration: sha256sum %x", sha256.Sum256(data))
var cfg Config
if err := yaml.Unmarshal(data, &cfg); err != nil {
return nil, err
}
return &cfg, nil
}
func getVaultToken(kube_token string) (string, error){
url := fmt.Sprintf(vault_addr+"/v1/auth/kubernetes/login")
requestPayload := &vaultTokenRequest{
Role: role,
Jwt: kube_token,
}
j, err := json.Marshal(requestPayload)
if err != nil {
return "", err
}
resp, err := http.Post(url, "application/json", bytes.NewBuffer(j))
if err != nil {
glog.Errorf("Unable to fetch token from vault using kubernetes auth and the service token...")
return "", err
}
defer resp.Body.Close()
bodyBytes, _ := ioutil.ReadAll(resp.Body)
var reponseData vaultTokenResponse
json.Unmarshal(bodyBytes, &reponseData)
if strings.Contains(string(bodyBytes), "error:") {
glog.Errorf("Error from vault: %s", string(bodyBytes))
}
if reponseData.Auth.Token != "" {
glog.Infof("Fetched token using k8s auth")
}
return reponseData.Auth.Token, nil
}
func renewVaultTokenLease(){
url := fmt.Sprintf(vault_addr+"/v1/auth/token/renew-self")
client := &http.Client{}
postData := []byte(`{}`)
req, err := http.NewRequest("POST", url, bytes.NewReader(postData))
if err != nil {
glog.Errorf("Unable to build token renewal request: %v", err)
return
}
req.Header.Add("X-Vault-Token", token)
resp, err := client.Do(req)
if err != nil {
// Check the error before touching resp; resp is nil when Do fails.
glog.Errorf("Unable to renew vault token, further requests might fail..")
return
}
defer resp.Body.Close()
}
// Check whether the target resource needs to be mutated
func mutationRequired(ignoredList []string, kubeObj *uv1.Unstructured) bool {
// skip special kubernete system namespaces
for _, namespace := range ignoredList {
if kubeObj.GetNamespace() == namespace {
glog.Infof("Skip mutation for %v for it's in special namespace:%v", kubeObj.GetName(), kubeObj.GetNamespace())
return false
}
}
annotations := kubeObj.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
status := annotations[admissionWebhookAnnotationStatusKey]
// determine whether to perform mutation based on annotation for the target resource
var required bool
if strings.ToLower(status) == "injected" {
required = false
} else {
switch strings.ToLower(annotations[admissionWebhookAnnotationInjectKey]) {
default:
required = false
case "y", "yes", "true", "on":
required = true
}
}
glog.Infof("Mutation policy for %v/%v: status: %q required:%v", kubeObj.GetNamespace(), kubeObj.GetName(), status, required)
return required
}
func updateAnnotation(target map[string]string, added map[string]string) (patch []patchOperation) {
for key, value := range added {
if target == nil || target[key] == "" {
target = map[string]string{}
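// JSON Pointer (RFC 6901) escapes "/" inside a key as "~1", so encode the
// annotation key before embedding it in the patch path.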
patch = append(patch, patchOperation{
Op: "add",
Path: "/metadata/annotations/" + strings.Replace(key, "/","~1", -1),
Value: value,
})
} else {
patch = append(patch, patchOperation{
Op: "replace",
Path: "/metadata/annotations/" + key,
Value: value,
})
}
}
return patch
}
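// skipAnnotation filters out annotations that do not follow the injection
// convention. A qualifying annotation (hypothetical values) looks like:
// vault-manifest-inject-secret/spec.template.foo: "secret/data/app.password"
// i.e. a dotted manifest path in the key and a "<vault path>.<key>" value.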
func skipAnnotation(key string, value string) bool {
if strings.Contains(key, admissionWebhookAnnotationPrefixKey) {
if len(strings.Split(key, "/")) == 2 {
if (strings.Split(key, "/")[0] != admissionWebhookAnnotationPrefixKey) {
glog.Errorf("Annotation not specified correctly : %v", key)
return true
}
if (len(strings.Split(strings.Split(key, "/")[1], ".")) < 2) {
glog.Errorf("Annotation key should have atleast two level of manifest object reference")
return true
}
if (len(strings.Split(value, ".")) == 2) {
if (len(strings.Split(value, "/")) > 1) {
return false
} else {
glog.Errorf("The Vault path should have atleast two level deep")
return true
}
}
glog.Errorf("Annotation didn't matched the filering criterion")
return true
} else {
glog.Errorf("Annotation not specified correctly : %v", key)
return true
}
} else {
return true
}
}
func patchVaultSecrets(annotations map[string]string) (patch []patchOperation){
client, err := initVaultClient()
if err != nil {
glog.Errorf("Error creating vault client: %v", err)
return []patchOperation{}
}
vaultPatch := []patchOperation{}
for key, value := range annotations {
if skipAnnotation(key, value) {
continue
}
path := "/" + strings.Replace(strings.SplitAfterN(key, "/", 2)[1], ".", "/", -1)
value, err := getVaultSecret(client, strings.Split(value, ".")[0], strings.SplitAfterN(value, ".", 2)[1])
if err == nil {
vaultPatch = append(vaultPatch, patchOperation{
Op: "add",
Path: path,
Value: value,
})
} else {
glog.Errorf("Error processing annotation :%v", key)
}
}
return vaultPatch
}
func createPatch(kubeObj *uv1.Unstructured, annotations map[string]string) ([]byte, error) {
var patch []patchOperation
patch = append(patch, patchVaultSecrets(kubeObj.GetAnnotations())...)
patch = append(patch, updateAnnotation(kubeObj.GetAnnotations(), annotations)...)
return json.Marshal(patch)
}
// main mutation process
func (whsvr *WebhookServer) mutate(ar *av1.AdmissionReview) *av1.AdmissionResponse {
getVaultConfig()
req := ar.Request
var kubeObj uv1.Unstructured
r := strings.NewReplacer("\n", "")
convertedBytes := []byte(r.Replace(string(req.Object.Raw)))
if err := kubeObj.UnmarshalJSON(req.Object.Raw); err != nil {
glog.Errorf("Error while unmarshalling to unstructured: %v", err)
}
if _, _, err := deserializer.Decode(convertedBytes, nil, &kubeObj); err != nil {
glog.Errorf("Can't decode body: %v", err)
}
glog.Infof("Annotations are: %v", kubeObj.GetAnnotations())
glog.Infof("AdmissionReview for Kind=%v, Namespace=%v Name=%v (%v%v) UID=%v patchOperation=%v UserInfo=%v",
req.Kind, req.Namespace, req.Name, kubeObj.GetName(), kubeObj.GetGenerateName(), req.UID, req.Operation, req.UserInfo)
// determine whether to perform mutation
if !mutationRequired(ignoredNamespaces, &kubeObj) {
glog.Infof("Skipping mutation for %s/%s%s due to policy check", kubeObj.GetNamespace(), kubeObj.GetName(), kubeObj.GetGenerateName())
return &av1.AdmissionResponse{
Allowed: true,
}
}
// Workaround: https://github.com/kubernetes/kubernetes/issues/57982
//applyDefaultsWorkaround(whsvr.sidecarConfig.Containers, whsvr.sidecarConfig.Volumes)
annotations := map[string]string{admissionWebhookAnnotationStatusKey: "injected"}
patchBytes, err := createPatch(&kubeObj, annotations)
if err != nil {
return &av1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
},
}
}
glog.Infof("AdmissionResponse: patch=%v\n", string(patchBytes))
return &av1.AdmissionResponse{
Allowed: true,
Patch: patchBytes,
PatchType: func() *av1.PatchType {
pt := av1.PatchTypeJSONPatch
return &pt
}(),
}
}
// Serve method for webhook server
func (whsvr *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
var body []byte
if r.Body != nil {
if data, err := ioutil.ReadAll(r.Body); err == nil {
body = data
}
}
if len(body) == 0 {
glog.Error("empty body")
http.Error(w, "empty body", http.StatusBadRequest)
return
}
// verify the content type is accurate
contentType := r.Header.Get("Content-Type")
if contentType != "application/json" {
glog.Errorf("Content-Type=%s, expect application/json", contentType)
http.Error(w, "invalid Content-Type, expect `application/json`", http.StatusUnsupportedMediaType)
return
}
var admissionResponse *av1.AdmissionResponse
ar := av1.AdmissionReview{}
if _, _, err := deserializer.Decode(body, nil, &ar); err != nil {
glog.Errorf("Can't decode body: %v", err)
admissionResponse = &av1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
},
}
} else {
admissionResponse = whsvr.mutate(&ar)
}
admissionReview := av1.AdmissionReview{}
if admissionResponse != nil {
admissionReview.Response = admissionResponse
if ar.Request != nil {
admissionReview.Response.UID = ar.Request.UID
}
}
resp, err := json.Marshal(admissionReview)
if err != nil {
glog.Errorf("Can't encode response: %v", err)
http.Error(w, fmt.Sprintf("could not encode response: %v", err), http.StatusInternalServerError)
}
glog.Infof("Ready to write reponse ...")
if _, err := w.Write(resp); err != nil {
glog.Errorf("Can't write response: %v", err)
http.Error(w, fmt.Sprintf("could not write response: %v", err), http.StatusInternalServerError)
}
}
| [
"\"TOKEN\"",
"\"ROLE\"",
"\"VAULT_ADDR\"",
"\"VAULT_SKIP_VERIFY\"",
"\"VAULT_CONFIG_PATH\"",
"\"TOKEN\"",
"\"VAULT_ADDR\"",
"\"VAULT_SKIP_VERIFY\""
] | [] | [
"VAULT_CONFIG_PATH",
"VAULT_SKIP_VERIFY",
"TOKEN",
"VAULT_ADDR",
"ROLE"
] | [] | ["VAULT_CONFIG_PATH", "VAULT_SKIP_VERIFY", "TOKEN", "VAULT_ADDR", "ROLE"] | go | 5 | 0 | |
vote/app.py | from flask import Flask, render_template, request, make_response, g
from redis import Redis
import os
import socket
import random
import json
option_a = os.getenv('OPTION_A', "Emacs")
option_b = os.getenv('OPTION_B', "Vi")
hostname = socket.gethostname()
version = 'v1'
# This is a sample comment
app = Flask(__name__)
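# Cache one Redis connection on Flask's per-request context object `g`, so
# repeated calls within a request reuse the same connection.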
def get_redis():
if not hasattr(g, 'redis'):
g.redis = Redis(host="redis", db=0, socket_timeout=5)
return g.redis
@app.route("/", methods=['POST','GET'])
def hello():
voter_id = request.cookies.get('voter_id')
if not voter_id:
# hex() in Python 3 has no trailing 'L' to strip, so keep everything after '0x'
voter_id = hex(random.getrandbits(64))[2:]
vote = None
if request.method == 'POST':
redis = get_redis()
vote = request.form['vote']
data = json.dumps({'voter_id': voter_id, 'vote': vote})
redis.rpush('votes', data)
resp = make_response(render_template(
'index.html',
option_a=option_a,
option_b=option_b,
hostname=hostname,
vote=vote,
version=version,
))
resp.set_cookie('voter_id', voter_id)
return resp
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, debug=True, threaded=True)
| [] | [] | [
"OPTION_A",
"OPTION_B"
] | [] | ["OPTION_A", "OPTION_B"] | python | 2 | 0 | |
bin2tif/bin2tif.py | #!/usr/bin/env python3
"""Converts .bin files to geotiff
"""
import argparse
import os
import json
import logging
from pyclowder.utils import setup_logging as do_setup_logging
from terrautils.extractors import load_json_file as do_load_json_file
from terrautils.formats import create_geotiff as do_create_geotiff
from terrautils.metadata import get_terraref_metadata as do_get_terraref_metadata, \
get_season_and_experiment as do_get_season_and_experiment
from terrautils.spatial import geojson_to_tuples as do_geojson_to_tuples
from terrautils.sensors import Sensors
import terraref.stereo_rgb
import terrautils.lemnatec
terrautils.lemnatec.SENSOR_METADATA_CACHE = os.path.dirname(os.path.realpath(__file__))
SELF_DESCRIPTION = "Maricopa agricultural gantry bin to geotiff converter"
EXTRACTOR_NAME = 'stereoTop'
EXTRCTOR_VERSION = "1.0"
def get_metadata_timestamp(metadata: dict) -> str:
"""Looks up the timestamp in the metadata
Arguments:
metadata: the metadata to find the timestamp in
"""
if 'content' in metadata:
check_md = metadata['content']
else:
check_md = metadata
timestamp = None
if not 'timestamp' in check_md:
if 'gantry_variable_metadata' in check_md:
if 'datetime' in check_md['gantry_variable_metadata']:
timestamp = check_md['gantry_variable_metadata']['datetime']
else:
timestamp = check_md['timestamp']
return timestamp
def save_result(working_space: str, result: dict) -> None:
"""Saves the result dictionary as JSON to a well known location in the
working space folder. Relative to the working space folder, the JSON
is stored in 'output/results.json'. Folders are created as needed.
Arguments:
working_space: path to our working space
result: dictionary containing the results of a run
"""
result_path = os.path.join(working_space, 'output')
if not os.path.exists(result_path):
os.makedirs(result_path)
result_path = os.path.join(result_path, 'result.json')
logging.info("Storing result at location: '%s'", result_path)
logging.debug("Result: %s", str(result))
with open(result_path, 'w') as out_file:
json.dump(result, out_file, indent=2)
def args_to_params(args: list) -> dict:
"""Looks through the arguments and returns a dict with the found values.
Arguments:
args: command line arguments as provided by argparse
"""
found = {}
# Setup the dictionary identifying the parameters
found['filename'] = args.bin_file
found['metadata'] = args.metadata_file
found['working_space'] = args.working_space
# Note: Return an empty dict if we're missing mandatory parameters
return found
def bin2tif(filename: str, metadata: str, working_space: str) -> dict:
"""Converts the bin file to a geotiff file
Arguments:
filename: the path to the .bin file
metadata: the path to the cleaned metadata file
working_space: the path to our working space
"""
result = {}
loaded_json = do_load_json_file(metadata)
if not loaded_json:
msg = "Unable to load JSON from file '%s'" % metadata
logging.error(msg)
logging.error(" JSON may be missing or invalid. Returning an error")
result['error'] = {'message': msg}
result['code'] = -1
return result
if 'content' in loaded_json:
parse_json = loaded_json['content']
else:
parse_json = loaded_json
terra_md_full = do_get_terraref_metadata(parse_json, EXTRACTOR_NAME)
if not terra_md_full:
msg = "Unable to find %s metadata in JSON file '%s'" % (EXTRACTOR_NAME, metadata)
logging.error(msg)
logging.error(" JSON may be missing or invalid. Returning an error")
result['error'] = {'message': msg}
result['code'] = -2
return result
timestamp = get_metadata_timestamp(terra_md_full)
if not timestamp:
msg = "Unable to find timestamp in JSON file '%s'" % filename
logging.error(msg)
logging.error(" JSON may be missing or invalid. Returning an error")
result['error'] = {'message': msg}
result['code'] = -3
return result
# Fetch experiment name from terra metadata
_, _, updated_experiment = do_get_season_and_experiment(timestamp, 'stereoTop', terra_md_full)
# if None in [season_name, experiment_name]:
# raise ValueError("season and experiment could not be determined")
#
# # Determine output directory
# self.log_info(resource, "Hierarchy: %s / %s / %s / %s / %s / %s / %s" % (season_name, experiment_name, self.sensors.get_display_name(),
# timestamp[:4], timestamp[5:7], timestamp[8:10], timestamp))
# target_dsid = build_dataset_hierarchy_crawl(host, secret_key, self.clowder_user, self.clowder_pass, self.clowderspace,
# season_name, experiment_name, self.sensors.get_display_name(),
# timestamp[:4], timestamp[5:7], timestamp[8:10],
# leaf_ds_name=self.sensors.get_display_name() + ' - ' + timestamp)
sensor = Sensors(base='', station='ua-mac', sensor='rgb_geotiff')
leaf_name = sensor.get_display_name()
bin_type = 'left' if filename.endswith('_left.bin') else 'right' if filename.endswith('_right.bin') else None
if not bin_type:
msg = "Bin file must be a left or right file: '%s'" % filename
logging.error(msg)
logging.error(" Returning an error")
result['error'] = {'message': msg}
result['code'] = -4
return result
terra_md_trim = do_get_terraref_metadata(parse_json)
if updated_experiment is not None:
terra_md_trim['experiment_metadata'] = updated_experiment
terra_md_trim['raw_data_source'] = filename
tiff_filename = os.path.splitext(os.path.basename(filename))[0] + '.tif'
tiff_path = os.path.join(working_space, tiff_filename)
try:
bin_shape = terraref.stereo_rgb.get_image_shape(terra_md_full, bin_type)
gps_bounds_bin = do_geojson_to_tuples(terra_md_full['spatial_metadata'][bin_type]['bounding_box'])
except KeyError:
msg = "Spatial metadata is not properly identified. Unable to continue"
logging.error(msg)
logging.error(" Returning an error")
result['error'] = {'message': msg}
result['code'] = -5
return result
# Extractor info
extractor_info = {
'name': EXTRACTOR_NAME,
'version': EXTRACTOR_VERSION,
'author': "[email protected]",
'description': "Maricopa agricultural gantry bin to geotiff converter",
'repository': [{"repType": "git", "repUrl": "https://github.com/terraref/extractors-stereo-rgb.git"}]
}
# Perform actual processing
new_image = terraref.stereo_rgb.process_raw(bin_shape, filename, None)
do_create_geotiff(new_image, gps_bounds_bin, tiff_path, None, True,
extractor_info, terra_md_full, compress=True)
# level1_md = build_metadata(host, self.extractor_info, target_dsid, terra_md_trim, 'dataset')
context = ['https://clowder.ncsa.illinois.edu/contexts/metadata.jsonld']
terra_md_trim['extractor_version'] = EXTRACTOR_VERSION
new_md = {
'@context': context,
'content': terra_md_trim,
'filename': tiff_filename,
'agent': {
'@type': 'cat:extractor',
'version': EXTRACTOR_VERSION,
'name': EXTRACTOR_NAME
}
}
# Setup the result
result['container'] = [{
'name': leaf_name,
'exists': False,
'metadata' : {
'replace': True,
'data': new_md
},
'file': [{
'path': tiff_path,
'key': sensor.sensor
}]
}]
result['code'] = 0
return result
def do_work(parser) -> None:
"""Function to prepare and execute work unit
Arguments:
parser: an instance of argparse.ArgumentParser
"""
parser.add_argument('--logging', '-l', nargs='?', default=os.getenv("LOGGING"),
help='file or url or logging configuration (default=None)')
parser.add_argument('--debug', '-d', action='store_const',
default=logging.WARN, const=logging.DEBUG,
help='enable debug logging (default=WARN)')
parser.add_argument('--info', '-i', action='store_const',
default=logging.WARN, const=logging.INFO,
help='enable info logging (default=WARN)')
parser.add_argument('bin_file', type=str, help='full path to the bin file to convert')
parser.add_argument('metadata_file', type=str, help='full path to the cleaned metadata')
parser.add_argument('working_space', type=str, help='the folder to use use as a workspace and for storing results')
args = parser.parse_args()
# start logging system
do_setup_logging(args.logging)
logging.getLogger().setLevel(args.debug if args.debug == logging.DEBUG else args.info)
params_dict = args_to_params(args)
logging.debug("Calling bin2tif() with the following parameters: %s", str(params_dict))
result = bin2tif(**params_dict)
# Save the result to a well known location
logging.debug("Saving the result to the working space: '%s'", params_dict['working_space'])
save_result(params_dict['working_space'], result)
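# Typical invocation (paths are illustrative, not from this repository):
# python3 bin2tif.py stereoTop_left.bin metadata_cleaned.json /tmp/workspace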
if __name__ == "__main__":
try:
PARSER = argparse.ArgumentParser(description=SELF_DESCRIPTION,
epilog="The cleaned metadata is written to the working space, and " +
"the results are written off the working space in 'output/result.json'")
do_work(PARSER)
except Exception as ex:
logging.error("Top level exception handler caught an exception: %s", str(ex))
raise
| [] | [] | [
"LOGGING"
] | [] | ["LOGGING"] | python | 1 | 0 | |
pkg/broker/logger/logger.go | package logger
import (
"fmt"
"os"
"path/filepath"
"github.com/jexia/semaphore/pkg/broker"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// WithFields adds structured context to the defined logger. Fields added
// to the child don't affect the parent, and vice versa.
func WithFields(parent *broker.Context, fields ...zap.Field) *broker.Context {
ctx := WithLogger(parent)
ctx.Zap = ctx.Zap.With(fields...)
return ctx
}
// WithLogger creates a child context
func WithLogger(parent *broker.Context) *broker.Context {
ctx := broker.Child(parent)
atom := zap.NewAtomicLevel()
if parent.Atom != nil {
atom.SetLevel(parent.Atom.Level())
}
config := zap.NewProductionEncoderConfig()
config.EncodeTime = zapcore.EpochNanosTimeEncoder
var encoder zapcore.Encoder
switch os.Getenv("LOG_ENCODER") {
case "json":
encoder = zapcore.NewJSONEncoder(config)
default:
encoder = zapcore.NewConsoleEncoder(config)
}
core := zapcore.NewCore(
encoder,
zapcore.Lock(os.Stdout),
atom,
)
ctx.Zap = zap.New(core, zap.AddStacktrace(zapcore.ErrorLevel)).Named(ctx.Module)
ctx.Atom = &atom
return ctx
}
// SetLevel sets all modules matching the given pattern with the given log level
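// For example, SetLevel(ctx, "flow*", zapcore.DebugLevel) enables debug
// logging for every module whose name matches "flow*" (the module name is
// illustrative).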
func SetLevel(ctx *broker.Context, pattern string, level zapcore.Level) error {
matched, err := filepath.Match(pattern, ctx.Module)
if err != nil {
return fmt.Errorf("failed to match pattern: %w", err)
}
for _, child := range ctx.Children {
// errors can only occur inside the pattern, which was validated above,
// so this error can safely be ignored
_ = SetLevel(child, pattern, level)
}
if (matched || pattern == ctx.Module) && ctx.Atom != nil {
ctx.Atom.SetLevel(level)
}
return nil
}
// Error logs a message at ErrorLevel. The message includes any fields passed
// at the log site, as well as any fields accumulated on the logger.
func Error(ctx *broker.Context, msg string, fields ...zap.Field) {
if ctx == nil {
return
}
if ctx.Zap == nil {
panic("context logger not set")
}
ctx.Zap.Error(msg, fields...)
}
// Warn logs a message at WarnLevel. The message includes any fields passed
// at the log site, as well as any fields accumulated on the logger.
func Warn(ctx *broker.Context, msg string, fields ...zap.Field) {
if ctx == nil {
return
}
if ctx.Zap == nil {
panic("context logger not set")
}
ctx.Zap.Warn(msg, fields...)
}
// Info logs a message at InfoLevel. The message includes any fields passed
// at the log site, as well as any fields accumulated on the logger.
func Info(ctx *broker.Context, msg string, fields ...zap.Field) {
if ctx == nil {
return
}
if ctx.Zap == nil {
panic("context logger not set")
}
ctx.Zap.Info(msg, fields...)
}
// Debug logs a message at DebugLevel. The message includes any fields passed
// at the log site, as well as any fields accumulated on the logger.
func Debug(ctx *broker.Context, msg string, fields ...zap.Field) {
if ctx == nil {
return
}
if ctx.Zap == nil {
panic("context logger not set")
}
ctx.Zap.Debug(msg, fields...)
}
| [
"\"LOG_ENCODER\""
] | [] | [
"LOG_ENCODER"
] | [] | ["LOG_ENCODER"] | go | 1 | 0 | |
rich/console.py | from collections.abc import Mapping, Sequence
from contextlib import contextmanager
from dataclasses import dataclass, field, replace
from enum import Enum
from functools import wraps
import inspect
from itertools import chain
import os
from operator import itemgetter
import platform
import re
import shutil
import sys
import threading
from typing import (
Any,
Callable,
cast,
Dict,
IO,
Iterable,
List,
Optional,
NamedTuple,
overload,
Tuple,
TYPE_CHECKING,
Union,
)
from typing_extensions import Protocol, runtime_checkable, Literal
from ._emoji_replace import _emoji_replace
from .align import Align, AlignValues
from .markup import render as render_markup
from .measure import measure_renderables, Measurement
from ._log_render import LogRender
from .default_styles import DEFAULT_STYLES
from . import errors
from .color import ColorSystem
from .control import Control
from .highlighter import NullHighlighter, ReprHighlighter
from .pretty import Pretty
from .style import Style
from .tabulate import tabulate_mapping
from . import highlighter
from . import themes
from .terminal_theme import TerminalTheme, DEFAULT_TERMINAL_THEME
from .segment import Segment
from .text import Text
from .theme import Theme
if TYPE_CHECKING: # pragma: no cover
from .text import Text
WINDOWS = platform.system() == "Windows"
HighlighterType = Callable[[Union[str, "Text"]], "Text"]
JustifyMethod = Literal["default", "left", "center", "right", "full"]
OverflowMethod = Literal["fold", "crop", "ellipsis"]
CONSOLE_HTML_FORMAT = """\
<!DOCTYPE html>
<head>
<style>
{stylesheet}
body {{
color: {foreground};
background-color: {background};
}}
</style>
</head>
<html>
<body>
<code>
<pre style="font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">{code}</pre>
</code>
</body>
</html>
"""
@dataclass
class ConsoleOptions:
"""Options for __rich_console__ method."""
min_width: int
max_width: int
is_terminal: bool
encoding: str
justify: Optional[JustifyMethod] = None
overflow: Optional[OverflowMethod] = None
no_wrap: Optional[bool] = False
def update(
self,
width: int = None,
min_width: int = None,
max_width: int = None,
justify: JustifyMethod = None,
overflow: OverflowMethod = None,
no_wrap: bool = None,
) -> "ConsoleOptions":
"""Update values, return a copy."""
options = replace(self)
if width is not None:
options.min_width = options.max_width = width
if min_width is not None:
options.min_width = min_width
if max_width is not None:
options.max_width = max_width
if justify is not None:
options.justify = justify
if overflow is not None:
options.overflow = overflow
if no_wrap is not None:
options.no_wrap = no_wrap
return options
@runtime_checkable
class RichCast(Protocol):
"""An object that may be 'cast' to a console renderable."""
def __rich__(self) -> Union["ConsoleRenderable", str]: # pragma: no cover
...
@runtime_checkable
class ConsoleRenderable(Protocol):
"""An object that supports the console protocol."""
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult": # pragma: no cover
...
RenderableType = Union[ConsoleRenderable, RichCast, str]
"""A type that may be rendered by Console."""
RenderResult = Iterable[Union[RenderableType, Segment]]
"""The result of calling a __rich_console__ method."""
_null_highlighter = NullHighlighter()
class RenderGroup:
"""Takes a group of renderables and returns a renderable object that renders the group.
Args:
renderables (Iterable[RenderableType]): An iterable of renderable objects.
"""
def __init__(self, *renderables: RenderableType, fit: bool = True) -> None:
self._renderables = renderables
self.fit = fit
self._render: Optional[List[RenderableType]] = None
@property
def renderables(self) -> List["RenderableType"]:
if self._render is None:
self._render = list(self._renderables)
return self._render
def __rich_measure__(self, console: "Console", max_width: int) -> "Measurement":
if self.fit:
return measure_renderables(console, self.renderables, max_width)
else:
return Measurement(max_width, max_width)
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> RenderResult:
yield from self.renderables
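# Usage sketch for the render_group decorator below (illustrative):
#
# @render_group()
# def get_content():
# yield Text("hello")
# yield Text("world")
#
# Calling get_content() then returns a single RenderGroup renderable.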
def render_group(fit: bool = False) -> Callable:
def decorator(method):
"""Convert a method that returns an iterable of renderables in to a RenderGroup."""
@wraps(method)
def _replace(*args, **kwargs):
renderables = method(*args, **kwargs)
return RenderGroup(*renderables, fit=fit)
return _replace
return decorator
class ConsoleDimensions(NamedTuple):
"""Size of the terminal."""
width: int
height: int
def _is_jupyter() -> bool:
"""Check if we're running in a Jupyter notebook."""
try:
get_ipython # type: ignore
except NameError:
return False
shell = get_ipython().__class__.__name__ # type: ignore
if shell == "ZMQInteractiveShell":
return True # Jupyter notebook or qtconsole
elif shell == "TerminalInteractiveShell":
return False # Terminal running IPython
else:
return False # Other type (?)
COLOR_SYSTEMS = {
"standard": ColorSystem.STANDARD,
"256": ColorSystem.EIGHT_BIT,
"truecolor": ColorSystem.TRUECOLOR,
"windows": ColorSystem.WINDOWS,
}
_COLOR_SYSTEMS_NAMES = {system: name for name, system in COLOR_SYSTEMS.items()}
@dataclass
class ConsoleThreadLocals(threading.local):
"""Thread local values for Console context."""
buffer: List[Segment] = field(default_factory=list)
buffer_index: int = 0
class RenderHook:
"""Provides hooks in to the render process."""
def process_renderables(
self, renderables: List[ConsoleRenderable]
) -> List[ConsoleRenderable]:
"""Called with a list of objects to render.
This method can return a new list of renderables, or modify and return the same list.
Args:
renderables (List[ConsoleRenderable]): A number of renderable objects.
Returns:
List[ConsoleRenderable]: A replacement list of renderables.
"""
return renderables
def detect_legacy_windows() -> bool:
"""Detect legacy Windows."""
return "WINDIR" in os.environ and "WT_SESSION" not in os.environ
if detect_legacy_windows(): # pragma: no cover
from colorama import init
init()
class Console:
"""A high level console interface.
Args:
color_system (str, optional): The color system supported by your terminal,
either ``"standard"``, ``"256"`` or ``"truecolor"``. Leave as ``"auto"`` to autodetect.
force_terminal (bool, optional): Force the Console to write control codes even when a terminal is not detected. Defaults to False.
force_jupyter (bool, optional): Force the Console to write to Jupyter even when Jupyter is not detected. Defaults to False
theme (Theme, optional): An optional style theme object, or ``None`` for default theme.
file (IO, optional): A file object where the console should write to. Defaults to stdout.
width (int, optional): The width of the terminal. Leave as default to auto-detect width.
height (int, optional): The height of the terminal. Leave as default to auto-detect height.
record (bool, optional): Boolean to enable recording of terminal output,
required to call :meth:`export_html` and :meth:`export_text`. Defaults to False.
markup (bool, optional): Boolean to enable :ref:`console_markup`. Defaults to True.
emoji (bool, optional): Enable emoji code. Defaults to True.
highlight (bool, optional): Enable automatic highlighting. Defaults to True.
log_time (bool, optional): Boolean to enable logging of time by :meth:`log` methods. Defaults to True.
log_path (bool, optional): Boolean to enable the logging of the caller by :meth:`log`. Defaults to True.
log_time_format (str, optional): Log time format if ``log_time`` is enabled. Defaults to "[%X] ".
highlighter(HighlighterType, optional): Default highlighter.
"""
def __init__(
self,
*,
color_system: Optional[
Literal["auto", "standard", "256", "truecolor", "windows"]
] = "auto",
force_terminal: bool = False,
force_jupyter: bool = False,
theme: Theme = None,
file: IO[str] = None,
width: int = None,
height: int = None,
tab_size: int = 8,
record: bool = False,
markup: bool = True,
emoji: bool = True,
highlight: bool = True,
log_time: bool = True,
log_path: bool = True,
log_time_format: str = "[%X]",
highlighter: Optional["HighlighterType"] = ReprHighlighter(),
):
self.is_jupyter = force_jupyter or _is_jupyter()
if self.is_jupyter:
width = width or 93
height = height or 100
self._styles = themes.DEFAULT.styles if theme is None else theme.styles
self._width = width
self._height = height
self.tab_size = tab_size
self.record = record
self._markup = markup
self._emoji = emoji
self._highlight = highlight
self.legacy_windows: bool = detect_legacy_windows()
self._color_system: Optional[ColorSystem]
self._force_terminal = force_terminal
self.file = file or sys.stdout
if color_system is None:
self._color_system = None
elif color_system == "auto":
self._color_system = self._detect_color_system()
else:
self._color_system = COLOR_SYSTEMS[color_system]
self._lock = threading.RLock()
self._log_render = LogRender(
show_time=log_time, show_path=log_path, time_format=log_time_format
)
self.highlighter: HighlighterType = highlighter or _null_highlighter
self._record_buffer_lock = threading.RLock()
self._thread_locals = ConsoleThreadLocals()
self._record_buffer: List[Segment] = []
self._render_hooks: List[RenderHook] = []
def __repr__(self) -> str:
return f"<console width={self.width} {str(self._color_system)}>"
@property
def _buffer(self) -> List[Segment]:
"""Get a thread local buffer."""
return self._thread_locals.buffer
@property
def _buffer_index(self) -> int:
"""Get a thread local buffer."""
return self._thread_locals.buffer_index
@_buffer_index.setter
def _buffer_index(self, value: int) -> None:
self._thread_locals.buffer_index = value
def _detect_color_system(self) -> Optional[ColorSystem]:
"""Detect color system from env vars."""
if not self.is_terminal:
return None
if self.legacy_windows: # pragma: no cover
return ColorSystem.WINDOWS
if "WT_SESSION" in os.environ:
# Exception for Windows terminal
return ColorSystem.TRUECOLOR
color_term = os.environ.get("COLORTERM", "").strip().lower()
return (
ColorSystem.TRUECOLOR
if color_term in ("truecolor", "24bit")
else ColorSystem.EIGHT_BIT
)
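# Illustrative check of the detection above (behaviour depends on the
# environment; COLORTERM=truecolor/24bit is the de-facto truecolor signal):
#   os.environ["COLORTERM"] = "truecolor"
#   Console()._detect_color_system()  # ColorSystem.TRUECOLOR on a tty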
def _enter_buffer(self) -> None:
"""Enter in to a buffer context, and buffer all output."""
self._buffer_index += 1
def _exit_buffer(self) -> None:
"""Leave buffer context, and render content if required."""
self._buffer_index -= 1
self._check_buffer()
def push_render_hook(self, hook: RenderHook) -> None:
"""Add a new render hook to the stack.
Args:
hook (RenderHook): Render hook instance.
"""
self._render_hooks.append(hook)
def pop_render_hook(self) -> None:
"""Pop the last renderhook from the stack."""
self._render_hooks.pop()
def __enter__(self) -> "Console":
"""Own context manager to enter buffer context."""
self._enter_buffer()
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
"""Exit buffer context."""
self._exit_buffer()
@property
def color_system(self) -> Optional[str]:
"""Get color system string.
Returns:
Optional[str]: "standard", "256" or "truecolor".
"""
if self._color_system is not None:
return _COLOR_SYSTEMS_NAMES[self._color_system]
else:
return None
@property
def encoding(self) -> str:
"""Get the encoding of the console file, e.g. ``"utf-8"``.
Returns:
str: A standard encoding string.
"""
return getattr(self.file, "encoding", "utf-8")
@property
def is_terminal(self) -> bool:
"""Check if the console is writing to a terminal.
Returns:
bool: True if the console is writing to a device capable of
understanding terminal codes, otherwise False.
"""
if self._force_terminal:
return True
isatty = getattr(self.file, "isatty", None)
return False if isatty is None else isatty()
@property
def options(self) -> ConsoleOptions:
"""Get default console options."""
return ConsoleOptions(
min_width=1,
max_width=self.width,
encoding=self.encoding,
is_terminal=self.is_terminal,
)
@property
def size(self) -> ConsoleDimensions:
"""Get the size of the console.
Returns:
ConsoleDimensions: A named tuple containing the dimensions.
"""
if self._width is not None and self._height is not None:
return ConsoleDimensions(self._width, self._height)
width, height = shutil.get_terminal_size()
return ConsoleDimensions(
(width - self.legacy_windows) if self._width is None else self._width,
height if self._height is None else self._height,
)
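# Note (added for clarity): one column is reserved on legacy Windows because
# the classic console wraps to the next line as soon as the final column is
# written, which would otherwise introduce spurious blank lines.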
@property
def width(self) -> int:
"""Get the width of the console.
Returns:
int: The width (in characters) of the console.
"""
width, _ = self.size
return width
def line(self, count: int = 1) -> None:
"""Write new line(s).
Args:
count (int, optional): Number of new lines. Defaults to 1.
"""
assert count >= 0, "count must be >= 0"
if count:
self._buffer.append(Segment("\n" * count))
self._check_buffer()
def clear(self, home: bool = True) -> None:
"""Clear the screen.
Args:
home (bool, optional): Also move the cursor to 'home' position. Defaults to True.
"""
self.control("\033[2J\033[H" if home else "\033[2J")
def show_cursor(self, show: bool = True) -> None:
"""Show or hide the cursor.
Args:
show (bool, optional): Set visibility of the cursor.
"""
if self.is_terminal and not self.legacy_windows:
self.control("\033[?25h" if show else "\033[?25l")
def render(
self, renderable: RenderableType, options: ConsoleOptions
) -> Iterable[Segment]:
"""Render an object in to an iterable of `Segment` instances.
This method contains the logic for rendering objects with the console protocol.
You are unlikely to need to use it directly, unless you are extending the library.
Args:
renderable (RenderableType): An object supporting the console protocol, or
an object that may be converted to a string.
options (ConsoleOptions): A console options object.
Returns:
Iterable[Segment]: An iterable of segments that may be rendered.
"""
render_iterable: RenderResult
if isinstance(renderable, ConsoleRenderable):
render_iterable = renderable.__rich_console__(self, options)
elif isinstance(renderable, str):
yield from self.render(self.render_str(renderable), options)
return
else:
raise errors.NotRenderableError(
f"Unable to render {renderable!r}; "
"A str, Segment or object with __rich_console__ method is required"
)
try:
iter_render = iter(render_iterable)
except TypeError:
raise errors.NotRenderableError(
f"object {render_iterable!r} is not renderable"
)
for render_output in iter_render:
if isinstance(render_output, Segment):
yield render_output
else:
yield from self.render(render_output, options)
def render_lines(
self,
renderable: RenderableType,
options: Optional[ConsoleOptions],
*,
style: Optional[Style] = None,
pad: bool = True,
) -> List[List[Segment]]:
"""Render objects in to a list of lines.
The output of render_lines is useful when further formatting of rendered console text
is required, such as the Panel class which draws a border around any renderable object.
Args:
renderable (RenderableType): Any object renderable in the console.
options (Optional[ConsoleOptions]): Console options used to render with.
style (Style, optional): Optional style to apply to renderables. Defaults to ``None``.
pad (bool, optional): Pad lines shorter than render width. Defaults to ``True``.
Returns:
List[List[Segment]]: A list of lines, where a line is a list of Segment objects.
"""
render_options = options or self.options
_rendered = self.render(renderable, render_options)
if style is not None:
_rendered = Segment.apply_style(_rendered, style)
lines = list(
Segment.split_and_crop_lines(
_rendered,
render_options.max_width,
style=style,
include_new_lines=False,
pad=pad,
)
)
return lines
def render_str(
self,
text: str,
*,
style: Union[str, Style] = "",
justify: JustifyMethod = None,
overflow: OverflowMethod = None,
emoji: bool = None,
markup: bool = None,
highlighter: HighlighterType = None,
) -> "Text":
"""Convert a string to a Text instance. This is is called automatically if
you print or log a string.
Args:
text (str): Text to render.
style (Union[str, Style], optional): Style to apply to rendered text.
justify (str, optional): Justify method: "default", "left", "center", "full", or "right". Defaults to ``None``.
overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to ``None``.
emoji (Optional[bool], optional): Enable emoji, or ``None`` to use Console default.
markup (Optional[bool], optional): Enable markup, or ``None`` to use Console default.
highlighter (HighlighterType, optional): Optional highlighter to apply.
Returns:
Text: A Text instance.
"""
emoji_enabled = emoji or (emoji is None and self._emoji)
markup_enabled = markup or (markup is None and self._markup)
if markup_enabled:
rich_text = render_markup(text, style=style, emoji=emoji_enabled)
rich_text.justify = justify
rich_text.overflow = overflow
else:
rich_text = Text(
_emoji_replace(text) if emoji_enabled else text,
justify=justify,
overflow=overflow,
style=style,
)
if highlighter is not None:
highlight_text = highlighter(str(rich_text))
highlight_text.copy_styles(rich_text)
return highlight_text
return rich_text
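# Usage sketch (illustrative): render_str is what print()/log() call for
# plain strings, but it can be used directly, e.g.:
#   text = Console().render_str(":smiley: [bold]hi[/bold]", style="on blue")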
def get_style(
self, name: Union[str, Style], *, default: Union[Style, str] = None
) -> Style:
"""Get a Style instance by it's theme name or parse a definition.
Args:
name (str): The name of a style or a style definition.
Returns:
Style: A Style object.
Raises:
MissingStyle: If no style could be parsed from name.
"""
if isinstance(name, Style):
return name
try:
style = self._styles.get(name)
return style if style is not None else Style.parse(name)
except errors.StyleSyntaxError as error:
if default is not None:
return self.get_style(default)
raise errors.MissingStyle(f"Failed to get style {name!r}; {error}")
def _collect_renderables(
self,
objects: Iterable[Any],
sep: str,
end: str,
*,
justify: JustifyMethod = None,
emoji: bool = None,
markup: bool = None,
highlight: bool = None,
) -> List[ConsoleRenderable]:
"""Combined a number of renderables and text in to one renderable.
Args:
renderables (Iterable[Union[str, ConsoleRenderable]]): Anyting that Rich can render.
sep (str, optional): String to write between print data. Defaults to " ".
end (str, optional): String to write at end of print data. Defaults to "\n".
justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``.
emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default.
markup (Optional[bool], optional): Enable markup, or ``None`` to use console default.
highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default.
Returns:
List[ConsoleRenderable]: A list of things to render.
"""
renderables: List[ConsoleRenderable] = []
_append = renderables.append
text: List[Text] = []
append_text = text.append
align = cast(
AlignValues, justify if justify in ("left", "center", "right") else "left"
)
if align == "left":
append = _append
else:
def append(renderable: RenderableType) -> None:
_append(Align(renderable, align))
_highlighter: HighlighterType = _null_highlighter
if highlight or (highlight is None and self._highlight):
_highlighter = self.highlighter
def check_text() -> None:
if text:
sep_text = Text(sep, end=end)
append(sep_text.join(text))
del text[:]
for renderable in objects:
rich_cast = getattr(renderable, "__rich__", None)
if rich_cast:
renderable = rich_cast()
if isinstance(renderable, str):
append_text(
self.render_str(
renderable,
emoji=emoji,
markup=markup,
highlighter=_highlighter,
)
)
elif isinstance(renderable, ConsoleRenderable):
check_text()
append(renderable)
elif isinstance(renderable, (Mapping, Sequence)):
check_text()
append(Pretty(renderable, highlighter=_highlighter))
else:
append_text(_highlighter(str(renderable)))
check_text()
return renderables
def rule(
self,
title: str = "",
*,
character: str = "─",
style: Union[str, Style] = "rule.line",
) -> None:
"""Draw a line with optional centered title.
Args:
title (str, optional): Text to render over the rule. Defaults to "".
character (str, optional): Character to form the line. Defaults to "─".
style (Union[str, Style], optional): Style of the rule. Defaults to "rule.line".
"""
from .rule import Rule
rule = Rule(title=title, character=character, style=style)
self.print(rule)
def control(self, control_codes: Union["Control", str]) -> None:
"""Insert non-printing control codes.
Args:
control_codes (str): Control codes, such as those that may move the cursor.
"""
self._buffer.append(Segment.control(str(control_codes)))
self._check_buffer()
def print(
self,
*objects: Any,
sep=" ",
end="\n",
style: Union[str, Style] = None,
justify: JustifyMethod = None,
overflow: OverflowMethod = None,
no_wrap: bool = None,
emoji: bool = None,
markup: bool = None,
highlight: bool = None,
width: int = None,
) -> None:
r"""Print to the console.
Args:
objects (positional args): Objects to print to the terminal.
sep (str, optional): String to write between print data. Defaults to " ".
end (str, optional): String to write at end of print data. Defaults to "\n".
style (Union[str, Style], optional): A style to apply to output. Defaults to None.
justify (str, optional): Justify method: "default", "left", "right", "center", or "full". Defaults to ``None``.
overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None.
no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to None.
emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to ``None``.
markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to ``None``.
highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to ``None``.
width (Optional[int], optional): Width of output, or ``None`` to auto-detect. Defaults to ``None``.
"""
if not objects:
self.line()
return
with self:
renderables = self._collect_renderables(
objects,
sep,
end,
justify=justify,
emoji=emoji,
markup=markup,
highlight=highlight,
)
for hook in self._render_hooks:
renderables = hook.process_renderables(renderables)
render_options = self.options.update(
justify=justify, overflow=overflow, width=width, no_wrap=no_wrap
)
extend = self._buffer.extend
render = self.render
if style is None:
for renderable in renderables:
extend(render(renderable, render_options))
else:
for renderable in renderables:
extend(
Segment.apply_style(
render(renderable, render_options), self.get_style(style)
)
)
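# Usage sketch (illustrative): strings pass through render_str, while
# mappings and sequences are pretty-printed, e.g.:
#   console.print("[bold red]error[/bold red]", {"code": 500}, justify="left")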
def print_exception(
self,
*,
width: Optional[int] = 88,
extra_lines: int = 3,
theme: Optional[str] = None,
word_wrap: bool = False,
) -> None:
"""Prints a rich render of the last exception and traceback.
Args:
width (Optional[int], optional): Number of characters used to render the traceback. Defaults to 88.
extra_lines (int, optional): Additional lines of code to render. Defaults to 3.
theme (str, optional): Override pygments theme used in traceback
word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
"""
from .traceback import Traceback
traceback = Traceback(
width=width, extra_lines=extra_lines, theme=theme, word_wrap=word_wrap
)
self.print(traceback)
def log(
self,
*objects: Any,
sep=" ",
end="\n",
justify: JustifyMethod = None,
emoji: bool = None,
markup: bool = None,
highlight: bool = None,
log_locals: bool = False,
_stack_offset=1,
) -> None:
r"""Log rich content to the terminal.
Args:
objects (positional args): Objects to log to the terminal.
sep (str, optional): String to write between print data. Defaults to " ".
end (str, optional): String to write at end of print data. Defaults to "\n".
justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``.
emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to None.
markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to None.
highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to None.
log_locals (bool, optional): Boolean to enable logging of locals where ``log()``
was called. Defaults to False.
_stack_offset (int, optional): Offset of caller from end of call stack. Defaults to 1.
"""
if not objects:
self.line()
return
with self:
renderables = self._collect_renderables(
objects,
sep,
end,
justify=justify,
emoji=emoji,
markup=markup,
highlight=highlight,
)
caller = inspect.stack()[_stack_offset]
link_path = (
None
if caller.filename.startswith("<")
else os.path.abspath(caller.filename)
)
path = caller.filename.rpartition(os.sep)[-1]
line_no = caller.lineno
if log_locals:
locals_map = {
key: value
for key, value in caller.frame.f_locals.items()
if not key.startswith("__")
}
renderables.append(tabulate_mapping(locals_map, title="Locals"))
renderables = [
self._log_render(
self, renderables, path=path, line_no=line_no, link_path=link_path,
)
]
for hook in self._render_hooks:
renderables = hook.process_renderables(renderables)
extend = self._buffer.extend
render = self.render
render_options = self.options
for renderable in renderables:
extend(render(renderable, render_options))
def _check_buffer(self) -> None:
"""Check if the buffer may be rendered."""
with self._lock:
if self._buffer_index == 0:
if self.is_jupyter:
from .jupyter import display
display(self._buffer)
del self._buffer[:]
else:
text = self._render_buffer()
if text:
self.file.write(text)
self.file.flush()
def _render_buffer(self) -> str:
"""Render buffered output, and clear buffer."""
output: List[str] = []
append = output.append
color_system = self._color_system
buffer = self._buffer[:]
if self.record:
with self._record_buffer_lock:
self._record_buffer.extend(buffer)
del self._buffer[:]
not_terminal = not self.is_terminal
for line in Segment.split_and_crop_lines(buffer, self.width, pad=False):
for text, style, is_control in line:
if style and not is_control:
append(style.render(text, color_system=color_system))
else:
if not (not_terminal and is_control):
append(text)
rendered = "".join(output)
return rendered
def input(
self, prompt: Union[str, Text] = "", *, markup: bool = True, emoji: bool = True
) -> str:
"""Displays a prompt and waits for input from the user. The prompt may contain color / style.
Args:
prompt (Union[str, Text]): Text to render in the prompt.
markup (bool, optional): Enable console markup (requires a str prompt). Defaults to True.
emoji (bool, optional): Enable emoji (requires a str prompt). Defaults to True.
Returns:
str: Text read from stdin.
"""
self.print(prompt, markup=markup, emoji=emoji, end="")
result = input()
return result
def export_text(self, *, clear: bool = True, styles: bool = False) -> str:
"""Generate text from console contents (requires record=True argument in constructor).
Args:
clear (bool, optional): Set to ``True`` to clear the record buffer after exporting.
styles (bool, optional): If ``True``, ansi style codes will be included. ``False`` for plain text.
Defaults to ``False``.
Returns:
str: String containing console contents.
"""
assert (
self.record
), "To export console contents set record=True in the constructor or instance"
with self._record_buffer_lock:
if styles:
text = "".join(
(style.render(text) if style else text)
for text, style, _ in self._record_buffer
)
else:
text = "".join(text for text, _, _ in self._record_buffer)
if clear:
del self._record_buffer[:]
return text
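# Usage sketch (illustrative): exporting requires a recording console, e.g.:
#   console = Console(record=True)
#   console.print("hello")
#   plain = console.export_text()  # clears the record buffer by default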
def save_text(self, path: str, *, clear: bool = True, styles: bool = False) -> None:
"""Generate text from console and save to a given location (requires record=True argument in constructor).
Args:
path (str): Path to write text files.
clear (bool, optional): Set to ``True`` to clear the record buffer after exporting.
styles (bool, optional): If ``True``, ansi style codes will be included. ``False`` for plain text.
Defaults to ``False``.
"""
text = self.export_text(clear=clear, styles=styles)
with open(path, "wt", encoding="utf-8") as write_file:
write_file.write(text)
def export_html(
self,
*,
theme: TerminalTheme = None,
clear: bool = True,
code_format: str = None,
inline_styles: bool = False,
) -> str:
"""Generate HTML from console contents (requires record=True argument in constructor).
Args:
theme (TerminalTheme, optional): TerminalTheme object containing console colors.
clear (bool, optional): Set to ``True`` to clear the record buffer after generating the HTML.
code_format (str, optional): Format string to render HTML, should contain {foreground}
{background} and {code}.
inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files
larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
Defaults to False.
Returns:
str: String containing console contents as HTML.
"""
assert (
self.record
), "To export console contents set record=True in the constructor or instance"
fragments: List[str] = []
append = fragments.append
_theme = theme or DEFAULT_TERMINAL_THEME
stylesheet = ""
def escape(text: str) -> str:
"""Escape html."""
return text.replace("&", "&").replace("<", "<").replace(">", ">")
render_code_format = CONSOLE_HTML_FORMAT if code_format is None else code_format
with self._record_buffer_lock:
if inline_styles:
for text, style, _ in Segment.filter_control(
Segment.simplify(self._record_buffer)
):
text = escape(text)
if style:
rule = style.get_html_style(_theme)
text = f'<span style="{rule}">{text}</span>' if rule else text
if style.link:
text = f'<a href="{style.link}">{text}</a>'
append(text)
else:
styles: Dict[str, int] = {}
for text, style, _ in Segment.filter_control(
Segment.simplify(self._record_buffer)
):
text = escape(text)
if style:
rule = style.get_html_style(_theme)
if rule:
style_number = styles.setdefault(rule, len(styles) + 1)
text = f'<span class="r{style_number}">{text}</span>'
if style.link:
text = f'<a href="{style.link}">{text}</a>'
append(text)
stylesheet_rules: List[str] = []
stylesheet_append = stylesheet_rules.append
for style_rule, style_number in styles.items():
if style_rule:
stylesheet_append(f".r{style_number} {{{style_rule}}}")
stylesheet = "\n".join(stylesheet_rules)
rendered_code = render_code_format.format(
code="".join(fragments),
stylesheet=stylesheet,
foreground=_theme.foreground_color.hex,
background=_theme.background_color.hex,
)
if clear:
del self._record_buffer[:]
return rendered_code
def save_html(
self,
path: str,
*,
theme: TerminalTheme = None,
clear: bool = True,
code_format=CONSOLE_HTML_FORMAT,
inline_styles: bool = False,
) -> None:
"""Generate HTML from console contents and write to a file (requires record=True argument in constructor).
Args:
path (str): Path to write html file.
theme (TerminalTheme, optional): TerminalTheme object containing console colors.
clear (bool, optional): Set to True to clear the record buffer after generating the HTML.
code_format (str, optional): Format string to render HTML, should contain {foreground}
{background} and {code}.
inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files
larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
Defaults to False.
"""
html = self.export_html(
theme=theme,
clear=clear,
code_format=code_format,
inline_styles=inline_styles,
)
with open(path, "wt", encoding="utf-8") as write_file:
write_file.write(html)
if __name__ == "__main__": # pragma: no cover
console = Console()
console.log(
"JSONRPC [i]request[/i]",
5,
1.3,
True,
False,
None,
{
"jsonrpc": "2.0",
"method": "subtract",
"params": {"minuend": 42, "subtrahend": 23},
"id": 3,
},
)
console.log("Hello, World!", "{'a': 1}", repr(console))
console.log(
{
"name": None,
"empty": [],
"quiz": {
"sport": {
"answered": True,
"q1": {
"question": "Which one is correct team name in NBA?",
"options": [
"New York Bulls",
"Los Angeles Kings",
"Golden State Warriros",
"Huston Rocket",
],
"answer": "Huston Rocket",
},
},
"maths": {
"answered": False,
"q1": {
"question": "5 + 7 = ?",
"options": [10, 11, 12, 13],
"answer": 12,
},
"q2": {
"question": "12 - 8 = ?",
"options": [1, 2, 3, 4],
"answer": 4,
},
},
},
}
)
console.log("foo")
| [] | [] | [
"COLORTERM"
] | [] | ["COLORTERM"] | python | 1 | 0 | |
db/log.go | package db
import (
"context"
pkglog "log"
"os"
)
var logger = newLogFromEnv()
//var logger = newContextLogFromEnv()
// SetLogger sets logger for the package.
func SetLogger(l Logger) {
logger = l
}
// // SetContextLogger sets logger for the package.
// func SetContextLogger(l ContextLogger) {
// logger = l
// }
// Logger interface used in this package.
type Logger interface {
Debugf(format string, args ...interface{})
Infof(format string, args ...interface{})
Warningf(format string, args ...interface{})
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
}
// LogLevel is the logging verbosity for this package.
type LogLevel int
const (
// DebugLevel logs debug, info, warning and error messages.
DebugLevel LogLevel = 3
// InfoLevel logs info, warning and error messages.
InfoLevel LogLevel = 2
// WarnLevel logs warning and error messages.
WarnLevel LogLevel = 1
// ErrLevel logs error messages only.
ErrLevel LogLevel = 0
)
// NewLogger returns a Logger that prints messages at or below the given level.
func NewLogger(lev LogLevel) Logger {
return &defaultLog{Level: lev}
}
func newLogFromEnv() Logger {
return NewLogger(parseLogLevel(os.Getenv("LOG")))
}
func parseLogLevel(s string) LogLevel {
switch s {
case "debug":
return DebugLevel
case "info":
return InfoLevel
case "warn":
return WarnLevel
default:
return ErrLevel
}
}
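// Example (illustrative): running with LOG=debug in the environment makes
// newLogFromEnv return a DebugLevel logger; unrecognised values fall back to
// ErrLevel, so only Errorf output is printed.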
type defaultLog struct {
Level LogLevel
}
func (l defaultLog) Debugf(format string, args ...interface{}) {
if l.Level >= 3 {
pkglog.Printf("[DEBG] "+format+"\n", args...)
}
}
func (l defaultLog) Infof(format string, args ...interface{}) {
if l.Level >= 2 {
pkglog.Printf("[INFO] "+format+"\n", args...)
}
}
func (l defaultLog) Warningf(format string, args ...interface{}) {
if l.Level >= 1 {
pkglog.Printf("[WARN] "+format+"\n", args...)
}
}
func (l defaultLog) Errorf(format string, args ...interface{}) {
if l.Level >= 0 {
pkglog.Printf("[ERR] "+format+"\n", args...)
}
}
func (l defaultLog) Fatalf(format string, args ...interface{}) {
pkglog.Fatalf(format, args...)
}
// ContextLogger interface used in this package with request context.
type ContextLogger interface {
Debugf(ctx context.Context, format string, args ...interface{})
Infof(ctx context.Context, format string, args ...interface{})
Warningf(ctx context.Context, format string, args ...interface{})
Errorf(ctx context.Context, format string, args ...interface{})
}
// NewContextLogger returns a ContextLogger that prints messages at or below the given level.
func NewContextLogger(lev LogLevel) ContextLogger {
return &defaultContextLog{Level: lev}
}
type defaultContextLog struct {
Level LogLevel
}
func (l defaultContextLog) Debugf(ctx context.Context, format string, args ...interface{}) {
if l.Level >= 3 {
pkglog.Printf("[DEBG] "+format+"\n", args...)
}
}
func (l defaultContextLog) Infof(ctx context.Context, format string, args ...interface{}) {
if l.Level >= 2 {
pkglog.Printf("[INFO] "+format+"\n", args...)
}
}
func (l defaultContextLog) Warningf(ctx context.Context, format string, args ...interface{}) {
if l.Level >= 1 {
pkglog.Printf("[WARN] "+format+"\n", args...)
}
}
func (l defaultContextLog) Errorf(ctx context.Context, format string, args ...interface{}) {
if l.Level >= 0 {
pkglog.Printf("[ERR] "+format+"\n", args...)
}
}
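// Usage sketch (illustrative, not part of this file): callers can inject a
// verbose logger at startup instead of relying on the LOG variable:
//
//	db.SetLogger(db.NewLogger(db.DebugLevel))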
| [
"\"LOG\""
] | [] | [
"LOG"
] | [] | ["LOG"] | go | 1 | 0 | |
p0f/app.py | import datetime
import ipaddress
import json
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import pika
import pyshark
def connect_rabbit(host='messenger', port=5672, queue='task_queue'):
params = pika.ConnectionParameters(host=host, port=port)
connection = pika.BlockingConnection(params)
channel = connection.channel()
channel.queue_declare(queue=queue, durable=True)
return (connection, channel)
def send_rabbit_msg(msg, channel, exchange='', routing_key='task_queue'):
channel.basic_publish(exchange=exchange,
routing_key=routing_key,
body=json.dumps(msg),
properties=pika.BasicProperties(delivery_mode=2))
print(" [X] %s UTC %r %r" % (str(datetime.datetime.utcnow()),
str(msg['id']), str(msg['file_path'])))
def get_version():
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'VERSION'), 'r') as f:
return f.read().strip()
def run_proc(args, output=subprocess.DEVNULL):
proc = subprocess.Popen(args, stdout=output)
return proc.communicate()
def run_p0f(path):
with tempfile.TemporaryDirectory() as tempdir:
p0f = shutil.which('p0f')
# p0f not in PATH, default to alpine location.
if p0f is None:
p0f = '/usr/bin/p0f'
p0f_output = os.path.join(tempdir, 'p0f_output.txt')
args = [p0f, '-r', path, '-o', p0f_output]
run_proc(args)
with open(p0f_output, 'r') as f:
return f.read()
def parse_ip(packet):
for ip_type in ('ip', 'ipv6'):
try:
ip_fields = getattr(packet, ip_type)
except AttributeError:
continue
src_ip_address = getattr(ip_fields, '%s.src' % ip_type)
dst_ip_address = getattr(ip_fields, '%s.dst' % ip_type)
return (src_ip_address, dst_ip_address)
return (None, None)
def parse_eth(packet):
src_eth_address = packet.eth.src
dst_eth_address = packet.eth.dst
return (src_eth_address, dst_eth_address)
def run_tshark(path):
addresses = set()
with pyshark.FileCapture(path, include_raw=False, keep_packets=False,
custom_parameters=['-o', 'tcp.desegment_tcp_streams:false']) as cap:
for packet in cap:
src_eth_address, dst_eth_address = parse_eth(packet)
src_address, dst_address = parse_ip(packet)
if src_eth_address and src_address:
addresses.add((src_address, src_eth_address))
if dst_eth_address and dst_address:
addresses.add((dst_address, dst_eth_address))
return addresses
def parse_output(p0f_output, addresses):
results = {}
for p0f_line in p0f_output.splitlines():
fields = p0f_line.split('|')
fields_data = {}
for field in fields[1:]:
k, v = field.split('=', 1)
fields_data[k] = v
subj = fields_data.get('subj', None)
if subj is None or subj not in fields_data:
continue
host = str(ipaddress.ip_address(fields_data[subj].split('/')[0]))
host_results = {}
if 'os' in fields_data:
full_os = fields_data['os']
if not full_os.startswith('?'):
short_os = full_os.split(' ')[0]
host_results.update({
'full_os': full_os,
'short_os': short_os})
for host_field in ('link', 'raw_mtu'):
host_value = fields_data.get(host_field, None)
if host_value is not None and not host_value.startswith('?'):
host_results.update({host_field: host_value})
if host_results:
if host not in results:
results[host] = {}
results[host].update(host_results)
for address, eth_address in addresses:
if address in results:
results[address].update({'mac': eth_address})
return results
def ispcap(pathfile):
for ext in ('pcap', 'pcapng', 'dump', 'capture'):
if pathfile.endswith(''.join(('.', ext))):
return True
return False
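# e.g. (illustrative): ispcap("trace.pcapng") -> True, ispcap("notes.txt") -> False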
def main():
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
pcap_paths = []
path = sys.argv[1]
if os.path.isdir(path):
for root, _, files in os.walk(path):
for pathfile in files:
if ispcap(pathfile):
pcap_paths.append(os.path.join(root, pathfile))
else:
pcap_paths.append(path)
for path in pcap_paths:
p0f_output = run_p0f(path)
addresses = run_tshark(path)
results = parse_output(p0f_output, addresses)
print(results)
if os.environ.get('rabbit', '') == 'true':
uid = os.environ.get('id', '')
version = get_version()
queue = os.getenv('RABBIT_QUEUE_NAME', 'task_queue')
routing_key = os.getenv('RABBIT_ROUTING_KEY', 'task_queue')
exchange = os.getenv('RABBIT_EXCHANGE', 'task_queue')
try:
connection, channel = connect_rabbit(queue=queue)
body = {
'id': uid, 'type': 'metadata', 'file_path': path, 'data': results, 'results': {
'tool': 'p0f', 'version': version}}
send_rabbit_msg(body, channel, exchange=exchange, routing_key=routing_key)
if path == pcap_paths[-1]:
body = {
'id': uid, 'type': 'metadata', 'file_path': path, 'data': '', 'results': {
'tool': 'p0f', 'version': version}}
send_rabbit_msg(body, channel)
connection.close()
except Exception as e:
print(str(e))
if __name__ == "__main__": # pragma: no cover
main()
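# Example invocation (paths and ids are illustrative):
#   rabbit=true id=job-1 python app.py /pcaps/capture.pcap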
| [] | [] | [
"id",
"RABBIT_QUEUE_NAME",
"rabbit",
"RABBIT_ROUTING_KEY",
"RABBIT_EXCHANGE"
] | [] | ["id", "RABBIT_QUEUE_NAME", "rabbit", "RABBIT_ROUTING_KEY", "RABBIT_EXCHANGE"] | python | 5 | 0 | |
source/SC_Event_Service/app.py | from sqlalchemy import create_engine, Column, Integer, String, Numeric, DateTime, func, Boolean
from sqlalchemy.ext.declarative import declarative_base
from aiokafka import AIOKafkaConsumer
import asyncio, os, ast, sys
import nest_asyncio
nest_asyncio.apply()
## global variable :: setting this for kafka Consumer
KAFKA_ENDPOINT = os.getenv('KAFKA_ENDPOINT', 'localhost:9092')
KAFKA_TOPIC = os.getenv('KAFKA_TOPIC', 'lpr')
KAFKA_CONSUMER_GROUP_ID = os.getenv('KAFKA_CONSUMER_GROUP_ID', 'event_consumer_group')
loop = asyncio.get_event_loop()
## Database details and connection
DB_USER = os.getenv('DB_USER', 'dbadmin')
DB_PASSWORD = os.getenv('DB_PASSWORD', 'HT@1202k')
DB_HOST = os.getenv('DB_HOST', '127.0.0.1')
DB_NAME = os.getenv('DB_NAME','pgdb')
TABLE_NAME = os.getenv('TABLE_NAME','event')
Base = declarative_base()
class Event(Base):
__tablename__ = "event"
event_id = Column(String, primary_key=True, index=True)
event_timestamp = Column('date', DateTime(timezone=True), default=func.now())
event_vehicle_detected_plate_number = Column(String, index=True)
event_vehicle_lpn_detection_status = Column(String)
stationa1 = Column(Boolean, unique=False)
stationa5201 = Column(Boolean, unique=False)
stationa13 = Column(Boolean, unique=False)
stationa2 = Column(Boolean, unique=False)
stationa23 = Column(Boolean, unique=False)
stationb313 = Column(Boolean, unique=False)
stationa4202 = Column(Boolean, unique=False)
stationa41 = Column(Boolean, unique=False)
stationb504 = Column(Boolean, unique=False)
async def consume():
engine = create_engine('postgresql://'+DB_USER+':'+DB_PASSWORD+'@'+DB_HOST+'/'+DB_NAME+'?tcp_user_timeout=3000&connect_timeout=10', pool_pre_ping=True, connect_args={})
connection = engine.connect()
kafkaConsumer = AIOKafkaConsumer(KAFKA_TOPIC, loop=loop, bootstrap_servers=KAFKA_ENDPOINT, group_id=KAFKA_CONSUMER_GROUP_ID)
## Create Table if does not exists
Event.__table__.create(bind=engine, checkfirst=True)
await kafkaConsumer.start()
try:
async for msg in kafkaConsumer:
print(msg.key)
message = msg.value
payload=ast.literal_eval(message.decode('utf-8'))
try:
connection.execute(f"""INSERT INTO public.{TABLE_NAME}(event_id, date, event_vehicle_detected_plate_number, event_vehicle_lpn_detection_status, "stationa1", "stationa5201", "stationa13", "stationa2", "stationa23", "stationb313", "stationa4202"
, "stationa41", "stationb504" ) VALUES('{payload['event_id']}', '{payload['event_timestamp']}', '{payload['event_vehicle_detected_plate_number']}', '{payload['event_vehicle_lpn_detection_status']}', '{payload['stationa1']}', '{payload['stationa5201']}', '{payload['stationa13']}', '{payload['stationa2']}', '{payload['stationa23']}', '{payload['stationb313']}', '{payload['stationa4202']}', '{payload['stationa41']}', '{payload['stationb504']}'
)""")
print("===============================================")
print(payload)
print("Message written to DB successfully")
print("===============================================")
except Exception as e:
print(e)
print("Exiting ....")
sys.exit(1)
except Exception as e:
print(str(e))
print("Exiting ....")
sys.exit(1)
finally:
await kafkaConsumer.stop()
loop.run_until_complete(consume())
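# Example (values illustrative): all connection details come from the
# environment, e.g.
#   KAFKA_ENDPOINT=broker:9092 KAFKA_TOPIC=lpr DB_HOST=pg.internal python app.py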
| [] | [] | [
"TABLE_NAME",
"KAFKA_TOPIC",
"DB_PASSWORD",
"DB_HOST",
"DB_NAME",
"KAFKA_CONSUMER_GROUP_ID",
"KAFKA_ENDPOINT",
"DB_USER"
] | [] | ["TABLE_NAME", "KAFKA_TOPIC", "DB_PASSWORD", "DB_HOST", "DB_NAME", "KAFKA_CONSUMER_GROUP_ID", "KAFKA_ENDPOINT", "DB_USER"] | python | 8 | 0 | |
alf/algorithms/merlin_algorithm_test.py | # Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import logging
import os
import psutil
import time
import tensorflow as tf
from tf_agents.environments.tf_py_environment import TFPyEnvironment
from alf.algorithms.merlin_algorithm import create_merlin_algorithm
from alf.drivers.on_policy_driver import OnPolicyDriver
from alf.environments.suite_unittest import RNNPolicyUnittestEnv
class MerlinAlgorithmTest(tf.test.TestCase):
def setUp(self):
super().setUp()
if os.environ.get('SKIP_LONG_TIME_COST_TESTS', False):
self.skipTest("It takes very long to run this test.")
def test_merlin_algorithm(self):
batch_size = 100
steps_per_episode = 15
gap = 10
env = RNNPolicyUnittestEnv(
batch_size, steps_per_episode, gap, obs_dim=3)
env = TFPyEnvironment(env)
algorithm = create_merlin_algorithm(
env, learning_rate=1e-3, debug_summaries=False)
driver = OnPolicyDriver(env, algorithm, train_interval=6)
eval_driver = OnPolicyDriver(env, algorithm, training=False)
proc = psutil.Process(os.getpid())
policy_state = driver.get_initial_policy_state()
time_step = driver.get_initial_time_step()
for i in range(100):
t0 = time.time()
time_step, policy_state, _ = driver.run(
max_num_steps=150 * batch_size,
time_step=time_step,
policy_state=policy_state)
mem = proc.memory_info().rss // 1e6
logging.info('%s time=%.3f mem=%s' % (i, time.time() - t0, mem))
env.reset()
time_step, _ = eval_driver.run(max_num_steps=14 * batch_size)
logging.info("eval reward=%.3f" % tf.reduce_mean(time_step.reward))
self.assertAlmostEqual(
1.0, float(tf.reduce_mean(time_step.reward)), delta=1e-2)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
from alf.utils.common import set_per_process_memory_growth
set_per_process_memory_growth()
tf.test.main()
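# Example (illustrative): set the variable checked in setUp to any non-empty
# value to skip this long-running test:
#   SKIP_LONG_TIME_COST_TESTS=1 python merlin_algorithm_test.py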
| [] | [] | [
"SKIP_LONG_TIME_COST_TESTS"
] | [] | ["SKIP_LONG_TIME_COST_TESTS"] | python | 1 | 0 | |
pkg/disk/nodeserver.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package disk
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/kubernetes/pkg/util/resizefs"
utilexec "k8s.io/utils/exec"
k8smount "k8s.io/utils/mount"
"github.com/kubernetes-sigs/alibaba-cloud-csi-driver/pkg/utils"
)
type nodeServer struct {
zone string
maxVolumesPerNode int64
nodeID string
mounter utils.Mounter
k8smounter k8smount.Interface
clientSet *kubernetes.Clientset
*csicommon.DefaultNodeServer
}
const (
// DiskStatusInuse disk inuse status
DiskStatusInuse = "In_use"
// DiskStatusAttaching disk attaching status
DiskStatusAttaching = "Attaching"
// DiskStatusAvailable disk available status
DiskStatusAvailable = "Available"
// DiskStatusAttached disk attached status
DiskStatusAttached = "attached"
// DiskStatusDetached disk detached status
DiskStatusDetached = "detached"
// SharedEnable tag
SharedEnable = "shared"
// SysConfigTag tag
SysConfigTag = "sysConfig"
// MkfsOptions tag
MkfsOptions = "mkfsOptions"
// DiskTagedByPlugin tag
DiskTagedByPlugin = "DISK_TAGED_BY_PLUGIN"
// DiskMetricByPlugin tag
DiskMetricByPlugin = "DISK_METRIC_BY_PLUGIN"
// DiskDetachDisable tag
DiskDetachDisable = "DISK_DETACH_DISABLE"
// DiskBdfEnable tag
DiskBdfEnable = "DISK_BDF_ENABLE"
// DiskDetachBeforeDelete tag
DiskDetachBeforeDelete = "DISK_DETACH_BEFORE_DELETE"
// DiskAttachByController tag
DiskAttachByController = "DISK_AD_CONTROLLER"
// DiskAttachedKey attached key
DiskAttachedKey = "k8s.aliyun.com"
// DiskAttachedValue attached value
DiskAttachedValue = "true"
// VolumeDir volume dir
VolumeDir = "/host/etc/kubernetes/volumes/disk/"
// VolumeDirRemove volume dir remove
VolumeDirRemove = "/host/etc/kubernetes/volumes/disk/remove"
// MixRunTimeMode support both runc and runv
MixRunTimeMode = "runc-runv"
// RunvRunTimeMode tag
RunvRunTimeMode = "runv"
// InputOutputErr tag
InputOutputErr = "input/output error"
// BLOCKVOLUMEPREFIX block volume mount prefix
BLOCKVOLUMEPREFIX = "/var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/publish"
)
// QueryResponse response struct for query server
type QueryResponse struct {
device string
volumeType string
identity string
mountfile string
runtime string
}
// NewNodeServer creates node server
func NewNodeServer(d *csicommon.CSIDriver, c *ecs.Client) csi.NodeServer {
var maxVolumesNum int64 = 15
volumeNum := os.Getenv("MAX_VOLUMES_PERNODE")
if "" != volumeNum {
num, err := strconv.ParseInt(volumeNum, 10, 64)
if err != nil {
log.Fatalf("NewNodeServer: MAX_VOLUMES_PERNODE must be int64, but get: %s", volumeNum)
} else {
if num < 0 || num > 15 {
log.Errorf("NewNodeServer: MAX_VOLUMES_PERNODE must between 0-15, but get: %s", volumeNum)
} else {
maxVolumesNum = num
log.Infof("NewNodeServer: MAX_VOLUMES_PERNODE is set to(not default): %d", maxVolumesNum)
}
}
} else {
log.Infof("NewNodeServer: MAX_VOLUMES_PERNODE is set to(default): %d", maxVolumesNum)
}
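// Example (illustrative): MAX_VOLUMES_PERNODE=10 caps attachable disks per
// node; values outside 0-15 are rejected and the default of 15 is kept.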
doc, err := getInstanceDoc()
if err != nil {
log.Fatalf("Error happens to get node document: %v", err)
}
cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
if err != nil {
log.Fatalf("Error building kubeconfig: %s", err.Error())
}
kubeClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
log.Fatalf("Error building kubernetes clientset: %s", err.Error())
}
// Create Directory
os.MkdirAll(VolumeDir, os.FileMode(0755))
os.MkdirAll(VolumeDirRemove, os.FileMode(0755))
if IsVFNode() {
log.Infof("Currently node is VF model")
} else {
log.Infof("Currently node is NOT VF model")
}
return &nodeServer{
zone: doc.ZoneID,
maxVolumesPerNode: maxVolumesNum,
nodeID: doc.InstanceID,
DefaultNodeServer: csicommon.NewDefaultNodeServer(d),
mounter: utils.NewMounter(),
k8smounter: k8smount.New(""),
clientSet: kubeClient,
}
}
func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
// currently there is a single NodeServer capability according to the spec
nscap := &csi.NodeServiceCapability{
Type: &csi.NodeServiceCapability_Rpc{
Rpc: &csi.NodeServiceCapability_RPC{
Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
},
},
}
nscap2 := &csi.NodeServiceCapability{
Type: &csi.NodeServiceCapability_Rpc{
Rpc: &csi.NodeServiceCapability_RPC{
Type: csi.NodeServiceCapability_RPC_EXPAND_VOLUME,
},
},
}
nscap3 := &csi.NodeServiceCapability{
Type: &csi.NodeServiceCapability_Rpc{
Rpc: &csi.NodeServiceCapability_RPC{
Type: csi.NodeServiceCapability_RPC_GET_VOLUME_STATS,
},
},
}
// Disk Metric enable config
nodeSvcCap := []*csi.NodeServiceCapability{nscap, nscap2}
if GlobalConfigVar.MetricEnable {
nodeSvcCap = []*csi.NodeServiceCapability{nscap, nscap2, nscap3}
}
return &csi.NodeGetCapabilitiesResponse{
Capabilities: nodeSvcCap,
}, nil
}
// csi disk driver: bind directory from global to pod.
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
// check target mount path
sourcePath := req.StagingTargetPath
// running in runc/runv mode
if GlobalConfigVar.RunTimeClass == MixRunTimeMode {
if runtime, err := utils.GetPodRunTime(req, ns.clientSet); err != nil {
return nil, status.Errorf(codes.Internal, "NodePublishVolume: cannot get pod runtime: %v", err)
} else if runtime == RunvRunTimeMode {
log.Infof("NodePublishVolume:: Kata Disk Volume %s Mount with: %v", req.VolumeId, req)
// umount the stage path, which is mounted in Stage
if err := ns.unmountStageTarget(sourcePath); err != nil {
log.Errorf("NodePublishVolume(runv): unmountStageTarget %s with error: %s", sourcePath, err.Error())
return nil, status.Error(codes.InvalidArgument, "NodePublishVolume: unmountStageTarget "+sourcePath+" with error: "+err.Error())
}
deviceName, err := GetDeviceByVolumeID(req.VolumeId)
if err != nil && deviceName == "" {
deviceName = getVolumeConfig(req.VolumeId)
}
if deviceName == "" {
log.Errorf("NodePublishVolume(runv): cannot get local deviceName for volume: %s", req.VolumeId)
return nil, status.Error(codes.InvalidArgument, "NodePublishVolume: cannot get local deviceName for volume: "+req.VolumeId)
}
// save volume info to local file
mountFile := filepath.Join(req.GetTargetPath(), utils.CsiPluginRunTimeFlagFile)
if err := utils.CreateDest(req.GetTargetPath()); err != nil {
log.Errorf("NodePublishVolume(runv): Create Dest %s error: %s", req.GetTargetPath(), err.Error())
return nil, status.Error(codes.InvalidArgument, "NodePublishVolume(runv): Create Dest "+req.GetTargetPath()+" with error: "+err.Error())
}
qResponse := QueryResponse{}
qResponse.device = deviceName
qResponse.identity = req.GetTargetPath()
qResponse.volumeType = "block"
qResponse.mountfile = mountFile
qResponse.runtime = RunvRunTimeMode
if err := utils.WriteJosnFile(qResponse, mountFile); err != nil {
log.Errorf("NodePublishVolume(runv): Write Json File error: %s", err.Error())
return nil, status.Error(codes.InvalidArgument, "NodePublishVolume(runv): Write Json File error: "+err.Error())
}
log.Infof("NodePublishVolume:: Kata Disk Volume %s Mount Successful", req.VolumeId)
return &csi.NodePublishVolumeResponse{}, nil
}
}
isBlock := req.GetVolumeCapability().GetBlock() != nil
if isBlock {
sourcePath = filepath.Join(req.StagingTargetPath, req.VolumeId)
}
targetPath := req.GetTargetPath()
log.Infof("NodePublishVolume: Starting Mount Volume %s, source %s > target %s", req.VolumeId, sourcePath, targetPath)
if req.VolumeId == "" {
return nil, status.Error(codes.InvalidArgument, "NodePublishVolume: Volume ID must be provided")
}
if req.StagingTargetPath == "" {
return nil, status.Error(codes.InvalidArgument, "NodePublishVolume: Staging Target Path must be provided")
}
if req.VolumeCapability == nil {
return nil, status.Error(codes.InvalidArgument, "NodePublishVolume: Volume Capability must be provided")
}
// check if block volume
if isBlock {
if !utils.IsMounted(targetPath) {
if err := ns.mounter.EnsureBlock(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
options := []string{"bind"}
if err := ns.mounter.MountBlock(sourcePath, targetPath, options...); err != nil {
return nil, err
}
}
log.Infof("NodePublishVolume: Mount Successful Block Volume: %s, from source %s to target %v", req.VolumeId, sourcePath, targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
if !strings.HasSuffix(targetPath, "/mount") {
return nil, status.Errorf(codes.InvalidArgument, "NodePublishVolume: volume %s malformed the value of target path: %s", req.VolumeId, targetPath)
}
if err := ns.mounter.EnsureFolder(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
notmounted, err := ns.k8smounter.IsLikelyNotMountPoint(targetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if !notmounted {
log.Infof("NodePublishVolume: VolumeId: %s, Path %s is already mounted", req.VolumeId, targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
sourceNotMounted, err := ns.k8smounter.IsLikelyNotMountPoint(sourcePath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if sourceNotMounted {
device, _ := GetDeviceByVolumeID(req.GetVolumeId())
if device != "" {
if err := ns.mountDeviceToGlobal(req.VolumeCapability, req.VolumeContext, device, sourcePath); err != nil {
log.Errorf("NodePublishVolume: VolumeId: %s, remount disk to global %s error: %s", req.VolumeId, sourcePath, err.Error())
return nil, status.Error(codes.Internal, "NodePublishVolume: VolumeId: %s, remount disk error "+err.Error())
}
log.Infof("NodePublishVolume: SourcePath %s not mounted, and mounted again with device %s", sourcePath, device)
} else {
log.Errorf("NodePublishVolume: VolumeId: %s, sourcePath %s is Not mounted and device cannot found", req.VolumeId, sourcePath)
return nil, status.Error(codes.Internal, "NodePublishVolume: VolumeId: %s, sourcePath %s is Not mounted "+sourcePath)
}
}
// start to mount
mnt := req.VolumeCapability.GetMount()
options := append(mnt.MountFlags, "bind")
if req.Readonly {
options = append(options, "ro")
}
fsType := "ext4"
if mnt.FsType != "" {
fsType = mnt.FsType
}
// check device name available
expectName := GetVolumeDeviceName(req.VolumeId)
realDevice := GetDeviceByMntPoint(sourcePath)
if realDevice == "" {
opts := append(mnt.MountFlags, "shared")
if err := ns.k8smounter.Mount(expectName, sourcePath, fsType, opts); err != nil {
log.Errorf("NodePublishVolume: mount source error: %s, %s, %s", expectName, sourcePath, err.Error())
return nil, status.Error(codes.Internal, "NodePublishVolume: mount source error: "+expectName+", "+sourcePath+", "+err.Error())
}
realDevice = GetDeviceByMntPoint(sourcePath)
}
if expectName != realDevice || realDevice == "" {
log.Errorf("NodePublishVolume: Volume: %s, sourcePath: %s real Device: %s not same with expected: %s", req.VolumeId, sourcePath, realDevice, expectName)
return nil, status.Error(codes.Internal, "NodePublishVolume: sourcePath: "+sourcePath+" real Device: "+realDevice+" not same with Saved: "+expectName)
}
log.Infof("NodePublishVolume: Starting mount volume %s with flags %v and fsType %s", req.VolumeId, options, fsType)
if err = ns.k8smounter.Mount(sourcePath, targetPath, fsType, options); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
log.Infof("NodePublishVolume: Mount Successful Volume: %s, from source %s to target %v", req.VolumeId, sourcePath, targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
targetPath := req.GetTargetPath()
log.Infof("NodeUnpublishVolume: Starting to Unmount Volume %s, Target %v", req.VolumeId, targetPath)
// Step 1: check folder exists
if !IsFileExisting(targetPath) {
if err := ns.unmountDuplicateMountPoint(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
log.Infof("NodeUnpublishVolume: Volume %s Folder %s doesn't exist", req.VolumeId, targetPath)
return &csi.NodeUnpublishVolumeResponse{}, nil
}
// check runtime mode
if GlobalConfigVar.RunTimeClass == MixRunTimeMode && utils.IsMountPointRunv(targetPath) {
fileName := filepath.Join(targetPath, utils.CsiPluginRunTimeFlagFile)
if err := os.Remove(fileName); err != nil {
msg := fmt.Sprintf("NodeUnpublishVolume: Remove Runv File %s with error: %s", fileName, err.Error())
return nil, status.Error(codes.InvalidArgument, msg)
}
log.Infof("NodeUnpublishVolume(runv): Remove Runv File Successful: %s", fileName)
return &csi.NodeUnpublishVolumeResponse{}, nil
}
// Step 2: check mount point
notmounted, err := ns.k8smounter.IsLikelyNotMountPoint(targetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if notmounted {
if empty, _ := IsDirEmpty(targetPath); empty {
if err := ns.unmountDuplicateMountPoint(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
log.Infof("NodeUnpublishVolume: %s is unmounted and empty", targetPath)
return &csi.NodeUnpublishVolumeResponse{}, nil
}
// Block device
if !utils.IsDir(targetPath) && strings.HasPrefix(targetPath, BLOCKVOLUMEPREFIX) {
if removeErr := os.Remove(targetPath); removeErr != nil {
return nil, status.Errorf(codes.Internal, "Could not remove mount block target %s: %v", targetPath, removeErr)
}
return &csi.NodeUnpublishVolumeResponse{}, nil
}
log.Errorf("NodeUnpublishVolume: VolumeId: %s, Path %s is unmounted, but not empty dir", req.VolumeId, targetPath)
return nil, status.Errorf(codes.Internal, "NodeUnpublishVolume: VolumeId: %s, Path %s is unmounted, but not empty dir", req.VolumeId, targetPath)
}
// Step 3: umount target path
err = ns.k8smounter.Unmount(targetPath)
if err != nil {
log.Errorf("NodeUnpublishVolume: volumeId: %s, umount path: %s with error: %s", req.VolumeId, targetPath, err.Error())
return nil, status.Error(codes.Internal, err.Error())
}
if utils.IsMounted(targetPath) {
log.Errorf("NodeUnpublishVolume: TargetPath mounted yet: volumeId: %s with target %s", req.VolumeId, targetPath)
return nil, status.Error(codes.Internal, "NodeUnpublishVolume: TargetPath mounted yet with target"+targetPath)
}
// below directory can not be umounted by kubelet in ack
if err := ns.unmountDuplicateMountPoint(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
log.Infof("NodeUnpublishVolume: Umount Successful for volume %s, target %v", req.VolumeId, targetPath)
return &csi.NodeUnpublishVolumeResponse{}, nil
}
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
log.Infof("NodeStageVolume: Stage VolumeId: %s, Target Path: %s, VolumeContext: %v", req.GetVolumeId(), req.StagingTargetPath, req.VolumeContext)
// Step 1: check input parameters
targetPath := req.StagingTargetPath
if req.VolumeId == "" {
return nil, status.Error(codes.InvalidArgument, "NodeStageVolume Volume ID must be provided")
}
// targetPath format: /var/lib/kubelet/plugins/kubernetes.io/csi/pv/pv-disk-1e7001e0-c54a-11e9-8f89-00163e0e78a0/globalmount
if targetPath == "" {
return nil, status.Error(codes.InvalidArgument, "NodeStageVolume Staging Target Path must be provided")
}
if req.VolumeCapability == nil {
return nil, status.Error(codes.InvalidArgument, "NodeStageVolume Volume Capability must be provided")
}
isBlock := req.GetVolumeCapability().GetBlock() != nil
if isBlock {
targetPath = filepath.Join(targetPath, req.VolumeId)
if utils.IsMounted(targetPath) {
log.Infof("NodeStageVolume: Block Already Mounted: volumeId: %s target %s", req.VolumeId, targetPath)
return &csi.NodeStageVolumeResponse{}, nil
}
if err := ns.mounter.EnsureBlock(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
} else {
if err := ns.mounter.EnsureFolder(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}
// Step 2: check target path mounted
notmounted, err := ns.k8smounter.IsLikelyNotMountPoint(targetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if !notmounted {
deviceName := GetDeviceByMntPoint(targetPath)
if err := checkDeviceAvailable(deviceName); err != nil {
log.Errorf("NodeStageVolume: mountPath is mounted %s, but check device available error: %s", targetPath, err.Error())
return nil, status.Error(codes.Internal, err.Error())
}
log.Infof("NodeStageVolume: volumeId: %s, Path: %s is already mounted, device: %s", req.VolumeId, targetPath, deviceName)
return &csi.NodeStageVolumeResponse{}, nil
}
device := ""
isSharedDisk := false
if value, ok := req.VolumeContext[SharedEnable]; ok {
value = strings.ToLower(value)
if value == "enable" || value == "true" || value == "yes" {
isSharedDisk = true
}
}
// Step 4 Attach volume
if GlobalConfigVar.ADControllerEnable {
var bdf string
device, err = GetDeviceByVolumeID(req.GetVolumeId())
if IsVFNode() && device == "" {
if bdf, err = bindBdfDisk(req.GetVolumeId()); err != nil {
if err := unbindBdfDisk(req.GetVolumeId()); err != nil {
return nil, status.Errorf(codes.Aborted, "NodeStageVolume: failed to detach bdf disk: %v", err)
}
return nil, status.Errorf(codes.Aborted, "NodeStageVolume: failed to attach bdf disk: %v", err)
}
device, err = GetDeviceByVolumeID(req.GetVolumeId())
if bdf != "" && device == "" {
device, err = GetDeviceByBdf(bdf)
}
}
if err != nil {
log.Errorf("NodeStageVolume: ADController Enabled, but device can't be found in node: %s, error: %s", req.VolumeId, err.Error())
return nil, status.Error(codes.Aborted, "NodeStageVolume: ADController Enabled, but device can't be found:"+req.VolumeId+err.Error())
}
} else {
device, err = attachDisk(req.GetVolumeId(), ns.nodeID, isSharedDisk)
if err != nil {
log.Errorf("NodeStageVolume: Attach volume: %s with error: %s", req.VolumeId, err.Error())
return nil, err
}
}
if err := checkDeviceAvailable(device); err != nil {
log.Errorf("NodeStageVolume: Attach device with error: %s", err.Error())
return nil, status.Error(codes.Internal, err.Error())
}
if err := saveVolumeConfig(req.VolumeId, device); err != nil {
return nil, status.Error(codes.Aborted, "NodeStageVolume: saveVolumeConfig for ("+req.VolumeId+device+") error with: "+err.Error())
}
log.Infof("NodeStageVolume: Volume Successful Attached: %s, to Node: %s, Device: %s", req.VolumeId, ns.nodeID, device)
// sysConfig
if value, ok := req.VolumeContext[SysConfigTag]; ok {
configList := strings.Split(strings.TrimSpace(value), ",")
for _, configStr := range configList {
keyValue := strings.Split(configStr, "=")
if len(keyValue) == 2 {
fileName := filepath.Join("/sys/block/", filepath.Base(device), keyValue[0])
configCmd := "echo '" + keyValue[1] + "' > " + fileName
if _, err := utils.Run(configCmd); err != nil {
log.Errorf("NodeStageVolume: Volume Block System Config with cmd: %s, get error: %v", configCmd, err)
return nil, status.Error(codes.Aborted, "NodeStageVolume: Volume Block System Config with cmd:"+configCmd+", error with: "+err.Error())
}
log.Infof("NodeStageVolume: Volume Block System Config Successful with command: %s, for volume: %v", configCmd, req.VolumeId)
} else {
log.Errorf("NodeStageVolume: Volume Block System Config with format error: %s", configStr)
return nil, status.Error(codes.Aborted, "NodeStageVolume: Volume Block System Config with format error "+configStr)
}
}
}
// Block volume not need to format
if isBlock {
if utils.IsMounted(targetPath) {
log.Infof("NodeStageVolume: Block Already Mounted: volumeId: %s with target %s", req.VolumeId, targetPath)
return &csi.NodeStageVolumeResponse{}, nil
}
options := []string{"bind"}
if err := ns.mounter.MountBlock(device, targetPath, options...); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
log.Infof("NodeStageVolume: Successfully Mount Device %s to %s with options: %v", device, targetPath, options)
return &csi.NodeStageVolumeResponse{}, nil
}
	// Step 5: start to format
mnt := req.VolumeCapability.GetMount()
options := append(mnt.MountFlags, "shared")
fsType := "ext4"
if mnt.FsType != "" {
fsType = mnt.FsType
}
if err := ns.mounter.EnsureFolder(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
// Set mkfs options for ext3, ext4
mkfsOptions := make([]string, 0)
if value, ok := req.VolumeContext[MkfsOptions]; ok {
mkfsOptions = strings.Split(value, " ")
}
	// Format-and-mount, using custom mkfs options only for ext3/ext4
diskMounter := &k8smount.SafeFormatAndMount{Interface: ns.k8smounter, Exec: utilexec.New()}
if len(mkfsOptions) > 0 && (fsType == "ext4" || fsType == "ext3") {
if err := formatAndMount(diskMounter, device, targetPath, fsType, mkfsOptions, options); err != nil {
log.Errorf("Mountdevice: FormatAndMount fail with mkfsOptions %s, %s, %s, %s, %s with error: %s", device, targetPath, fsType, mkfsOptions, options, err.Error())
return nil, status.Error(codes.Internal, err.Error())
}
} else {
if err := diskMounter.FormatAndMount(device, targetPath, fsType, options); err != nil {
log.Errorf("NodeStageVolume: Volume: %s, Device: %s, FormatAndMount error: %s", req.VolumeId, device, err.Error())
return nil, status.Error(codes.Internal, err.Error())
}
}
log.Infof("NodeStageVolume: Mount Successful: volumeId: %s target %v, device: %s, mkfsOptions: %v", req.VolumeId, targetPath, device, mkfsOptions)
return &csi.NodeStageVolumeResponse{}, nil
}
// target format: /var/lib/kubelet/plugins/kubernetes.io/csi/pv/pv-disk-1e7001e0-c54a-11e9-8f89-00163e0e78a0/globalmount
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
log.Infof("NodeUnstageVolume:: Starting to Unmount volume, volumeId: %s, target: %v", req.VolumeId, req.StagingTargetPath)
if req.VolumeId == "" {
return nil, status.Error(codes.InvalidArgument, "NodeUnstageVolume Volume ID must be provided")
}
if req.StagingTargetPath == "" {
return nil, status.Error(codes.InvalidArgument, "NodeUnstageVolume Staging Target Path must be provided")
}
// check block device mountpoint
targetPath := req.GetStagingTargetPath()
tmpPath := filepath.Join(req.GetStagingTargetPath(), req.VolumeId)
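	// For block volumes a device file may exist at <stagingTargetPath>/<volumeId>; if so, unstage that path instead.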
if IsFileExisting(tmpPath) {
fileInfo, err := os.Lstat(tmpPath)
if err != nil {
if strings.Contains(strings.ToLower(err.Error()), InputOutputErr) {
if err = isPathAvailiable(targetPath); err != nil {
if err = utils.Umount(targetPath); err != nil {
return nil, status.Errorf(codes.InvalidArgument, "NodeUnstageVolume umount target %s with errror: %v", targetPath, err)
}
log.Warnf("NodeUnstageVolume: target path %s show input/output error: %v, umount it.", targetPath, err)
}
} else {
log.Errorf("NodeUnstageVolume: lstat mountpoint: %s with error: %s", tmpPath, err.Error())
return nil, status.Error(codes.InvalidArgument, "NodeUnstageVolume: stat mountpoint error: "+err.Error())
}
} else if (fileInfo.Mode() & os.ModeDevice) != 0 {
log.Infof("NodeUnstageVolume: mountpoint %s, is block device", tmpPath)
targetPath = tmpPath
}
}
// Step 1: check folder exists and umount
msgLog := ""
if IsFileExisting(targetPath) {
notmounted, err := ns.k8smounter.IsLikelyNotMountPoint(targetPath)
if err != nil {
log.Errorf("NodeUnstageVolume: VolumeId: %s, check mountPoint: %s mountpoint error: %v", req.VolumeId, targetPath, err)
return nil, status.Error(codes.Internal, err.Error())
}
if !notmounted {
err = ns.k8smounter.Unmount(targetPath)
if err != nil {
log.Errorf("NodeUnstageVolume: VolumeId: %s, umount path: %s failed with: %v", req.VolumeId, targetPath, err)
return nil, status.Error(codes.Internal, err.Error())
}
if utils.IsMounted(targetPath) {
log.Errorf("NodeUnstageVolume: TargetPath mounted yet: volumeId: %s with target %s", req.VolumeId, targetPath)
return nil, status.Error(codes.Internal, "NodeUnstageVolume: TargetPath mounted yet with target"+targetPath)
}
} else {
msgLog = fmt.Sprintf("NodeUnstageVolume: VolumeId: %s, mountpoint: %s not mounted, skipping and continue to detach", req.VolumeId, targetPath)
}
// safe remove mountpoint
err = ns.mounter.SafePathRemove(targetPath)
if err != nil {
log.Errorf("NodeUnstageVolume: VolumeId: %s, Remove targetPath failed, target %v", req.VolumeId, targetPath)
return nil, status.Error(codes.Internal, err.Error())
}
} else {
msgLog = fmt.Sprintf("NodeUnstageVolume: VolumeId: %s, Path %s doesn't exist, continue to detach", req.VolumeId, targetPath)
}
if msgLog == "" {
log.Infof("NodeUnstageVolume: Unmount TargetPath successful, target %v, volumeId: %s", targetPath, req.VolumeId)
} else {
		log.Info(msgLog)
}
if IsVFNode() {
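		// On VF (bdf) nodes, unbind the disk from its bdf before detaching so the device can be released.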
if err := unbindBdfDisk(req.VolumeId); err != nil {
log.Errorf("NodeUnstageVolume: unbind bdf disk error: %v", err)
return nil, err
}
}
	// Detach the disk only when the attach/detach controller is disabled
if !GlobalConfigVar.ADControllerEnable {
// if DetachDisabled is set to true, return
if GlobalConfigVar.DetachDisabled {
log.Infof("NodeUnstageVolume: ADController is Disable, Detach Flag Set to false, PV %s", req.VolumeId)
return &csi.NodeUnstageVolumeResponse{}, nil
}
err := detachDisk(req.VolumeId, ns.nodeID)
if err != nil {
log.Errorf("NodeUnstageVolume: VolumeId: %s, Detach failed with error %v", req.VolumeId, err.Error())
return nil, err
}
removeVolumeConfig(req.VolumeId)
}
return &csi.NodeUnstageVolumeResponse{}, nil
}
func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
return &csi.NodeGetInfoResponse{
NodeId: ns.nodeID,
MaxVolumesPerNode: ns.maxVolumesPerNode,
// make sure that the driver works on this particular zone only
AccessibleTopology: &csi.Topology{
Segments: map[string]string{
TopologyZoneKey: ns.zone,
},
},
}, nil
}
func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (
*csi.NodeExpandVolumeResponse, error) {
log.Infof("NodeExpandVolume: node expand volume: %v", req)
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID is empty")
}
if len(req.GetVolumePath()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume path is empty")
}
volumePath := req.GetVolumePath()
diskID := req.GetVolumeId()
if strings.Contains(volumePath, BLOCKVOLUMEPREFIX) {
log.Infof("NodeExpandVolume:: Block Volume not Expand FS, volumeId: %s, volumePath: %s", diskID, volumePath)
return &csi.NodeExpandVolumeResponse{}, nil
}
devicePath := GetVolumeDeviceName(diskID)
if devicePath == "" {
log.Errorf("NodeExpandVolume:: can't get devicePath: %s", diskID)
return nil, status.Error(codes.InvalidArgument, "can't get devicePath for "+diskID)
}
log.Infof("NodeExpandVolume:: volumeId: %s, devicePath: %s, volumePath: %s", diskID, devicePath, volumePath)
// use resizer to expand volume filesystem
resizer := resizefs.NewResizeFs(&k8smount.SafeFormatAndMount{Interface: ns.k8smounter, Exec: utilexec.New()})
ok, err := resizer.Resize(devicePath, volumePath)
if err != nil {
log.Errorf("NodeExpandVolume:: Resize Error, volumeId: %s, devicePath: %s, volumePath: %s, err: %s", diskID, devicePath, volumePath, err.Error())
return nil, status.Error(codes.Internal, err.Error())
}
if !ok {
log.Errorf("NodeExpandVolume:: Resize failed, volumeId: %s, devicePath: %s, volumePath: %s", diskID, devicePath, volumePath)
return nil, status.Error(codes.Internal, "Fail to resize volume fs")
}
log.Infof("NodeExpandVolume:: resizefs successful volumeId: %s, devicePath: %s, volumePath: %s", diskID, devicePath, volumePath)
return &csi.NodeExpandVolumeResponse{}, nil
}
// NodeGetVolumeStats used for csi metrics
func (ns *nodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
var err error
targetPath := req.GetVolumePath()
if targetPath == "" {
err = fmt.Errorf("NodeGetVolumeStats targetpath %v is empty", targetPath)
return nil, status.Error(codes.InvalidArgument, err.Error())
}
return utils.GetMetrics(targetPath)
}
// unmountStageTarget unmounts the given path without removing it
func (ns *nodeServer) unmountStageTarget(targetPath string) error {
msgLog := "UnmountStageTarget: Unmount Stage Target: " + targetPath
if IsFileExisting(targetPath) {
notmounted, err := ns.k8smounter.IsLikelyNotMountPoint(targetPath)
if err != nil {
log.Errorf("unmountStageTarget: check mountPoint: %s mountpoint error: %v", targetPath, err)
return status.Error(codes.Internal, err.Error())
}
if !notmounted {
err = ns.k8smounter.Unmount(targetPath)
if err != nil {
log.Errorf("unmountStageTarget: umount path: %s failed with: %v", targetPath, err)
return status.Error(codes.Internal, err.Error())
}
} else {
msgLog = fmt.Sprintf("unmountStageTarget: umount %s Successful", targetPath)
}
} else {
msgLog = fmt.Sprintf("unmountStageTarget: Path %s doesn't exist", targetPath)
}
	log.Info(msgLog)
return nil
}
func (ns *nodeServer) mountDeviceToGlobal(capability *csi.VolumeCapability, volumeContext map[string]string, device, sourcePath string) error {
mnt := capability.GetMount()
options := append(mnt.MountFlags, "shared")
fsType := "ext4"
if mnt.FsType != "" {
fsType = mnt.FsType
}
if err := ns.mounter.EnsureFolder(sourcePath); err != nil {
return status.Error(codes.Internal, err.Error())
}
// Set mkfs options for ext3, ext4
mkfsOptions := make([]string, 0)
if value, ok := volumeContext[MkfsOptions]; ok {
mkfsOptions = strings.Split(value, " ")
}
// do format-mount or mount
diskMounter := &k8smount.SafeFormatAndMount{Interface: ns.k8smounter, Exec: utilexec.New()}
if len(mkfsOptions) > 0 && (fsType == "ext4" || fsType == "ext3") {
if err := formatAndMount(diskMounter, device, sourcePath, fsType, mkfsOptions, options); err != nil {
log.Errorf("mountDeviceToGlobal: FormatAndMount fail with mkfsOptions %s, %s, %s, %s, %s with error: %s", device, sourcePath, fsType, mkfsOptions, options, err.Error())
return status.Error(codes.Internal, err.Error())
}
} else {
if err := diskMounter.FormatAndMount(device, sourcePath, fsType, options); err != nil {
log.Errorf("mountDeviceToGlobal: Device: %s, FormatAndMount error: %s", device, err.Error())
return status.Error(codes.Internal, err.Error())
}
}
return nil
}
func (ns *nodeServer) unmountDuplicateMountPoint(targetPath string) error {
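	// Kubelet data on ACK nodes may live under /var/lib/container, so the same PV can have a second
	// globalmount path there; unmount it when no other pod still references the device.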
pathParts := strings.Split(targetPath, "/")
partsLen := len(pathParts)
if partsLen > 2 && pathParts[partsLen-1] == "mount" {
globalPath2 := filepath.Join("/var/lib/container/kubelet/plugins/kubernetes.io/csi/pv/", pathParts[partsLen-2], "/globalmount")
if utils.IsFileExisting(globalPath2) {
// check globalPath2 is mountpoint
notmounted, err := ns.k8smounter.IsLikelyNotMountPoint(globalPath2)
if err == nil && !notmounted {
// check device is used by others
refs, err := ns.k8smounter.GetMountRefs(globalPath2)
if err == nil && !ns.mounter.HasMountRefs(globalPath2, refs) {
log.Infof("NodeUnpublishVolume: VolumeId Unmount global path %s for ack with kubelet data disk", globalPath2)
if err := utils.Umount(globalPath2); err != nil {
log.Errorf("NodeUnpublishVolume: volumeId: unmount global path %s failed with err: %v", globalPath2, err)
return status.Error(codes.Internal, err.Error())
}
} else {
log.Infof("Global Path %s is mounted by others: %v", globalPath2, refs)
}
} else {
log.Warnf("Global Path is not mounted: %s", globalPath2)
}
}
} else {
log.Warnf("Target Path is illegal format: %s", targetPath)
}
return nil
}
| [
"\"MAX_VOLUMES_PERNODE\""
] | [] | [
"MAX_VOLUMES_PERNODE"
] | [] | ["MAX_VOLUMES_PERNODE"] | go | 1 | 0 | |
cmd/integrationArtifactGetServiceEndpoint_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperenv"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type integrationArtifactGetServiceEndpointOptions struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
IntegrationFlowID string `json:"integrationFlowId,omitempty"`
Platform string `json:"platform,omitempty"`
Host string `json:"host,omitempty"`
OAuthTokenProviderURL string `json:"oAuthTokenProviderUrl,omitempty"`
}
type integrationArtifactGetServiceEndpointCommonPipelineEnvironment struct {
custom struct {
iFlowServiceEndpoint string
}
}
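// persist writes the collected custom values of the common pipeline environment to disk so that later pipeline steps can consume them.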
func (p *integrationArtifactGetServiceEndpointCommonPipelineEnvironment) persist(path, resourceName string) {
content := []struct {
category string
name string
value interface{}
}{
{category: "custom", name: "iFlowServiceEndpoint", value: p.custom.iFlowServiceEndpoint},
}
errCount := 0
for _, param := range content {
err := piperenv.SetResourceParameter(path, resourceName, filepath.Join(param.category, param.name), param.value)
if err != nil {
log.Entry().WithError(err).Error("Error persisting piper environment.")
errCount++
}
}
if errCount > 0 {
log.Entry().Fatal("failed to persist Piper environment")
}
}
// IntegrationArtifactGetServiceEndpointCommand gets a deployed CPI integration flow service endpoint
func IntegrationArtifactGetServiceEndpointCommand() *cobra.Command {
const STEP_NAME = "integrationArtifactGetServiceEndpoint"
metadata := integrationArtifactGetServiceEndpointMetadata()
var stepConfig integrationArtifactGetServiceEndpointOptions
var startTime time.Time
var commonPipelineEnvironment integrationArtifactGetServiceEndpointCommonPipelineEnvironment
var logCollector *log.CollectorHook
var createIntegrationArtifactGetServiceEndpointCmd = &cobra.Command{
Use: STEP_NAME,
		Short: "Get a deployed CPI integration flow service endpoint",
		Long: `With this step you can obtain information about the service endpoints exposed by SAP Cloud Platform Integration on a tenant using the OData API. Learn more about the SAP Cloud Integration remote API for getting the service endpoint of a deployed integration artifact [here](https://help.sap.com/viewer/368c481cd6954bdfa5d0435479fd4eaf/Cloud/en-US/d1679a80543f46509a7329243b595bdb.html).`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
commonPipelineEnvironment.persist(GeneralConfig.EnvRootPath, "commonPipelineEnvironment")
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Send(&telemetryData, logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
integrationArtifactGetServiceEndpoint(stepConfig, &telemetryData, &commonPipelineEnvironment)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addIntegrationArtifactGetServiceEndpointFlags(createIntegrationArtifactGetServiceEndpointCmd, &stepConfig)
return createIntegrationArtifactGetServiceEndpointCmd
}
func addIntegrationArtifactGetServiceEndpointFlags(cmd *cobra.Command, stepConfig *integrationArtifactGetServiceEndpointOptions) {
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User to authenticate to the SAP Cloud Platform Integration Service")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password to authenticate to the SAP Cloud Platform Integration Service")
cmd.Flags().StringVar(&stepConfig.IntegrationFlowID, "integrationFlowId", os.Getenv("PIPER_integrationFlowId"), "Specifies the ID of the Integration Flow artifact")
cmd.Flags().StringVar(&stepConfig.Platform, "platform", os.Getenv("PIPER_platform"), "Specifies the running platform of the SAP Cloud platform integraion service")
cmd.Flags().StringVar(&stepConfig.Host, "host", os.Getenv("PIPER_host"), "Specifies the protocol and host address, including the port. Please provide in the format `<protocol>://<host>:<port>`. Supported protocols are `http` and `https`.")
cmd.Flags().StringVar(&stepConfig.OAuthTokenProviderURL, "oAuthTokenProviderUrl", os.Getenv("PIPER_oAuthTokenProviderUrl"), "Specifies the oAuth Provider protocol and host address, including the port. Please provide in the format `<protocol>://<host>:<port>`. Supported protocols are `http` and `https`.")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
cmd.MarkFlagRequired("integrationFlowId")
cmd.MarkFlagRequired("host")
cmd.MarkFlagRequired("oAuthTokenProviderUrl")
}
// retrieve step metadata
func integrationArtifactGetServiceEndpointMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "integrationArtifactGetServiceEndpoint",
Aliases: []config.Alias{},
			Description: "Get a deployed CPI integration flow service endpoint",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Parameters: []config.StepParameters{
{
Name: "username",
ResourceRef: []config.ResourceReference{
{
Name: "cpiCredentialsId",
Param: "username",
Type: "secret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "password",
ResourceRef: []config.ResourceReference{
{
Name: "cpiCredentialsId",
Param: "password",
Type: "secret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "integrationFlowId",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "platform",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "host",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "oAuthTokenProviderUrl",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
},
},
Outputs: config.StepOutputs{
Resources: []config.StepResources{
{
Name: "commonPipelineEnvironment",
Type: "piperEnvironment",
Parameters: []map[string]interface{}{
{"Name": "custom/iFlowServiceEndpoint"},
},
},
},
},
},
}
return theMetaData
}
| [
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_integrationFlowId\"",
"\"PIPER_platform\"",
"\"PIPER_host\"",
"\"PIPER_oAuthTokenProviderUrl\""
] | [] | [
"PIPER_oAuthTokenProviderUrl",
"PIPER_host",
"PIPER_integrationFlowId",
"PIPER_password",
"PIPER_username",
"PIPER_platform"
] | [] | ["PIPER_oAuthTokenProviderUrl", "PIPER_host", "PIPER_integrationFlowId", "PIPER_password", "PIPER_username", "PIPER_platform"] | go | 6 | 0 | |
tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1beta1.services.dataset_service import (
DatasetServiceAsyncClient,
)
from google.cloud.aiplatform_v1beta1.services.dataset_service import (
DatasetServiceClient,
)
from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers
from google.cloud.aiplatform_v1beta1.services.dataset_service import transports
from google.cloud.aiplatform_v1beta1.services.dataset_service.transports.base import (
_API_CORE_VERSION,
)
from google.cloud.aiplatform_v1beta1.services.dataset_service.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.aiplatform_v1beta1.types import annotation
from google.cloud.aiplatform_v1beta1.types import annotation_spec
from google.cloud.aiplatform_v1beta1.types import data_item
from google.cloud.aiplatform_v1beta1.types import dataset
from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset
from google.cloud.aiplatform_v1beta1.types import dataset_service
from google.cloud.aiplatform_v1beta1.types import encryption_spec
from google.cloud.aiplatform_v1beta1.types import io
from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-api-core >= 1.26.0 is required:
# - Delete all the api-core and auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
requires_api_core_lt_1_26_0 = pytest.mark.skipif(
packaging.version.parse(_API_CORE_VERSION) >= packaging.version.parse("1.26.0"),
reason="This test requires google-api-core < 1.26.0",
)
requires_api_core_gte_1_26_0 = pytest.mark.skipif(
packaging.version.parse(_API_CORE_VERSION) < packaging.version.parse("1.26.0"),
reason="This test requires google-api-core >= 1.26.0",
)
def client_cert_source_callback():
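    # Dummy certificate/key material used to exercise the mTLS code paths in these tests.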
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert DatasetServiceClient._get_default_mtls_endpoint(None) is None
assert (
DatasetServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
DatasetServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
DatasetServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
DatasetServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
DatasetServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize(
"client_class", [DatasetServiceClient, DatasetServiceAsyncClient,]
)
def test_dataset_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
@pytest.mark.parametrize(
"client_class", [DatasetServiceClient, DatasetServiceAsyncClient,]
)
def test_dataset_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_dataset_service_client_get_transport_class():
transport = DatasetServiceClient.get_transport_class()
available_transports = [
transports.DatasetServiceGrpcTransport,
]
assert transport in available_transports
transport = DatasetServiceClient.get_transport_class("grpc")
assert transport == transports.DatasetServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"),
(
DatasetServiceAsyncClient,
transports.DatasetServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
DatasetServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DatasetServiceClient),
)
@mock.patch.object(
DatasetServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DatasetServiceAsyncClient),
)
def test_dataset_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(DatasetServiceClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(DatasetServiceClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "true"),
(
DatasetServiceAsyncClient,
transports.DatasetServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "false"),
(
DatasetServiceAsyncClient,
transports.DatasetServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
DatasetServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DatasetServiceClient),
)
@mock.patch.object(
DatasetServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DatasetServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_dataset_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"),
(
DatasetServiceAsyncClient,
transports.DatasetServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_dataset_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"),
(
DatasetServiceAsyncClient,
transports.DatasetServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_dataset_service_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_dataset_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = DatasetServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_create_dataset(
transport: str = "grpc", request_type=dataset_service.CreateDatasetRequest
):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.CreateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_dataset_from_dict():
test_create_dataset(request_type=dict)
def test_create_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
client.create_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.CreateDatasetRequest()
@pytest.mark.asyncio
async def test_create_dataset_async(
transport: str = "grpc_asyncio", request_type=dataset_service.CreateDatasetRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.CreateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_dataset_async_from_dict():
await test_create_dataset_async(request_type=dict)
def test_create_dataset_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.CreateDatasetRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_dataset_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.CreateDatasetRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_dataset_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_dataset(
parent="parent_value", dataset=gca_dataset.Dataset(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].dataset == gca_dataset.Dataset(name="name_value")
def test_create_dataset_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_dataset(
dataset_service.CreateDatasetRequest(),
parent="parent_value",
dataset=gca_dataset.Dataset(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_dataset_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_dataset(
parent="parent_value", dataset=gca_dataset.Dataset(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].dataset == gca_dataset.Dataset(name="name_value")
@pytest.mark.asyncio
async def test_create_dataset_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_dataset(
dataset_service.CreateDatasetRequest(),
parent="parent_value",
dataset=gca_dataset.Dataset(name="name_value"),
)
def test_get_dataset(
transport: str = "grpc", request_type=dataset_service.GetDatasetRequest
):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Dataset(
name="name_value",
display_name="display_name_value",
metadata_schema_uri="metadata_schema_uri_value",
etag="etag_value",
)
response = client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.GetDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, dataset.Dataset)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.metadata_schema_uri == "metadata_schema_uri_value"
assert response.etag == "etag_value"
def test_get_dataset_from_dict():
test_get_dataset(request_type=dict)
def test_get_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
client.get_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.GetDatasetRequest()
@pytest.mark.asyncio
async def test_get_dataset_async(
transport: str = "grpc_asyncio", request_type=dataset_service.GetDatasetRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset.Dataset(
name="name_value",
display_name="display_name_value",
metadata_schema_uri="metadata_schema_uri_value",
etag="etag_value",
)
)
response = await client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.GetDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, dataset.Dataset)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.metadata_schema_uri == "metadata_schema_uri_value"
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_get_dataset_async_from_dict():
await test_get_dataset_async(request_type=dict)
def test_get_dataset_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.GetDatasetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
call.return_value = dataset.Dataset()
client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_dataset_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.GetDatasetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset())
await client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_dataset_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Dataset()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_dataset(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_dataset_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_dataset(
dataset_service.GetDatasetRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_dataset_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Dataset()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_dataset(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_dataset_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_dataset(
dataset_service.GetDatasetRequest(), name="name_value",
)
def test_update_dataset(
transport: str = "grpc", request_type=dataset_service.UpdateDatasetRequest
):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_dataset.Dataset(
name="name_value",
display_name="display_name_value",
metadata_schema_uri="metadata_schema_uri_value",
etag="etag_value",
)
response = client.update_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.UpdateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_dataset.Dataset)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.metadata_schema_uri == "metadata_schema_uri_value"
assert response.etag == "etag_value"
def test_update_dataset_from_dict():
test_update_dataset(request_type=dict)
def test_update_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
client.update_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.UpdateDatasetRequest()
@pytest.mark.asyncio
async def test_update_dataset_async(
transport: str = "grpc_asyncio", request_type=dataset_service.UpdateDatasetRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_dataset.Dataset(
name="name_value",
display_name="display_name_value",
metadata_schema_uri="metadata_schema_uri_value",
etag="etag_value",
)
)
response = await client.update_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.UpdateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_dataset.Dataset)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.metadata_schema_uri == "metadata_schema_uri_value"
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_update_dataset_async_from_dict():
await test_update_dataset_async(request_type=dict)
def test_update_dataset_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.UpdateDatasetRequest()
request.dataset.name = "dataset.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
call.return_value = gca_dataset.Dataset()
client.update_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "dataset.name=dataset.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_dataset_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.UpdateDatasetRequest()
request.dataset.name = "dataset.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset())
await client.update_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "dataset.name=dataset.name/value",) in kw[
"metadata"
]
def test_update_dataset_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_dataset.Dataset()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_dataset(
dataset=gca_dataset.Dataset(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].dataset == gca_dataset.Dataset(name="name_value")
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_dataset_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_dataset(
dataset_service.UpdateDatasetRequest(),
dataset=gca_dataset.Dataset(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_dataset_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_dataset(
dataset=gca_dataset.Dataset(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].dataset == gca_dataset.Dataset(name="name_value")
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_dataset_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_dataset(
dataset_service.UpdateDatasetRequest(),
dataset=gca_dataset.Dataset(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
def test_list_datasets(
transport: str = "grpc", request_type=dataset_service.ListDatasetsRequest
):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset_service.ListDatasetsResponse(
next_page_token="next_page_token_value",
)
response = client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListDatasetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDatasetsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_datasets_from_dict():
test_list_datasets(request_type=dict)
def test_list_datasets_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls
    # (i.e. request is None and no flattened fields passed) work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
client.list_datasets()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListDatasetsRequest()
@pytest.mark.asyncio
async def test_list_datasets_async(
transport: str = "grpc_asyncio", request_type=dataset_service.ListDatasetsRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListDatasetsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListDatasetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDatasetsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_datasets_async_from_dict():
await test_list_datasets_async(request_type=dict)
def test_list_datasets_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ListDatasetsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
call.return_value = dataset_service.ListDatasetsResponse()
client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_datasets_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ListDatasetsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListDatasetsResponse()
)
await client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_datasets_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset_service.ListDatasetsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_datasets(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
def test_list_datasets_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_datasets(
dataset_service.ListDatasetsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_datasets_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListDatasetsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_datasets(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_datasets_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_datasets(
dataset_service.ListDatasetsRequest(), parent="parent_value",
)
def test_list_datasets_pager():
    client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
next_page_token="abc",
),
dataset_service.ListDatasetsResponse(datasets=[], next_page_token="def",),
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(),], next_page_token="ghi",
),
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(),],
),
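            # A trailing RuntimeError makes the test fail loudly if the pager
            # tries to fetch more pages than the four supplied above.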
RuntimeError,
)
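        # The pager should retain the routing-header metadata attached to the
        # initial call, so that subsequent page requests reuse it.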
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
pager = client.list_datasets(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, dataset.Dataset) for i in results)
def test_list_datasets_pages():
    client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
next_page_token="abc",
),
dataset_service.ListDatasetsResponse(datasets=[], next_page_token="def",),
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(),], next_page_token="ghi",
),
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(),],
),
RuntimeError,
)
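        # Walk the raw pages; the final page reports an empty next_page_token.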
pages = list(client.list_datasets(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_datasets_async_pager():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
# Mock the actual call within the gRPC stub, and fake the request.
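    # new_callable=mock.AsyncMock makes the stubbed __call__ awaitable, so the
    # async pager can await each page supplied via side_effect.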
with mock.patch.object(
type(client.transport.list_datasets), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
next_page_token="abc",
),
dataset_service.ListDatasetsResponse(datasets=[], next_page_token="def",),
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(),], next_page_token="ghi",
),
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(),],
),
RuntimeError,
)
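        # Awaiting the method returns an AsyncPager primed with the first page.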
async_pager = await client.list_datasets(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, dataset.Dataset) for i in responses)
@pytest.mark.asyncio
async def test_list_datasets_async_pages():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_datasets), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
next_page_token="abc",
),
dataset_service.ListDatasetsResponse(datasets=[], next_page_token="def",),
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(),], next_page_token="ghi",
),
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(),],
),
RuntimeError,
)
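        # The awaited pager exposes .pages as an async iterator of raw pages.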
pages = []
async for page_ in (await client.list_datasets(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_delete_dataset(
transport: str = "grpc", request_type=dataset_service.DeleteDatasetRequest
):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
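        # Long-running methods return an Operation proto, which the client
        # wraps in a google.api_core future (asserted below).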
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.DeleteDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_dataset_from_dict():
test_delete_dataset(request_type=dict)
def test_delete_dataset_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls
    # (i.e. request is None and no flattened fields passed) work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
client.delete_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.DeleteDatasetRequest()
@pytest.mark.asyncio
async def test_delete_dataset_async(
transport: str = "grpc_asyncio", request_type=dataset_service.DeleteDatasetRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.DeleteDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_dataset_async_from_dict():
await test_delete_dataset_async(request_type=dict)
def test_delete_dataset_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.DeleteDatasetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_dataset_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.DeleteDatasetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_dataset_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_dataset(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_delete_dataset_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_dataset(
dataset_service.DeleteDatasetRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_dataset_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_dataset(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_dataset_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_dataset(
dataset_service.DeleteDatasetRequest(), name="name_value",
)
def test_import_data(
transport: str = "grpc", request_type=dataset_service.ImportDataRequest
):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.import_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ImportDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_import_data_from_dict():
test_import_data(request_type=dict)
def test_import_data_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls
    # (i.e. request is None and no flattened fields passed) work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
client.import_data()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ImportDataRequest()
@pytest.mark.asyncio
async def test_import_data_async(
transport: str = "grpc_asyncio", request_type=dataset_service.ImportDataRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.import_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ImportDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_import_data_async_from_dict():
await test_import_data_async(request_type=dict)
def test_import_data_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ImportDataRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.import_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_import_data_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ImportDataRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.import_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_import_data_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.import_data(
name="name_value",
import_configs=[
dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=["uris_value"]))
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
assert args[0].import_configs == [
dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=["uris_value"]))
]
def test_import_data_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.import_data(
dataset_service.ImportDataRequest(),
name="name_value",
import_configs=[
dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=["uris_value"]))
],
)
@pytest.mark.asyncio
async def test_import_data_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.import_data(
name="name_value",
import_configs=[
dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=["uris_value"]))
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
assert args[0].import_configs == [
dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=["uris_value"]))
]
@pytest.mark.asyncio
async def test_import_data_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.import_data(
dataset_service.ImportDataRequest(),
name="name_value",
import_configs=[
dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=["uris_value"]))
],
)
def test_export_data(
transport: str = "grpc", request_type=dataset_service.ExportDataRequest
):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.export_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ExportDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_export_data_from_dict():
test_export_data(request_type=dict)
def test_export_data_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls
    # (i.e. request is None and no flattened fields passed) work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
client.export_data()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ExportDataRequest()
@pytest.mark.asyncio
async def test_export_data_async(
transport: str = "grpc_asyncio", request_type=dataset_service.ExportDataRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.export_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ExportDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_export_data_async_from_dict():
await test_export_data_async(request_type=dict)
def test_export_data_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ExportDataRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.export_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_export_data_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ExportDataRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.export_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_export_data_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.export_data(
name="name_value",
export_config=dataset.ExportDataConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
assert args[0].export_config == dataset.ExportDataConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
def test_export_data_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.export_data(
dataset_service.ExportDataRequest(),
name="name_value",
export_config=dataset.ExportDataConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
),
)
@pytest.mark.asyncio
async def test_export_data_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.export_data(
name="name_value",
export_config=dataset.ExportDataConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
assert args[0].export_config == dataset.ExportDataConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
@pytest.mark.asyncio
async def test_export_data_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.export_data(
dataset_service.ExportDataRequest(),
name="name_value",
export_config=dataset.ExportDataConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
),
)
def test_list_data_items(
transport: str = "grpc", request_type=dataset_service.ListDataItemsRequest
):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset_service.ListDataItemsResponse(
next_page_token="next_page_token_value",
)
response = client.list_data_items(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListDataItemsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDataItemsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_data_items_from_dict():
test_list_data_items(request_type=dict)
def test_list_data_items_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls
    # (i.e. request is None and no flattened fields passed) work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
client.list_data_items()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListDataItemsRequest()
@pytest.mark.asyncio
async def test_list_data_items_async(
transport: str = "grpc_asyncio", request_type=dataset_service.ListDataItemsRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListDataItemsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_data_items(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListDataItemsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDataItemsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_data_items_async_from_dict():
await test_list_data_items_async(request_type=dict)
def test_list_data_items_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ListDataItemsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
call.return_value = dataset_service.ListDataItemsResponse()
client.list_data_items(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_data_items_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ListDataItemsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListDataItemsResponse()
)
await client.list_data_items(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_data_items_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset_service.ListDataItemsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_data_items(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
def test_list_data_items_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_data_items(
dataset_service.ListDataItemsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_data_items_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListDataItemsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_data_items(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_data_items_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_data_items(
dataset_service.ListDataItemsRequest(), parent="parent_value",
)
def test_list_data_items_pager():
    client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListDataItemsResponse(
data_items=[
data_item.DataItem(),
data_item.DataItem(),
data_item.DataItem(),
],
next_page_token="abc",
),
dataset_service.ListDataItemsResponse(
data_items=[], next_page_token="def",
),
dataset_service.ListDataItemsResponse(
data_items=[data_item.DataItem(),], next_page_token="ghi",
),
dataset_service.ListDataItemsResponse(
data_items=[data_item.DataItem(), data_item.DataItem(),],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
pager = client.list_data_items(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, data_item.DataItem) for i in results)
def test_list_data_items_pages():
    client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListDataItemsResponse(
data_items=[
data_item.DataItem(),
data_item.DataItem(),
data_item.DataItem(),
],
next_page_token="abc",
),
dataset_service.ListDataItemsResponse(
data_items=[], next_page_token="def",
),
dataset_service.ListDataItemsResponse(
data_items=[data_item.DataItem(),], next_page_token="ghi",
),
dataset_service.ListDataItemsResponse(
data_items=[data_item.DataItem(), data_item.DataItem(),],
),
RuntimeError,
)
pages = list(client.list_data_items(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_data_items_async_pager():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_items), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListDataItemsResponse(
data_items=[
data_item.DataItem(),
data_item.DataItem(),
data_item.DataItem(),
],
next_page_token="abc",
),
dataset_service.ListDataItemsResponse(
data_items=[], next_page_token="def",
),
dataset_service.ListDataItemsResponse(
data_items=[data_item.DataItem(),], next_page_token="ghi",
),
dataset_service.ListDataItemsResponse(
data_items=[data_item.DataItem(), data_item.DataItem(),],
),
RuntimeError,
)
async_pager = await client.list_data_items(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, data_item.DataItem) for i in responses)
@pytest.mark.asyncio
async def test_list_data_items_async_pages():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_items), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListDataItemsResponse(
data_items=[
data_item.DataItem(),
data_item.DataItem(),
data_item.DataItem(),
],
next_page_token="abc",
),
dataset_service.ListDataItemsResponse(
data_items=[], next_page_token="def",
),
dataset_service.ListDataItemsResponse(
data_items=[data_item.DataItem(),], next_page_token="ghi",
),
dataset_service.ListDataItemsResponse(
data_items=[data_item.DataItem(), data_item.DataItem(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_data_items(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_get_annotation_spec(
transport: str = "grpc", request_type=dataset_service.GetAnnotationSpecRequest
):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = annotation_spec.AnnotationSpec(
name="name_value", display_name="display_name_value", etag="etag_value",
)
response = client.get_annotation_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.GetAnnotationSpecRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, annotation_spec.AnnotationSpec)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.etag == "etag_value"
def test_get_annotation_spec_from_dict():
test_get_annotation_spec(request_type=dict)
def test_get_annotation_spec_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls
    # (i.e. request is None and no flattened fields passed) work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec), "__call__"
) as call:
client.get_annotation_spec()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.GetAnnotationSpecRequest()
@pytest.mark.asyncio
async def test_get_annotation_spec_async(
transport: str = "grpc_asyncio",
request_type=dataset_service.GetAnnotationSpecRequest,
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
annotation_spec.AnnotationSpec(
name="name_value", display_name="display_name_value", etag="etag_value",
)
)
response = await client.get_annotation_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.GetAnnotationSpecRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, annotation_spec.AnnotationSpec)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_get_annotation_spec_async_from_dict():
await test_get_annotation_spec_async(request_type=dict)
def test_get_annotation_spec_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.GetAnnotationSpecRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec), "__call__"
) as call:
call.return_value = annotation_spec.AnnotationSpec()
client.get_annotation_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_annotation_spec_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.GetAnnotationSpecRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
annotation_spec.AnnotationSpec()
)
await client.get_annotation_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_annotation_spec_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = annotation_spec.AnnotationSpec()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_annotation_spec(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_annotation_spec_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_annotation_spec(
dataset_service.GetAnnotationSpecRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_annotation_spec_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
annotation_spec.AnnotationSpec()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_annotation_spec(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_annotation_spec_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_annotation_spec(
dataset_service.GetAnnotationSpecRequest(), name="name_value",
)
def test_list_annotations(
transport: str = "grpc", request_type=dataset_service.ListAnnotationsRequest
):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset_service.ListAnnotationsResponse(
next_page_token="next_page_token_value",
)
response = client.list_annotations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListAnnotationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAnnotationsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_annotations_from_dict():
test_list_annotations(request_type=dict)
def test_list_annotations_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls
    # (i.e. request is None and no flattened fields passed) work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
client.list_annotations()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListAnnotationsRequest()
@pytest.mark.asyncio
async def test_list_annotations_async(
transport: str = "grpc_asyncio", request_type=dataset_service.ListAnnotationsRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListAnnotationsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_annotations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListAnnotationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAnnotationsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_annotations_async_from_dict():
await test_list_annotations_async(request_type=dict)
def test_list_annotations_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ListAnnotationsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
call.return_value = dataset_service.ListAnnotationsResponse()
client.list_annotations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_annotations_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ListAnnotationsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListAnnotationsResponse()
)
await client.list_annotations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_annotations_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset_service.ListAnnotationsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_annotations(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
def test_list_annotations_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_annotations(
dataset_service.ListAnnotationsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_annotations_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            dataset_service.ListAnnotationsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_annotations(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_annotations_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_annotations(
dataset_service.ListAnnotationsRequest(), parent="parent_value",
)
def test_list_annotations_pager():
    client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListAnnotationsResponse(
annotations=[
annotation.Annotation(),
annotation.Annotation(),
annotation.Annotation(),
],
next_page_token="abc",
),
dataset_service.ListAnnotationsResponse(
annotations=[], next_page_token="def",
),
dataset_service.ListAnnotationsResponse(
annotations=[annotation.Annotation(),], next_page_token="ghi",
),
dataset_service.ListAnnotationsResponse(
annotations=[annotation.Annotation(), annotation.Annotation(),],
),
RuntimeError,
)
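        # The routing-header metadata attached to the pager should be resent
        # with each subsequent page request.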
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_annotations(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, annotation.Annotation) for i in results)
def test_list_annotations_pages():
    client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListAnnotationsResponse(
annotations=[
annotation.Annotation(),
annotation.Annotation(),
annotation.Annotation(),
],
next_page_token="abc",
),
dataset_service.ListAnnotationsResponse(
annotations=[], next_page_token="def",
),
dataset_service.ListAnnotationsResponse(
annotations=[annotation.Annotation(),], next_page_token="ghi",
),
dataset_service.ListAnnotationsResponse(
annotations=[annotation.Annotation(), annotation.Annotation(),],
),
RuntimeError,
)
pages = list(client.list_annotations(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_annotations_async_pager():
    client = DatasetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotations), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListAnnotationsResponse(
annotations=[
annotation.Annotation(),
annotation.Annotation(),
annotation.Annotation(),
],
next_page_token="abc",
),
dataset_service.ListAnnotationsResponse(
annotations=[], next_page_token="def",
),
dataset_service.ListAnnotationsResponse(
annotations=[annotation.Annotation(),], next_page_token="ghi",
),
dataset_service.ListAnnotationsResponse(
annotations=[annotation.Annotation(), annotation.Annotation(),],
),
RuntimeError,
)
async_pager = await client.list_annotations(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, annotation.Annotation) for i in responses)
@pytest.mark.asyncio
async def test_list_annotations_async_pages():
    client = DatasetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotations), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListAnnotationsResponse(
annotations=[
annotation.Annotation(),
annotation.Annotation(),
annotation.Annotation(),
],
next_page_token="abc",
),
dataset_service.ListAnnotationsResponse(
annotations=[], next_page_token="def",
),
dataset_service.ListAnnotationsResponse(
annotations=[annotation.Annotation(),], next_page_token="ghi",
),
dataset_service.ListAnnotationsResponse(
annotations=[annotation.Annotation(), annotation.Annotation(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_annotations(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.DatasetServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.DatasetServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DatasetServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.DatasetServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DatasetServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.DatasetServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = DatasetServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.DatasetServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.DatasetServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.DatasetServiceGrpcTransport,
transports.DatasetServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.DatasetServiceGrpcTransport,)
def test_dataset_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.DatasetServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_dataset_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.DatasetServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_dataset",
"get_dataset",
"update_dataset",
"list_datasets",
"delete_dataset",
"import_data",
"export_data",
"list_data_items",
"get_annotation_spec",
"list_annotations",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
@requires_google_auth_gte_1_25_0
def test_dataset_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.DatasetServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_dataset_service_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.DatasetServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_dataset_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.DatasetServiceTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_dataset_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
DatasetServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_dataset_service_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
DatasetServiceClient()
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.DatasetServiceGrpcTransport,
transports.DatasetServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_dataset_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.DatasetServiceGrpcTransport,
transports.DatasetServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_dataset_service_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.DatasetServiceGrpcTransport, grpc_helpers),
(transports.DatasetServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
@requires_api_core_gte_1_26_0
def test_dataset_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.DatasetServiceGrpcTransport, grpc_helpers),
(transports.DatasetServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
@requires_api_core_lt_1_26_0
def test_dataset_service_transport_create_channel_old_api_core(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus")
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
scopes=("https://www.googleapis.com/auth/cloud-platform",),
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.DatasetServiceGrpcTransport, grpc_helpers),
(transports.DatasetServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
@requires_api_core_lt_1_26_0
def test_dataset_service_transport_create_channel_user_scopes(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
scopes=["1", "2"],
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.DatasetServiceGrpcTransport,
transports.DatasetServiceGrpcAsyncIOTransport,
],
)
def test_dataset_service_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=("https://www.googleapis.com/auth/cloud-platform",),
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_dataset_service_host_no_port():
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com"
),
)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_dataset_service_host_with_port():
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com:8000"
),
)
assert client.transport._host == "aiplatform.googleapis.com:8000"
def test_dataset_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.DatasetServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
def test_dataset_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.DatasetServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.DatasetServiceGrpcTransport,
transports.DatasetServiceGrpcAsyncIOTransport,
],
)
def test_dataset_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=("https://www.googleapis.com/auth/cloud-platform",),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.DatasetServiceGrpcTransport,
transports.DatasetServiceGrpcAsyncIOTransport,
],
)
def test_dataset_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=("https://www.googleapis.com/auth/cloud-platform",),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_dataset_service_grpc_lro_client():
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_dataset_service_grpc_lro_async_client():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_annotation_path():
project = "squid"
location = "clam"
dataset = "whelk"
data_item = "octopus"
annotation = "oyster"
expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(
project=project,
location=location,
dataset=dataset,
data_item=data_item,
annotation=annotation,
)
actual = DatasetServiceClient.annotation_path(
project, location, dataset, data_item, annotation
)
assert expected == actual
def test_parse_annotation_path():
expected = {
"project": "nudibranch",
"location": "cuttlefish",
"dataset": "mussel",
"data_item": "winkle",
"annotation": "nautilus",
}
path = DatasetServiceClient.annotation_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_annotation_path(path)
assert expected == actual
def test_annotation_spec_path():
project = "scallop"
location = "abalone"
dataset = "squid"
annotation_spec = "clam"
expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(
project=project,
location=location,
dataset=dataset,
annotation_spec=annotation_spec,
)
actual = DatasetServiceClient.annotation_spec_path(
project, location, dataset, annotation_spec
)
assert expected == actual
def test_parse_annotation_spec_path():
expected = {
"project": "whelk",
"location": "octopus",
"dataset": "oyster",
"annotation_spec": "nudibranch",
}
path = DatasetServiceClient.annotation_spec_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_annotation_spec_path(path)
assert expected == actual
def test_data_item_path():
project = "cuttlefish"
location = "mussel"
dataset = "winkle"
data_item = "nautilus"
expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(
project=project, location=location, dataset=dataset, data_item=data_item,
)
actual = DatasetServiceClient.data_item_path(project, location, dataset, data_item)
assert expected == actual
def test_parse_data_item_path():
expected = {
"project": "scallop",
"location": "abalone",
"dataset": "squid",
"data_item": "clam",
}
path = DatasetServiceClient.data_item_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_data_item_path(path)
assert expected == actual
def test_dataset_path():
project = "whelk"
location = "octopus"
dataset = "oyster"
expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)
actual = DatasetServiceClient.dataset_path(project, location, dataset)
assert expected == actual
def test_parse_dataset_path():
expected = {
"project": "nudibranch",
"location": "cuttlefish",
"dataset": "mussel",
}
path = DatasetServiceClient.dataset_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_dataset_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = DatasetServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = DatasetServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder,)
actual = DatasetServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = DatasetServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization,)
actual = DatasetServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = DatasetServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project,)
actual = DatasetServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = DatasetServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = DatasetServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = DatasetServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.DatasetServiceTransport, "_prep_wrapped_messages"
) as prep:
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.DatasetServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = DatasetServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
contrib/spendfrom/spendfrom.py | #!/usr/bin/env python
#
# Use the raw transactions API to spend CIVs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a civitasd or civitas-Qt running
# on localhost.
#
# Depends on jsonrpc
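# (Python 2 script: it relies on ConfigParser and dict.iteritems)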
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the civitas data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Civitas/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Civitas")
return os.path.expanduser("~/.civitas")
def read_bitcoin_config(dbdir):
"""Read the civitas.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "civitas.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a civitas JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 38843 if testnet else 28843
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the civitasd we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(civitasd):
info = civitasd.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
civitasd.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = civitasd.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(civitasd):
address_summary = dict()
address_to_account = dict()
for info in civitasd.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = civitasd.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = civitasd.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-civitas-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(civitasd, fromaddresses, toaddress, amount, fee):
all_coins = list_available(civitasd)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to civitasd.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = civitasd.createrawtransaction(inputs, outputs)
signed_rawtx = civitasd.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(civitasd, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = civitasd.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(civitasd, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = civitasd.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(civitasd, txinfo)
        total_out = compute_amount_out(txinfo)
        fee = total_in - total_out  # the fee is whatever the outputs don't claim
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000 # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get CIVs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send CIVs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of civitas.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
civitasd = connect_JSON(config)
if options.amount is None:
address_summary = list_available(civitasd)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(civitasd) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(civitasd, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(civitasd, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = civitasd.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| [] | [] | [
"APPDATA"
] | [] | ["APPDATA"] | python | 1 | 0 | |
speech_separation_RNN.py | # -*- coding: utf-8 -*-
# @Author:何欣泽
# @Time:2020/10/18 19:14
# @File:speech_separation_RNN.py
# @Software:PyCharm
import librosa
import librosa.display
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
def get_audio_separation_LSTM(path):
model = load_model('./model/LSTMfunction23_model(2).h5')
data, fs = librosa.load(path, sr=8000)
win_length = 256
hop_length = 64
nfft = 512
spectrum = librosa.stft(data, win_length=win_length, hop_length=hop_length, n_fft=nfft)
magnitude = np.abs(spectrum).T
phase = np.angle(spectrum).T
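    # NOTE: the model expects a fixed input of 720 frames x 257 frequency bins
    # (nfft/2 + 1); clips of other lengths would need padding or trimming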
magnitude_input = np.reshape(magnitude,(1,720,257))
mask = model.predict(magnitude_input)
print(np.shape(mask))
mask = mask[0,:,:]
print(np.shape(mask))
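    # apply the predicted time-frequency mask to the mixture magnitude and
    # reuse the mixture phase to rebuild the enhanced spectrum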
en_magnitude = np.multiply(magnitude, mask)
en_spectrum = en_magnitude.T * np.exp(1.0j * phase.T)
# spectrogram(spectrum_early=spectrum,spectrum_late=en_spectrum)
frame = librosa.istft(en_spectrum, win_length=win_length, hop_length=hop_length)
frame = np.multiply(1.5,frame)
    # clip samples above 0.6 to limit the 1.5x gain
    frame[frame > 0.6] = 0.6
# time_pic(data, frame)
out_file_path = './output/seprartion/RNNseprartion.wav'
librosa.output.write_wav(out_file_path, frame, sr=8000)
    print('Output written successfully')
return out_file_path,spectrum,en_spectrum,data,frame
# if __name__ == '__main__':
# path = r'C:\Users\MACHENIKE\Desktop\数字信号处理B\项目\mixed_series\mixed_series2.wav'
#     for _ in range(7):
#         # the function returns (out_path, spectrum, en_spectrum, data, frame)
#         path, _, _, _, _ = get_audio_separation_LSTM(path)
| [] | [] | [] | [] | [] | python | null | null | null |
src/wats/helpers_test.go | package wats
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/cloudfoundry-incubator/cf-test-helpers/cf"
"github.com/cloudfoundry-incubator/cf-test-helpers/helpers"
"github.com/onsi/gomega/gbytes"
. "github.com/cloudfoundry-incubator/cf-test-helpers/workflowhelpers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
)
const SkipCredhubMessage = `Skipping this test because Config.CredhubMode is not set to either 'assisted' or 'non-assisted'.
NOTE: Ensure instance identity credential is turned on and CredHub is deployed before enabling this test`
const SkipAssistedCredhubMessage = `Skipping this test because Config.CredhubMode is not set to 'assisted'.
NOTE: Ensure instance identity credential is turned on and CredHub is deployed before enabling this test`
const SkipNonAssistedCredhubMessage = `Skipping this test because Config.CredhubMode is not set to 'non-assisted'.
NOTE: Ensure instance identity credential is turned on and CredHub is deployed before enabling this test`
func appRunning(appName string, instances int, timeout time.Duration) func() error {
return func() error {
type StatsResponse map[string]struct {
State string `json:"state"`
}
buf, err := runCfWithOutput("app", appName, "--guid")
if err != nil {
return err
}
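		// "cf app APP --guid" prints the GUID followed by a trailing newline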
appGuid := strings.Replace(string(buf.Contents()), "\n", "", -1)
endpoint := fmt.Sprintf("/v2/apps/%s/stats", appGuid)
var response StatsResponse
ApiRequest("GET", endpoint, &response, timeout)
err = nil
for k, v := range response {
if v.State != "RUNNING" {
				err = fmt.Errorf("App %s instance %s is not running: State = %s", appName, k, v.State)
}
}
return err
}
}
func runCfWithOutput(values ...string) (*gbytes.Buffer, error) {
session := cf.Cf(values...)
session.Wait(CF_PUSH_TIMEOUT)
if session.ExitCode() == 0 {
return session.Out, nil
}
return session.Out, fmt.Errorf("non zero exit code %d", session.ExitCode())
}
func DopplerUrl() string {
doppler := os.Getenv("DOPPLER_URL")
if doppler == "" {
cfInfoBuffer, err := runCfWithOutput("curl", "/v2/info")
Expect(err).NotTo(HaveOccurred())
var cfInfo struct {
DopplerLoggingEndpoint string `json:"doppler_logging_endpoint"`
}
err = json.NewDecoder(bytes.NewReader(cfInfoBuffer.Contents())).Decode(&cfInfo)
Expect(err).NotTo(HaveOccurred())
doppler = cfInfo.DopplerLoggingEndpoint
}
return doppler
}
func CredhubDescribe(description string, callback func()) bool {
return Describe("[credhub]", func() {
BeforeEach(func() {
if config, err := LoadWatsConfig(); err == nil && !config.GetIncludeCredhubAssisted() && !config.GetIncludeCredhubNonAssisted() {
Skip(SkipCredhubMessage)
}
})
Describe(description, callback)
})
}
func AssistedCredhubDescribe(description string, callback func()) bool {
return Describe("[assisted credhub]", func() {
BeforeEach(func() {
if config, err := LoadWatsConfig(); err == nil && !config.GetIncludeCredhubAssisted() {
Skip(SkipAssistedCredhubMessage)
}
})
Describe(description, callback)
})
}
func NonAssistedCredhubDescribe(description string, callback func()) bool {
return Describe("[non-assisted credhub]", func() {
BeforeEach(func() {
if config, err := LoadWatsConfig(); err == nil && !config.GetIncludeCredhubNonAssisted() {
Skip(SkipNonAssistedCredhubMessage)
}
})
Describe(description, callback)
})
}
func pushAndStartNora(appName string) {
By("pushing it")
Expect(pushNora(appName).Wait(CF_PUSH_TIMEOUT)).To(gexec.Exit(0))
By("staging and running it on Diego")
Expect(cf.Cf("start", appName).Wait(CF_PUSH_TIMEOUT)).To(gexec.Exit(0))
By("verifying it's up")
Eventually(helpers.CurlingAppRoot(config, appName)).Should(ContainSubstring("hello i am nora"))
}
func pushNora(appName string) *gexec.Session {
return pushNoraWithOptions(appName, 1, "256m")
}
func pushNoraWithNoRoute(appName string) *gexec.Session {
return pushApp(appName, "../../assets/nora/NoraPublished", 1, "256m", hwcBuildPackURL, "--no-route")
}
func pushNoraWithOptions(appName string, instances int, memory string) *gexec.Session {
return pushApp(appName, "../../assets/nora/NoraPublished", instances, memory, hwcBuildPackURL)
}
func pushApp(appName, path string, instances int, memory, buildpack string, args ...string) *gexec.Session {
cfArgs := []string{
"push", appName,
"-p", path,
"--no-start",
"-i", strconv.Itoa(instances),
"-m", memory,
"-b", buildpack,
"-s", config.GetStack(),
}
cfArgs = append(cfArgs, args...)
return cf.Cf(cfArgs...)
}
func setTotalMemoryLimit(memoryLimit string) {
type quotaDefinitionUrl struct {
Resources []struct {
Entity struct {
QuotaDefinitionUrl string `json:"quota_definition_url"`
} `json:"entity"`
} `json:"resources"`
}
orgEndpoint := fmt.Sprintf("/v2/organizations?q=name%%3A%s", environment.GetOrganizationName())
var org quotaDefinitionUrl
ApiRequest("GET", orgEndpoint, &org, DEFAULT_TIMEOUT)
Expect(org.Resources).ToNot(BeEmpty())
type quotaDefinition struct {
Entity struct {
Name string `json:"name"`
} `json:"entity"`
}
var quota quotaDefinition
ApiRequest("GET", org.Resources[0].Entity.QuotaDefinitionUrl, "a, DEFAULT_TIMEOUT)
Expect(quota.Entity.Name).ToNot(BeEmpty())
AsUser(environment.AdminUserContext(), DEFAULT_TIMEOUT, func() {
Expect(cf.Cf("update-quota", quota.Entity.Name, "-m", memoryLimit).Wait(DEFAULT_TIMEOUT)).To(gexec.Exit(0))
})
}
| [
"\"DOPPLER_URL\""
] | [] | [
"DOPPLER_URL"
] | [] | ["DOPPLER_URL"] | go | 1 | 0 | |
faucet.py | """
Minimal NEO faucet node.
It opens a wallet, runs a background chain-sync loop, and serves a small
Klein web app that sends NEO/GAS to requested addresses, rate-limited
per address and per client IP.
"""
import os
import json
import pdb
from datetime import date,timedelta,datetime
from logzero import logger
from twisted.internet import reactor, task
from neo.Network.NodeLeader import NodeLeader
from neo.Core.Blockchain import Blockchain
from neo.Implementations.Blockchains.LevelDB.LevelDBBlockchain import LevelDBBlockchain
from neo.Settings import settings
from neo.Fixed8 import Fixed8
from neo.Core.Helper import Helper
from neo.Core.TX.Transaction import TransactionOutput,ContractTransaction
from neo.Implementations.Wallets.peewee.UserWallet import UserWallet
from neo.SmartContract.ContractParameterContext import ContractParametersContext
from peewee import *
from twisted.web.static import File
from twisted.internet.defer import succeed
from klein import Klein
from jinja2 import Template,FileSystemLoader,Environment
class FaucetRequest(Model):
address = CharField()
last = DateField()
class IPRequest(Model):
client = CharField(max_length=1024)
last = DateField()
settings.set_logfile("logfile.log", max_bytes=1e7, backup_count=3)
class ItemStore(object):
app = Klein()
wallet = None
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
j2_env = Environment(loader=FileSystemLoader(BASE_DIR),
trim_blocks=True)
run_db = None
run_db_path = 'faucet_run.db3'
sent_tx = None
def __init__(self):
self._build_run_db()
wallet_path = os.environ.get('FAUCET_WALLET_PATH','')
passwd = os.environ.get('FAUCET_WALLET_PASSWORD', '')
if len(passwd) < 1 or len(wallet_path) < 1:
raise Exception("Please set FAUCET_WALLET_PASSWORD and FAUCET_WALLET_PATH in your ENV vars")
self.wallet = UserWallet.Open(path=wallet_path, password=passwd)
dbloop = task.LoopingCall(self.wallet.ProcessBlocks)
dbloop.start(.1)
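        # Rebuild the wallet index and manually bump the sync height,
        # presumably to avoid re-scanning blocks below 100000.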
self.wallet.Rebuild()
self.wallet._current_height = 100000
print("created wallet: %s " % self.wallet)
def _build_run_db(self):
try:
self.run_db = SqliteDatabase(self.run_db_path)
self.run_db.connect()
except Exception as e:
logger.error("database file does not exist, or incorrect permissions")
try:
self.run_db.create_tables([FaucetRequest,IPRequest,], safe=True)
except Exception as e:
logger.error("couldnt build database %s " % e)
def _get_context(self):
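        # Sum the wallet's unspent NEO and GAS outputs for display in the UI.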
neo_balance = Fixed8.Zero()
for coin in self.wallet.FindUnspentCoinsByAsset(Blockchain.SystemShare().Hash):
neo_balance += coin.Output.Value
gas_balance = Fixed8.Zero()
for coin in self.wallet.FindUnspentCoinsByAsset(Blockchain.SystemCoin().Hash):
gas_balance += coin.Output.Value
return {
'message':'Hello',
'height':Blockchain.Default().Height,
'neo': neo_balance.ToInt(),
'gas': gas_balance.ToInt(),
'wallet_height': self.wallet.WalletHeight
}
def _make_tx(self, addr_to):
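        # Build a transaction paying 2000 GAS and 100 NEO to the requested
        # address, sign it with the faucet wallet, and relay it to the network.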
output1 = TransactionOutput(
AssetId = Blockchain.SystemCoin().Hash,
Value = Fixed8.FromDecimal(2000),
script_hash = addr_to
)
output2 = TransactionOutput(
AssetId = Blockchain.SystemShare().Hash,
Value = Fixed8.FromDecimal(100),
script_hash = addr_to
)
contract_tx = ContractTransaction()
contract_tx.outputs = [output1, output2]
contract_tx = self.wallet.MakeTransaction(contract_tx)
print("tx to json: %s " % json.dumps(contract_tx.ToJson(), indent=4))
context = ContractParametersContext(contract_tx, isMultiSig=False)
self.wallet.Sign(context)
if context.Completed:
contract_tx.scripts = context.GetScripts()
self.wallet.SaveTransaction(contract_tx)
# print("will send tx: %s " % json.dumps(tx.ToJson(),indent=4))
relayed = NodeLeader.Instance().Relay(contract_tx)
if relayed:
print("Relayed Tx: %s " % contract_tx.Hash.ToString())
return contract_tx
else:
print("Could not relay tx %s " % contract_tx.Hash.ToString())
else:
print("Transaction initiated, but the signature is incomplete")
print(json.dumps(context.ToJson(), separators=(',', ':')))
return False
return False
@app.route('/')
def app_home(self, request):
ctx = self._get_context()
output = self.j2_env.get_template('index.html').render(ctx)
return output
@app.route('/index.html')
    def app_index(self, request):
ctx = self._get_context()
if ctx['neo'] < 100 or ctx['gas'] < 2000:
print("NO ASSETS AVALAIBLE")
ctx['come_back'] = True
print("contex:%s " % json.dumps(ctx, indent=4))
output = self.j2_env.get_template('index.html').render(ctx)
return output
@app.route('/ask', methods=['POST'])
def ask_for_assets(self, request):
self.sent_tx = None
ctx = self._get_context()
ctx['error'] = True
addr = None
try:
if b'coz_addr' in request.args:
addr = request.args.get(b'coz_addr')[0]
ctx['addr'] = addr.decode('utf-8')
if b'do_agree' in request.args:
agree = request.args.get(b'do_agree')[0]
if agree != b'on':
print("must agree to guidelines")
ctx['message_error'] = 'You must agree to the guidelines to proceed'
else:
# check addr
today = date.today()
client = str(request.client)
go_ahead = True
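                    # rate-limit by client IP: more than a few requests from
                    # the same client in one day are rejected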
total = IPRequest.filter(client=client,last=today).count()
print("TOTAL: %s " % total)
if total > 3:
ctx['message_error'] = 'Too many requests. Try again later'
go_ahead = False
IPRequest.create(
client=client,
last=today
)
if go_ahead:
freq, created = FaucetRequest.get_or_create(
address=addr,
last = today
)
if not created:
go_ahead = False
ctx['message_error'] = 'Already requested today'
# pdb.set_trace()
if go_ahead:
addr_shash = self.wallet.ToScriptHash(addr.decode('utf-8'))
tx = self._make_tx(addr_shash)
if type(tx) is ContractTransaction:
print("ALL OK!!!!!")
self.sent_tx = tx
request.redirect('/success')
return succeed(None)
else:
ctx['message_error'] = 'Error constructing transaction: %s ' % tx
else:
ctx['message_error'] = 'You must agree to the guidelines to proceed'
except Exception as e:
            print("exception: %s " % e)
ctx['message_error'] = 'Could not process your request: %s ' % e
output = self.j2_env.get_template('index.html').render(ctx)
return output
@app.route('/success')
def app_success(self, request):
ctx = self._get_context()
if not self.sent_tx:
print("NO SENT TX:")
request.redirect('/')
return succeed(None)
senttx_json = json.dumps(self.sent_tx.ToJson(), indent=4)
ctx['tx_json'] = senttx_json
ctx['message_success'] = "Your request has been relayed to the network. Transaction: %s " % self.sent_tx.Hash.ToString()
output = self.j2_env.get_template('success.html').render(ctx)
self.sent_tx = None
self.wallet.Rebuild()
self.wallet._current_height = 100000
return output
@app.route('/about')
def app_about(self,request):
return 'I am about!'
@app.route('/static/', branch=True)
def static(self, request):
return File("./static")
def main():
# Setup the blockchain
settings.setup('protocol.faucet.json')
blockchain = LevelDBBlockchain(settings.LEVELDB_PATH)
Blockchain.RegisterBlockchain(blockchain)
dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
dbloop.start(.1)
NodeLeader.Instance().Start()
port = os.environ.get('FAUCET_PORT', 8080 )
host = os.environ.get('FAUCET_HOST', 'localhost')
store = ItemStore()
store.app.run(host, int(port))
logger.info("Shutting down.")
if __name__ == "__main__":
main()
| [] | [] | [
"FAUCET_WALLET_PASSWORD",
"FAUCET_WALLET_PATH",
"FAUCET_PORT",
"FAUCET_HOST"
] | [] | ["FAUCET_WALLET_PASSWORD", "FAUCET_WALLET_PATH", "FAUCET_PORT", "FAUCET_HOST"] | python | 4 | 0 | |
client/grpc/grpc.go | // Package grpc provides a gRPC client
package grpc
import (
"context"
"crypto/tls"
"fmt"
"os"
"sync"
"time"
"github.com/micro/go-micro/broker"
"github.com/micro/go-micro/client"
"github.com/micro/go-micro/client/selector"
raw "github.com/micro/go-micro/codec/bytes"
"github.com/micro/go-micro/errors"
"github.com/micro/go-micro/metadata"
"github.com/micro/go-micro/registry"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/encoding"
gmetadata "google.golang.org/grpc/metadata"
)
type grpcClient struct {
once sync.Once
opts client.Options
pool *pool
}
func init() {
encoding.RegisterCodec(wrapCodec{jsonCodec{}})
encoding.RegisterCodec(wrapCodec{protoCodec{}})
encoding.RegisterCodec(wrapCodec{bytesCodec{}})
}
// secure returns the dial option for whether it's a secure or insecure connection
func (g *grpcClient) secure() grpc.DialOption {
if g.opts.Context != nil {
if v := g.opts.Context.Value(tlsAuth{}); v != nil {
tls := v.(*tls.Config)
creds := credentials.NewTLS(tls)
return grpc.WithTransportCredentials(creds)
}
}
return grpc.WithInsecure()
}
func (g *grpcClient) next(request client.Request, opts client.CallOptions) (selector.Next, error) {
service := request.Service()
// get proxy
if prx := os.Getenv("MICRO_PROXY"); len(prx) > 0 {
service = prx
}
// get proxy address
if prx := os.Getenv("MICRO_PROXY_ADDRESS"); len(prx) > 0 {
opts.Address = []string{prx}
}
// return remote address
if len(opts.Address) > 0 {
return func() (*registry.Node, error) {
return ®istry.Node{
Address: opts.Address[0],
}, nil
}, nil
}
// get next nodes from the selector
next, err := g.opts.Selector.Select(service, opts.SelectOptions...)
if err != nil {
if err == selector.ErrNotFound {
return nil, errors.InternalServerError("go.micro.client", "service %s: %s", service, err.Error())
}
return nil, errors.InternalServerError("go.micro.client", "error selecting %s node: %s", service, err.Error())
}
return next, nil
}
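// call issues a single unary request to the given node, propagating request
// metadata, the timeout header and the negotiated codec.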
func (g *grpcClient) call(ctx context.Context, node *registry.Node, req client.Request, rsp interface{}, opts client.CallOptions) error {
address := node.Address
header := make(map[string]string)
if md, ok := metadata.FromContext(ctx); ok {
for k, v := range md {
header[k] = v
}
}
// set timeout in nanoseconds
header["timeout"] = fmt.Sprintf("%d", opts.RequestTimeout)
// set the content type for the request
header["x-content-type"] = req.ContentType()
md := gmetadata.New(header)
ctx = gmetadata.NewOutgoingContext(ctx, md)
cf, err := g.newGRPCCodec(req.ContentType())
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
maxRecvMsgSize := g.maxRecvMsgSizeValue()
maxSendMsgSize := g.maxSendMsgSizeValue()
var grr error
grpcDialOptions := []grpc.DialOption{
grpc.WithDefaultCallOptions(grpc.ForceCodec(cf)),
grpc.WithTimeout(opts.DialTimeout),
g.secure(),
grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(maxRecvMsgSize),
grpc.MaxCallSendMsgSize(maxSendMsgSize),
),
}
if opts := g.getGrpcDialOptions(); opts != nil {
grpcDialOptions = append(grpcDialOptions, opts...)
}
cc, err := g.pool.getConn(address, grpcDialOptions...)
if err != nil {
return errors.InternalServerError("go.micro.client", fmt.Sprintf("Error sending request: %v", err))
}
defer func() {
// defer execution of release
g.pool.release(address, cc, grr)
}()
ch := make(chan error, 1)
go func() {
grpcCallOptions := []grpc.CallOption{grpc.CallContentSubtype(cf.Name())}
if opts := g.getGrpcCallOptions(); opts != nil {
grpcCallOptions = append(grpcCallOptions, opts...)
}
err := cc.Invoke(ctx, methodToGRPC(req.Service(), req.Endpoint()), req.Body(), rsp, grpcCallOptions...)
ch <- microError(err)
}()
select {
case err := <-ch:
grr = err
case <-ctx.Done():
grr = ctx.Err()
}
return grr
}
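// stream dials the given node and opens a bidirectional gRPC stream wrapped
// as a client.Stream.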
func (g *grpcClient) stream(ctx context.Context, node *registry.Node, req client.Request, opts client.CallOptions) (client.Stream, error) {
address := node.Address
header := make(map[string]string)
if md, ok := metadata.FromContext(ctx); ok {
for k, v := range md {
header[k] = v
}
}
// set timeout in nanoseconds
header["timeout"] = fmt.Sprintf("%d", opts.RequestTimeout)
// set the content type for the request
header["x-content-type"] = req.ContentType()
md := gmetadata.New(header)
ctx = gmetadata.NewOutgoingContext(ctx, md)
cf, err := g.newGRPCCodec(req.ContentType())
if err != nil {
return nil, errors.InternalServerError("go.micro.client", err.Error())
}
var dialCtx context.Context
var cancel context.CancelFunc
if opts.DialTimeout >= 0 {
dialCtx, cancel = context.WithTimeout(ctx, opts.DialTimeout)
} else {
dialCtx, cancel = context.WithCancel(ctx)
}
defer cancel()
wc := wrapCodec{cf}
grpcDialOptions := []grpc.DialOption{
grpc.WithDefaultCallOptions(grpc.ForceCodec(wc)),
g.secure(),
}
if opts := g.getGrpcDialOptions(); opts != nil {
grpcDialOptions = append(grpcDialOptions, opts...)
}
cc, err := grpc.DialContext(dialCtx, address, grpcDialOptions...)
if err != nil {
return nil, errors.InternalServerError("go.micro.client", fmt.Sprintf("Error sending request: %v", err))
}
desc := &grpc.StreamDesc{
StreamName: req.Service() + req.Endpoint(),
ClientStreams: true,
ServerStreams: true,
}
grpcCallOptions := []grpc.CallOption{}
if opts := g.getGrpcCallOptions(); opts != nil {
grpcCallOptions = append(grpcCallOptions, opts...)
}
st, err := cc.NewStream(ctx, desc, methodToGRPC(req.Service(), req.Endpoint()), grpcCallOptions...)
if err != nil {
return nil, errors.InternalServerError("go.micro.client", fmt.Sprintf("Error creating stream: %v", err))
}
codec := &grpcCodec{
s: st,
c: wc,
}
// set request codec
if r, ok := req.(*grpcRequest); ok {
r.codec = codec
}
rsp := &response{
conn: cc,
stream: st,
codec: cf,
gcodec: codec,
}
return &grpcStream{
context: ctx,
request: req,
response: rsp,
stream: st,
conn: cc,
}, nil
}
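// poolMaxStreams returns the configured maximum number of streams per pooled
// connection, defaulting to DefaultPoolMaxStreams.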
func (g *grpcClient) poolMaxStreams() int {
if g.opts.Context == nil {
return DefaultPoolMaxStreams
}
v := g.opts.Context.Value(poolMaxStreams{})
if v == nil {
return DefaultPoolMaxStreams
}
return v.(int)
}
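// poolMaxIdle returns the configured maximum number of idle connections in
// the pool, defaulting to DefaultPoolMaxIdle.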
func (g *grpcClient) poolMaxIdle() int {
if g.opts.Context == nil {
return DefaultPoolMaxIdle
}
v := g.opts.Context.Value(poolMaxIdle{})
if v == nil {
return DefaultPoolMaxIdle
}
return v.(int)
}
func (g *grpcClient) maxRecvMsgSizeValue() int {
if g.opts.Context == nil {
return DefaultMaxRecvMsgSize
}
v := g.opts.Context.Value(maxRecvMsgSizeKey{})
if v == nil {
return DefaultMaxRecvMsgSize
}
return v.(int)
}
func (g *grpcClient) maxSendMsgSizeValue() int {
if g.opts.Context == nil {
return DefaultMaxSendMsgSize
}
v := g.opts.Context.Value(maxSendMsgSizeKey{})
if v == nil {
return DefaultMaxSendMsgSize
}
return v.(int)
}
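// newGRPCCodec returns the codec registered for the given content type,
// preferring user-supplied codecs over the defaults.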
func (g *grpcClient) newGRPCCodec(contentType string) (encoding.Codec, error) {
codecs := make(map[string]encoding.Codec)
if g.opts.Context != nil {
if v := g.opts.Context.Value(codecsKey{}); v != nil {
codecs = v.(map[string]encoding.Codec)
}
}
if c, ok := codecs[contentType]; ok {
return wrapCodec{c}, nil
}
if c, ok := defaultGRPCCodecs[contentType]; ok {
return wrapCodec{c}, nil
}
return nil, fmt.Errorf("Unsupported Content-Type: %s", contentType)
}
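// Init applies the given options and resizes the connection pool if the pool
// size or TTL changed.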
func (g *grpcClient) Init(opts ...client.Option) error {
size := g.opts.PoolSize
ttl := g.opts.PoolTTL
for _, o := range opts {
o(&g.opts)
}
// update pool configuration if the options changed
if size != g.opts.PoolSize || ttl != g.opts.PoolTTL {
g.pool.Lock()
g.pool.size = g.opts.PoolSize
g.pool.ttl = int64(g.opts.PoolTTL.Seconds())
g.pool.Unlock()
}
return nil
}
func (g *grpcClient) Options() client.Options {
return g.opts
}
func (g *grpcClient) NewMessage(topic string, msg interface{}, opts ...client.MessageOption) client.Message {
return newGRPCEvent(topic, msg, g.opts.ContentType, opts...)
}
func (g *grpcClient) NewRequest(service, method string, req interface{}, reqOpts ...client.RequestOption) client.Request {
return newGRPCRequest(service, method, req, g.opts.ContentType, reqOpts...)
}
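// Call executes the request with selector, backoff and retry semantics,
// reusing any deadline already set on the context.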
func (g *grpcClient) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
// make a copy of call opts
callOpts := g.opts.CallOptions
for _, opt := range opts {
opt(&callOpts)
}
next, err := g.next(req, callOpts)
if err != nil {
return err
}
// check if we already have a deadline
d, ok := ctx.Deadline()
if !ok {
// no deadline so we create a new one
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, callOpts.RequestTimeout)
defer cancel()
} else {
// got a deadline so no need to setup context
// but we need to set the timeout we pass along
opt := client.WithRequestTimeout(time.Until(d))
opt(&callOpts)
}
// should we noop right here?
select {
case <-ctx.Done():
return errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
default:
}
// make copy of call method
gcall := g.call
// wrap the call in reverse
for i := len(callOpts.CallWrappers); i > 0; i-- {
gcall = callOpts.CallWrappers[i-1](gcall)
}
call := func(i int) error {
// call backoff first. Someone may want an initial start delay
t, err := callOpts.Backoff(ctx, req, i)
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
// only sleep if greater than 0
if t.Seconds() > 0 {
time.Sleep(t)
}
// select next node
node, err := next()
service := req.Service()
if err != nil {
if err == selector.ErrNotFound {
return errors.InternalServerError("go.micro.client", "service %s: %s", service, err.Error())
}
return errors.InternalServerError("go.micro.client", "error selecting %s node: %s", service, err.Error())
}
// make the call
err = gcall(ctx, node, req, rsp, callOpts)
g.opts.Selector.Mark(service, node, err)
return err
}
ch := make(chan error, callOpts.Retries+1)
var gerr error
for i := 0; i <= callOpts.Retries; i++ {
go func(i int) {
ch <- call(i)
}(i)
select {
case <-ctx.Done():
return errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
case err := <-ch:
// if the call succeeded let's bail early
if err == nil {
return nil
}
retry, rerr := callOpts.Retry(ctx, req, i, err)
if rerr != nil {
return rerr
}
if !retry {
return err
}
gerr = err
}
}
return gerr
}
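// Stream opens a streaming request with the same selector, backoff and retry
// semantics as Call, but without imposing a request timeout on the context.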
func (g *grpcClient) Stream(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
// make a copy of call opts
callOpts := g.opts.CallOptions
for _, opt := range opts {
opt(&callOpts)
}
next, err := g.next(req, callOpts)
if err != nil {
return nil, err
}
// #200 - streams shouldn't have a request timeout set on the context
// should we noop right here?
select {
case <-ctx.Done():
return nil, errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
default:
}
call := func(i int) (client.Stream, error) {
// call backoff first. Someone may want an initial start delay
t, err := callOpts.Backoff(ctx, req, i)
if err != nil {
return nil, errors.InternalServerError("go.micro.client", err.Error())
}
// only sleep if greater than 0
if t.Seconds() > 0 {
time.Sleep(t)
}
node, err := next()
service := req.Service()
if err != nil {
if err == selector.ErrNotFound {
return nil, errors.InternalServerError("go.micro.client", "service %s: %s", service, err.Error())
}
return nil, errors.InternalServerError("go.micro.client", "error selecting %s node: %s", service, err.Error())
}
stream, err := g.stream(ctx, node, req, callOpts)
g.opts.Selector.Mark(service, node, err)
return stream, err
}
type response struct {
stream client.Stream
err error
}
ch := make(chan response, callOpts.Retries+1)
var grr error
for i := 0; i <= callOpts.Retries; i++ {
go func(i int) {
s, err := call(i)
ch <- response{s, err}
}(i)
select {
case <-ctx.Done():
return nil, errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
case rsp := <-ch:
// if the call succeeded let's bail early
if rsp.err == nil {
return rsp.stream, nil
}
retry, rerr := callOpts.Retry(ctx, req, i, rsp.err)
if rerr != nil {
return nil, rerr
}
if !retry {
return nil, rsp.err
}
grr = rsp.err
}
}
return nil, grr
}
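// Publish encodes the message payload and publishes it to the broker,
// honouring any MICRO_PROXY exchange override.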
func (g *grpcClient) Publish(ctx context.Context, p client.Message, opts ...client.PublishOption) error {
var options client.PublishOptions
for _, o := range opts {
o(&options)
}
md, ok := metadata.FromContext(ctx)
if !ok {
md = make(map[string]string)
}
md["Content-Type"] = p.ContentType()
md["Micro-Topic"] = p.Topic()
cf, err := g.newGRPCCodec(p.ContentType())
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
var body []byte
// passed in raw data
if d, ok := p.Payload().(*raw.Frame); ok {
body = d.Data
} else {
// set the body
b, err := cf.Marshal(p.Payload())
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
body = b
}
g.once.Do(func() {
g.opts.Broker.Connect()
})
topic := p.Topic()
// get proxy topic
if prx := os.Getenv("MICRO_PROXY"); len(prx) > 0 {
options.Exchange = prx
}
// get the exchange
if len(options.Exchange) > 0 {
topic = options.Exchange
}
return g.opts.Broker.Publish(topic, &broker.Message{
Header: md,
Body: body,
})
}
func (g *grpcClient) String() string {
return "grpc"
}
func (g *grpcClient) getGrpcDialOptions() []grpc.DialOption {
if g.opts.CallOptions.Context == nil {
return nil
}
v := g.opts.CallOptions.Context.Value(grpcDialOptions{})
if v == nil {
return nil
}
opts, ok := v.([]grpc.DialOption)
if !ok {
return nil
}
return opts
}
func (g *grpcClient) getGrpcCallOptions() []grpc.CallOption {
if g.opts.CallOptions.Context == nil {
return nil
}
v := g.opts.CallOptions.Context.Value(grpcCallOptions{})
if v == nil {
return nil
}
opts, ok := v.([]grpc.CallOption)
if !ok {
return nil
}
return opts
}
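// newClient constructs a grpcClient with default options, initializes the
// connection pool and applies the configured wrappers in reverse order.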
func newClient(opts ...client.Option) client.Client {
options := client.NewOptions()
// default content type for grpc
options.ContentType = "application/grpc+proto"
for _, o := range opts {
o(&options)
}
rc := &grpcClient{
once: sync.Once{},
opts: options,
}
rc.pool = newPool(options.PoolSize, options.PoolTTL, rc.poolMaxIdle(), rc.poolMaxStreams())
c := client.Client(rc)
// wrap in reverse
for i := len(options.Wrappers); i > 0; i-- {
c = options.Wrappers[i-1](c)
}
return c
}
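// NewClient returns a new client.Client backed by gRPC.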
func NewClient(opts ...client.Option) client.Client {
return newClient(opts...)
}
| [
"\"MICRO_PROXY\"",
"\"MICRO_PROXY_ADDRESS\"",
"\"MICRO_PROXY\""
] | [] | [
"MICRO_PROXY",
"MICRO_PROXY_ADDRESS"
] | [] | ["MICRO_PROXY", "MICRO_PROXY_ADDRESS"] | go | 2 | 0 | |
goat_test.go | package goat
import (
"github.com/stretchr/testify/assert"
"os"
"testing"
)
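// TestInit verifies that Init() neither panics nor records errors, and that
// the root is set from the APP_BASE environment variable.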
func TestInit(t *testing.T) {
defer func() {
r := recover()
assert.Nil(t, r, "Init() panicked")
if r != nil {
println("recovered")
println(r)
}
assert.Len(t, GetErrors(), 0, "Init() created errors")
}()
SetRoot(os.Getenv("APP_BASE"))
Init()
assert.NotEmpty(t, Root(), "failed to set root")
}
| [
"\"APP_BASE\""
] | [] | [
"APP_BASE"
] | [] | ["APP_BASE"] | go | 1 | 0 | |
vendor/github.com/containers/common/pkg/auth/auth.go | package auth
import (
"bufio"
"context"
"fmt"
"os"
"strings"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/pkg/docker/config"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh/terminal"
)
// GetDefaultAuthFile returns the value of the REGISTRY_AUTH_FILE environment
// variable, which serves as the default --authfile path in the various
// --authfile flag definitions.
func GetDefaultAuthFile() string {
return os.Getenv("REGISTRY_AUTH_FILE")
}
// CheckAuthFile validates the file path given via --authfile.
// It is used by every command that has an --authfile flag.
func CheckAuthFile(authfile string) error {
if authfile == "" {
return nil
}
if _, err := os.Stat(authfile); err != nil {
return errors.Wrapf(err, "error checking authfile path %s", authfile)
}
return nil
}
// systemContextWithOptions returns a version of sys
// updated with authFile and certDir values (if they are not "").
// NOTE: this is a shallow copy that can be used and updated, but may share
// data with the original parameter.
func systemContextWithOptions(sys *types.SystemContext, authFile, certDir string) *types.SystemContext {
if sys != nil {
sysCopy := *sys
sys = &sysCopy
} else {
sys = &types.SystemContext{}
}
if authFile != "" {
sys.AuthFilePath = authFile
}
if certDir != "" {
sys.DockerCertPath = certDir
}
return sys
}
// Login implements a “log in” command with the provided opts and args
// reading the password from opts.Stdin or the options in opts.
func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginOptions, args []string) error {
systemContext = systemContextWithOptions(systemContext, opts.AuthFile, opts.CertDir)
var (
server string
err error
)
if len(args) > 1 {
return errors.Errorf("login accepts only one registry to login to")
}
if len(args) == 0 {
if !opts.AcceptUnspecifiedRegistry {
return errors.Errorf("please provide a registry to login to")
}
if server, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
return err
}
logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", server)
} else {
server = getRegistryName(args[0])
}
authConfig, err := config.GetCredentials(systemContext, server)
if err != nil {
return errors.Wrapf(err, "error reading auth file")
}
if opts.GetLoginSet {
if authConfig.Username == "" {
return errors.Errorf("not logged into %s", server)
}
fmt.Fprintf(opts.Stdout, "%s\n", authConfig.Username)
return nil
}
if authConfig.IdentityToken != "" {
return errors.Errorf("currently logged in, auth file contains an Identity token")
}
password := opts.Password
if opts.StdinPassword {
var stdinPasswordStrBuilder strings.Builder
if opts.Password != "" {
return errors.Errorf("Can't specify both --password-stdin and --password")
}
if opts.Username == "" {
return errors.Errorf("Must provide --username with --password-stdin")
}
scanner := bufio.NewScanner(opts.Stdin)
for scanner.Scan() {
fmt.Fprint(&stdinPasswordStrBuilder, scanner.Text())
}
password = stdinPasswordStrBuilder.String()
}
// If neither a username nor a password is specified, try to use the existing ones.
if opts.Username == "" && password == "" && authConfig.Username != "" && authConfig.Password != "" {
fmt.Fprintln(opts.Stdout, "Authenticating with existing credentials...")
if err := docker.CheckAuth(ctx, systemContext, authConfig.Username, authConfig.Password, server); err == nil {
fmt.Fprintln(opts.Stdout, "Existing credentials are valid. Already logged in to", server)
return nil
}
fmt.Fprintln(opts.Stdout, "Existing credentials are invalid, please enter valid username and password")
}
username, password, err := getUserAndPass(opts, password, authConfig.Username)
if err != nil {
return errors.Wrapf(err, "error getting username and password")
}
if err = docker.CheckAuth(ctx, systemContext, username, password, server); err == nil {
// Write the new credentials to the authfile
if err := config.SetAuthentication(systemContext, server, username, password); err != nil {
return err
}
}
if err == nil {
fmt.Fprintln(opts.Stdout, "Login Succeeded!")
return nil
}
if unauthorized, ok := err.(docker.ErrUnauthorizedForCredentials); ok {
logrus.Debugf("error logging into %q: %v", server, unauthorized)
return errors.Errorf("error logging into %q: invalid username/password", server)
}
return errors.Wrapf(err, "error authenticating creds for %q", server)
}
// getRegistryName scrubs and parses the input to get the server name
func getRegistryName(server string) string {
// removes 'http://' or 'https://' from the front of the
// server/registry string if either is there. This will be mostly used
// for user input from 'Buildah login' and 'Buildah logout'.
server = strings.TrimPrefix(strings.TrimPrefix(server, "https://"), "http://")
// gets the registry from the input. If the input is of the form
// quay.io/myuser/myimage, it will parse it and just return quay.io
split := strings.Split(server, "/")
return split[0]
}
// getUserAndPass gets the username and password from STDIN if not given
// using the -u and -p flags. If the username prompt is left empty, the
// displayed userFromAuthFile will be used instead.
func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user, pass string, err error) {
reader := bufio.NewReader(opts.Stdin)
username := opts.Username
if username == "" {
if userFromAuthFile != "" {
fmt.Fprintf(opts.Stdout, "Username (%s): ", userFromAuthFile)
} else {
fmt.Fprint(opts.Stdout, "Username: ")
}
username, err = reader.ReadString('\n')
if err != nil {
return "", "", errors.Wrapf(err, "error reading username")
}
// If the user just hit enter, use the displayed user from
// the authentication file. This allows a lazy
// `$ buildah login -p $NEW_PASSWORD` without specifying the
// user.
if strings.TrimSpace(username) == "" {
username = userFromAuthFile
}
}
if password == "" {
fmt.Fprint(opts.Stdout, "Password: ")
pass, err := terminal.ReadPassword(0)
if err != nil {
return "", "", errors.Wrapf(err, "error reading password")
}
password = string(pass)
fmt.Fprintln(opts.Stdout)
}
return strings.TrimSpace(username), password, err
}
// Logout implements a “log out” command with the provided opts and args
func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []string) error {
if err := CheckAuthFile(opts.AuthFile); err != nil {
return err
}
systemContext = systemContextWithOptions(systemContext, opts.AuthFile, "")
var (
server string
err error
)
if len(args) > 1 {
return errors.Errorf("logout accepts only one registry to logout from")
}
if len(args) == 0 && !opts.All {
if !opts.AcceptUnspecifiedRegistry {
return errors.Errorf("please provide a registry to logout from")
}
if server, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
return err
}
logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", server)
}
if len(args) != 0 {
if opts.All {
return errors.Errorf("--all takes no arguments")
}
server = getRegistryName(args[0])
}
if opts.All {
if err := config.RemoveAllAuthentication(systemContext); err != nil {
return err
}
fmt.Fprintln(opts.Stdout, "Removed login credentials for all registries")
return nil
}
err = config.RemoveAuthentication(systemContext, server)
switch errors.Cause(err) {
case nil:
fmt.Fprintf(opts.Stdout, "Removed login credentials for %s\n", server)
return nil
case config.ErrNotLoggedIn:
authConfig, err := config.GetCredentials(systemContext, server)
if err != nil {
return errors.Wrapf(err, "error reading auth file")
}
authInvalid := docker.CheckAuth(context.Background(), systemContext, authConfig.Username, authConfig.Password, server)
if authConfig.Username != "" && authConfig.Password != "" && authInvalid == nil {
fmt.Fprintf(opts.Stdout, "Not logged into %s with current tool. Existing credentials were established via docker login. Please use docker logout instead.\n", server)
return nil
}
return errors.Errorf("Not logged into %s\n", server)
default:
return errors.Wrapf(err, "error logging out of %q", server)
}
}
// defaultRegistryWhenUnspecified returns the first registry from the
// unqualified-search list in registries.conf. It is used by login/logout
// when no registry argument is specified.
func defaultRegistryWhenUnspecified(systemContext *types.SystemContext) (string, error) {
registriesFromFile, err := sysregistriesv2.UnqualifiedSearchRegistries(systemContext)
if err != nil {
return "", errors.Wrapf(err, "error getting registry from registry.conf, please specify a registry")
}
if len(registriesFromFile) == 0 {
return "", errors.Errorf("no registries found in registries.conf, a registry must be provided")
}
return registriesFromFile[0], nil
}
| [
"\"REGISTRY_AUTH_FILE\""
] | [] | [
"REGISTRY_AUTH_FILE"
] | [] | ["REGISTRY_AUTH_FILE"] | go | 1 | 0 | |
server/server_test.go | package server_test
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"net/mail"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"testing"
"time"
"github.com/dgrijalva/jwt-go"
iclient "github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/influxdb/influxql"
imodels "github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/toml"
"github.com/influxdata/kapacitor/alert"
"github.com/influxdata/kapacitor/client/v1"
"github.com/influxdata/kapacitor/command"
"github.com/influxdata/kapacitor/command/commandtest"
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/server"
"github.com/influxdata/kapacitor/services/alert/alerttest"
"github.com/influxdata/kapacitor/services/alerta/alertatest"
"github.com/influxdata/kapacitor/services/hipchat/hipchattest"
"github.com/influxdata/kapacitor/services/httppost"
"github.com/influxdata/kapacitor/services/httppost/httpposttest"
"github.com/influxdata/kapacitor/services/k8s"
"github.com/influxdata/kapacitor/services/mqtt"
"github.com/influxdata/kapacitor/services/mqtt/mqtttest"
"github.com/influxdata/kapacitor/services/opsgenie"
"github.com/influxdata/kapacitor/services/opsgenie/opsgenietest"
"github.com/influxdata/kapacitor/services/pagerduty"
"github.com/influxdata/kapacitor/services/pagerduty/pagerdutytest"
"github.com/influxdata/kapacitor/services/pushover/pushovertest"
"github.com/influxdata/kapacitor/services/sensu/sensutest"
"github.com/influxdata/kapacitor/services/slack/slacktest"
"github.com/influxdata/kapacitor/services/smtp/smtptest"
"github.com/influxdata/kapacitor/services/snmptrap/snmptraptest"
"github.com/influxdata/kapacitor/services/swarm"
"github.com/influxdata/kapacitor/services/talk/talktest"
"github.com/influxdata/kapacitor/services/telegram"
"github.com/influxdata/kapacitor/services/telegram/telegramtest"
"github.com/influxdata/kapacitor/services/udf"
"github.com/influxdata/kapacitor/services/victorops"
"github.com/influxdata/kapacitor/services/victorops/victoropstest"
"github.com/k-sone/snmpgo"
"github.com/pkg/errors"
)
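// udfDir holds the path to the repository's udf directory, resolved
// relative to the working directory at init time.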
var udfDir string
func init() {
dir, _ := os.Getwd()
udfDir = filepath.Clean(filepath.Join(dir, "../udf"))
}
func TestServer_Ping(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
_, version, err := cli.Ping()
if err != nil {
t.Fatal(err)
}
if version != "testServer" {
t.Fatal("unexpected version", version)
}
}
func TestServer_Pprof_Index(t *testing.T) {
s, _ := OpenDefaultServer()
defer s.Close()
testCases := []struct {
path string
code int
contentType string
}{
{
path: "/debug/pprof/",
code: http.StatusOK,
contentType: "text/html; charset=utf-8",
},
{
path: "/debug/pprof/block",
code: http.StatusOK,
contentType: "text/plain; charset=utf-8",
},
{
path: "/debug/pprof/goroutine",
code: http.StatusOK,
contentType: "text/plain; charset=utf-8",
},
{
path: "/debug/pprof/heap",
code: http.StatusOK,
contentType: "text/plain; charset=utf-8",
},
{
path: "/debug/pprof/threadcreate",
code: http.StatusOK,
contentType: "text/plain; charset=utf-8",
},
}
for _, tc := range testCases {
t.Run(tc.path, func(t *testing.T) {
r, err := http.Get(s.URL() + tc.path)
if err != nil {
t.Fatal(err)
}
if got, exp := r.StatusCode, tc.code; got != exp {
t.Errorf("unexpected status code got %d exp %d", got, exp)
}
if got, exp := r.Header.Get("Content-Type"), tc.contentType; got != exp {
t.Errorf("unexpected content type got %s exp %s", got, exp)
}
})
}
}
func TestServer_Authenticate_Fail(t *testing.T) {
conf := NewConfig()
conf.HTTP.AuthEnabled = true
s := OpenServer(conf)
cli, err := client.New(client.Config{
URL: s.URL(),
})
if err != nil {
t.Fatal(err)
}
defer s.Close()
_, _, err = cli.Ping()
if err == nil {
t.Error("expected authentication error")
} else if exp, got := "unable to parse authentication credentials", err.Error(); got != exp {
t.Errorf("unexpected error message: got %q exp %q", got, exp)
}
}
func TestServer_Authenticate_User(t *testing.T) {
conf := NewConfig()
conf.HTTP.AuthEnabled = true
s := OpenServer(conf)
cli, err := client.New(client.Config{
URL: s.URL(),
Credentials: &client.Credentials{
Method: client.UserAuthentication,
Username: "bob",
Password: "bob's secure password",
},
})
if err != nil {
t.Fatal(err)
}
defer s.Close()
_, version, err := cli.Ping()
if err != nil {
t.Fatal(err)
}
if version != "testServer" {
t.Fatal("unexpected version", version)
}
}
func TestServer_Authenticate_Bearer_Fail(t *testing.T) {
secret := "secret"
// Create a new token object, specifying signing method and the claims
// you would like it to contain.
token := jwt.NewWithClaims(jwt.SigningMethodHS512, jwt.MapClaims{
"username": "bob",
"exp": time.Now().Add(10 * time.Second).Unix(),
})
// Sign and get the complete encoded token as a string using the secret
tokenString, err := token.SignedString([]byte(secret))
if err != nil {
t.Fatal(err)
}
conf := NewConfig()
conf.HTTP.AuthEnabled = true
// Use a different secret so the token is invalid
conf.HTTP.SharedSecret = secret + "extra secret"
s := OpenServer(conf)
cli, err := client.New(client.Config{
URL: s.URL(),
Credentials: &client.Credentials{
Method: client.BearerAuthentication,
Token: tokenString,
},
})
if err != nil {
t.Fatal(err)
}
defer s.Close()
_, _, err = cli.Ping()
if err == nil {
t.Error("expected authentication error")
} else if exp, got := "invalid token: signature is invalid", err.Error(); got != exp {
t.Errorf("unexpected error message: got %q exp %q", got, exp)
}
}
func TestServer_Authenticate_Bearer_Expired(t *testing.T) {
secret := "secret"
// Create a new token object, specifying signing method and the claims
// you would like it to contain.
token := jwt.NewWithClaims(jwt.SigningMethodHS512, jwt.MapClaims{
"username": "bob",
"exp": time.Now().Add(-10 * time.Second).Unix(),
})
// Sign and get the complete encoded token as a string using the secret
tokenString, err := token.SignedString([]byte(secret))
if err != nil {
t.Fatal(err)
}
conf := NewConfig()
conf.HTTP.AuthEnabled = true
conf.HTTP.SharedSecret = secret
s := OpenServer(conf)
cli, err := client.New(client.Config{
URL: s.URL(),
Credentials: &client.Credentials{
Method: client.BearerAuthentication,
Token: tokenString,
},
})
if err != nil {
t.Fatal(err)
}
defer s.Close()
_, _, err = cli.Ping()
if err == nil {
t.Error("expected authentication error")
} else if exp, got := "invalid token: Token is expired", err.Error(); got != exp {
t.Errorf("unexpected error message: got %q exp %q", got, exp)
}
}
func TestServer_Authenticate_Bearer(t *testing.T) {
secret := "secret"
// Create a new token object, specifying signing method and the claims
// you would like it to contain.
token := jwt.NewWithClaims(jwt.SigningMethodHS512, jwt.MapClaims{
"username": "bob",
"exp": time.Now().Add(10 * time.Second).Unix(),
})
// Sign and get the complete encoded token as a string using the secret
tokenString, err := token.SignedString([]byte(secret))
if err != nil {
t.Fatal(err)
}
conf := NewConfig()
conf.HTTP.AuthEnabled = true
conf.HTTP.SharedSecret = secret
s := OpenServer(conf)
cli, err := client.New(client.Config{
URL: s.URL(),
Credentials: &client.Credentials{
Method: client.BearerAuthentication,
Token: tokenString,
},
})
if err != nil {
t.Fatal(err)
}
defer s.Close()
_, version, err := cli.Ping()
if err != nil {
t.Fatal(err)
}
if version != "testServer" {
t.Fatal("unexpected version", version)
}
}
func TestServer_CreateTask(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
tick := `stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := "digraph testTaskID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
func TestServer_CreateTaskImplicitStream(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
tick := `dbrp "mydb"."myrp"
dbrp "otherdb"."default"
stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := "digraph testTaskID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
func TestServer_CreateTaskBatch(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
}
tick := `dbrp "mydb"."myrp"
batch
|query('SELECT * from mydb.myrp.mymeas')
|log()
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.BatchTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.BatchTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := "digraph testTaskID {\nquery1 -> log2;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
func TestServer_CreateTaskImplicitAndExplicit(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
}
tick := `dbrp "mydb"."myrp"
dbrp "otherdb"."default"
stream
|from()
.measurement('test')
`
_, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
// Expect an error: DBRPs are declared both in the TICKscript and explicitly
if err == nil {
t.Fatal("expected task to fail to be created")
}
}
func TestServer_CreateTaskExplicitUpdateImplicit(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
createDBRPs := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
createTick := `stream
|from()
.measurement('test')
`
updateDBRPs := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
}
updateTick := `dbrp "mydb"."myrp"
stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
DBRPs: createDBRPs,
TICKscript: createTick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, createDBRPs) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, createDBRPs)
}
if ti.TICKscript != createTick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, createTick)
}
dot := "digraph testTaskID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
TICKscript: updateTick,
})
if err != nil {
t.Fatal(err)
}
ti, err = cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, updateDBRPs) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, updateDBRPs)
}
if ti.TICKscript != updateTick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, updateTick)
}
dot = "digraph testTaskID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
func TestServer_EnableTask(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
tick := `stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Enabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Enabled)
}
if ti.Executing != true {
t.Fatalf("unexpected executing got %v exp %v", ti.Executing, true)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := `digraph testTaskID {
graph [throughput="0.00 points/s"];
stream0 [avg_exec_time_ns="0s" errors="0" working_cardinality="0" ];
stream0 -> from1 [processed="0"];
from1 [avg_exec_time_ns="0s" errors="0" working_cardinality="0" ];
}`
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
func TestServer_EnableTaskOnCreate(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
tick := `stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Enabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Enabled)
}
if ti.Executing != true {
t.Fatalf("unexpected executing got %v exp %v", ti.Executing, true)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := `digraph testTaskID {
graph [throughput="0.00 points/s"];
stream0 [avg_exec_time_ns="0s" errors="0" working_cardinality="0" ];
stream0 -> from1 [processed="0"];
from1 [avg_exec_time_ns="0s" errors="0" working_cardinality="0" ];
}`
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
func TestServer_DisableTask(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
tick := `stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := "digraph testTaskID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
func TestServer_DeleteTask(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
tick := `stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
err = cli.DeleteTask(task.Link)
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err == nil {
t.Fatal("unexpected task:", ti)
}
}
func TestServer_TaskNums(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
}
tick := `stream
|from()
.measurement('test')
`
// Create a bunch of tasks with every 3rd task enabled
count := 100
enabled := 0
tasks := make([]client.Task, count)
for i := 0; i < count; i++ {
status := client.Disabled
if i%3 == 0 {
enabled++
status = client.Enabled
}
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: fmt.Sprintf("%s-%d", id, i),
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: status,
})
if err != nil {
t.Fatal(err)
}
tasks[i] = task
}
if stats, err := s.Stats(); err != nil {
t.Fatal(err)
} else {
if got, exp := stats.NumTasks, count; got != exp {
t.Errorf("unexpected num_tasks got %d exp %d", got, exp)
}
if got, exp := stats.NumEnabledTasks, enabled; got != exp {
t.Errorf("unexpected num_enabled_tasks got %d exp %d", got, exp)
}
}
// Enable a bunch of tasks
for i, task := range tasks {
if i%2 == 0 && task.Status != client.Enabled {
enabled++
tasks[i].Status = client.Enabled
if _, err := cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
}); err != nil {
t.Fatal(err)
}
}
}
if stats, err := s.Stats(); err != nil {
t.Fatal(err)
} else {
if got, exp := stats.NumTasks, count; got != exp {
t.Errorf("unexpected num_tasks got %d exp %d", got, exp)
}
if got, exp := stats.NumEnabledTasks, enabled; got != exp {
t.Errorf("unexpected num_enabled_tasks got %d exp %d", got, exp)
}
}
// Disable a bunch of tasks
for i, task := range tasks {
if i%5 == 0 && task.Status != client.Disabled {
enabled--
tasks[i].Status = client.Disabled
if _, err := cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Disabled,
}); err != nil {
t.Fatal(err)
}
}
}
if stats, err := s.Stats(); err != nil {
t.Fatal(err)
} else {
if got, exp := stats.NumTasks, count; got != exp {
t.Errorf("unexpected num_tasks got %d exp %d", got, exp)
}
if got, exp := stats.NumEnabledTasks, enabled; got != exp {
t.Errorf("unexpected num_enabled_tasks got %d exp %d", got, exp)
}
}
// Delete a bunch of tasks
for i, task := range tasks {
if i%6 == 0 {
count--
if task.Status == client.Enabled {
enabled--
}
if err := cli.DeleteTask(task.Link); err != nil {
t.Fatal(err)
}
}
}
if stats, err := s.Stats(); err != nil {
t.Fatal(err)
} else {
if got, exp := stats.NumTasks, count; got != exp {
t.Errorf("unexpected num_tasks got %d exp %d", got, exp)
}
if got, exp := stats.NumEnabledTasks, enabled; got != exp {
t.Errorf("unexpected num_enabled_tasks got %d exp %d", got, exp)
}
}
}
func TestServer_ListTasks(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
count := 10
ttype := client.StreamTask
tick := `stream
|from()
.measurement('test')
`
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
for i := 0; i < count; i++ {
id := fmt.Sprintf("testTaskID%d", i)
status := client.Disabled
if i%2 == 0 {
status = client.Enabled
}
_, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: status,
})
if err != nil {
t.Fatal(err)
}
}
tasks, err := cli.ListTasks(nil)
if err != nil {
t.Fatal(err)
}
if exp, got := count, len(tasks); exp != got {
t.Fatalf("unexpected number of tasks: exp:%d got:%d", exp, got)
}
for i, task := range tasks {
if exp, got := fmt.Sprintf("testTaskID%d", i), task.ID; exp != got {
t.Errorf("unexpected task.ID i:%d exp:%s got:%s", i, exp, got)
}
if exp, got := client.StreamTask, task.Type; exp != got {
t.Errorf("unexpected task.Type i:%d exp:%v got:%v", i, exp, got)
}
if !reflect.DeepEqual(task.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps i:%d exp:%s got:%s", i, dbrps, task.DBRPs)
}
exp := client.Disabled
if i%2 == 0 {
exp = client.Enabled
}
if got := task.Status; exp != got {
t.Errorf("unexpected task.Status i:%d exp:%v got:%v", i, exp, got)
}
if exp, got := i%2 == 0, task.Executing; exp != got {
t.Errorf("unexpected task.Executing i:%d exp:%v got:%v", i, exp, got)
}
if exp, got := true, len(task.Dot) != 0; exp != got {
t.Errorf("unexpected task.Dot i:%d exp:\n%v\ngot:\n%v\n", i, exp, got)
}
if exp, got := tick, task.TICKscript; exp != got {
t.Errorf("unexpected task.TICKscript i:%d exp:%v got:%v", i, exp, got)
}
if exp, got := "", task.Error; exp != got {
t.Errorf("unexpected task.Error i:%d exp:%v got:%v", i, exp, got)
}
}
}
func TestServer_ListTasks_Fields(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
count := 100
ttype := client.StreamTask
tick := `stream
|from()
.measurement('test')
`
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
for i := 0; i < count; i++ {
id := fmt.Sprintf("testTaskID%d", i)
_, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
}
tasks, err := cli.ListTasks(&client.ListTasksOptions{
Pattern: "testTaskID1*",
Fields: []string{"type", "status"},
Offset: 1,
Limit: 5,
})
if err != nil {
t.Fatal(err)
}
if exp, got := 5, len(tasks); exp != got {
t.Fatalf("unexpected number of tasks: exp:%d got:%d", exp, got)
}
for i, task := range tasks {
if exp, got := fmt.Sprintf("testTaskID1%d", i), task.ID; exp != got {
t.Errorf("unexpected task.ID i:%d exp:%s got:%s", i, exp, got)
}
if exp, got := client.StreamTask, task.Type; exp != got {
t.Errorf("unexpected task.Type i:%d exp:%v got:%v", i, exp, got)
}
if exp, got := client.Enabled, task.Status; exp != got {
t.Errorf("unexpected task.Status i:%d exp:%v got:%v", i, exp, got)
}
// We didn't request these fields so they should be default zero values
if exp, got := 0, len(task.DBRPs); exp != got {
t.Fatalf("unexpected dbrps i:%d exp:%d got:%d", i, exp, got)
}
if exp, got := false, task.Executing; exp != got {
t.Errorf("unexpected task.Executing i:%d exp:%v got:%v", i, exp, got)
}
if exp, got := "", task.Dot; exp != got {
t.Errorf("unexpected task.Dot i:%d exp:%v got:%v", i, exp, got)
}
if exp, got := "", task.TICKscript; exp != got {
t.Errorf("unexpected task.TICKscript i:%d exp:%v got:%v", i, exp, got)
}
if exp, got := "", task.Error; exp != got {
t.Errorf("unexpected task.Error i:%d exp:%v got:%v", i, exp, got)
}
}
}
func TestServer_CreateTemplate(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTemplateID"
ttype := client.StreamTask
tick := `var x = 5
stream
|from()
.measurement('test')
`
template, err := cli.CreateTemplate(client.CreateTemplateOptions{
ID: id,
Type: ttype,
TICKscript: tick,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Template(template.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got\n%s\nexp\n%s\n", ti.TICKscript, tick)
}
dot := "digraph testTemplateID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
vars := client.Vars{"x": {Value: int64(5), Type: client.VarInt}}
if !reflect.DeepEqual(vars, ti.Vars) {
t.Fatalf("unexpected vars\ngot\n%s\nexp\n%s\n", ti.Vars, vars)
}
}
func TestServer_UpdateTemplateID(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTemplateID"
ttype := client.StreamTask
tick := `var x = 5
stream
|from()
.measurement('test')
`
template, err := cli.CreateTemplate(client.CreateTemplateOptions{
ID: id,
Type: ttype,
TICKscript: tick,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Template(template.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got\n%s\nexp\n%s\n", ti.TICKscript, tick)
}
dot := "digraph testTemplateID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
vars := client.Vars{"x": {Value: int64(5), Type: client.VarInt}}
if !reflect.DeepEqual(vars, ti.Vars) {
t.Fatalf("unexpected vars\ngot\n%s\nexp\n%s\n", ti.Vars, vars)
}
newID := "newTemplateID"
template, err = cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
ID: newID,
})
if err != nil {
t.Fatal(err)
}
if got, exp := template.Link.Href, "/kapacitor/v1/templates/newTemplateID"; got != exp {
t.Fatalf("unexpected template link got %s exp %s", got, exp)
}
ti, err = cli.Template(template.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != newID {
t.Fatalf("unexpected id got %s exp %s", ti.ID, newID)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got\n%s\nexp\n%s\n", ti.TICKscript, tick)
}
dot = "digraph newTemplateID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
if !reflect.DeepEqual(vars, ti.Vars) {
t.Fatalf("unexpected vars\ngot\n%s\nexp\n%s\n", ti.Vars, vars)
}
}
func TestServer_CreateTemplateImplicitAndUpdateExplicitWithTasks(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTemplateID"
implicitTick := `dbrp "telegraf"."autogen"
var x = 5
stream
|from()
.measurement('test')
`
template, err := cli.CreateTemplate(client.CreateTemplateOptions{
ID: id,
TICKscript: implicitTick,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Template(template.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.TICKscript != implicitTick {
t.Fatalf("unexpected TICKscript got\n%s\nexp\n%s\n", ti.TICKscript, implicitTick)
}
dot := "digraph testTemplateID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
vars := client.Vars{"x": {Value: int64(5), Type: client.VarInt}}
if !reflect.DeepEqual(vars, ti.Vars) {
t.Fatalf("unexpected vars\ngot\n%s\nexp\n%s\n", ti.Vars, vars)
}
implicitDBRPs := []client.DBRP{
{
Database: "telegraf",
RetentionPolicy: "autogen",
},
}
count := 1
tasks := make([]client.Task, count)
for i := 0; i < count; i++ {
task, err := cli.CreateTask(client.CreateTaskOptions{
TemplateID: template.ID,
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
tasks[i] = task
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(ti.DBRPs, implicitDBRPs) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, implicitDBRPs)
}
}
updateTick := `var x = 5
stream
|from()
.measurement('test')
`
_, err = cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
ID: id,
TICKscript: updateTick,
})
// Expect an error: the update would remove the implicit DBRPs that the tasks still rely on
if err == nil {
t.Fatal("expected template update to fail when removing implicit DBRPs")
}
finalTick := `dbrp "telegraf"."autogen"
dbrp "telegraf"."not_autogen"
var x = 5
stream
|from()
.measurement('test')
`
finalDBRPs := []client.DBRP{
{
Database: "telegraf",
RetentionPolicy: "autogen",
},
{
Database: "telegraf",
RetentionPolicy: "not_autogen",
},
}
template, err = cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
ID: id,
TICKscript: finalTick,
})
if err != nil {
t.Fatal(err)
}
for _, task := range tasks {
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(ti.DBRPs, finalDBRPs) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, finalDBRPs)
}
}
}
func TestServer_UpdateTemplateID_WithTasks(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTemplateID"
ttype := client.StreamTask
tick := `var x = 5
stream
|from()
.measurement('test')
`
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
template, err := cli.CreateTemplate(client.CreateTemplateOptions{
ID: id,
Type: ttype,
TICKscript: tick,
})
if err != nil {
t.Fatal(err)
}
count := 100
tasks := make([]client.Task, count)
for i := 0; i < count; i++ {
task, err := cli.CreateTask(client.CreateTaskOptions{
TemplateID: template.ID,
DBRPs: dbrps,
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
tasks[i] = task
}
newID := "newTemplateID"
template, err = cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
ID: newID,
})
if err != nil {
t.Fatal(err)
}
for _, task := range tasks {
got, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if got.TemplateID != newID {
t.Errorf("unexpected task TemplateID got %s exp %s", got.TemplateID, newID)
}
if got.TICKscript != tick {
t.Errorf("unexpected task TICKscript got %s exp %s", got.TICKscript, tick)
}
}
}
func TestServer_UpdateTemplateID_Fail(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTemplateID"
newID := "anotherTemplateID"
ttype := client.StreamTask
tick := `var x = 5
stream
|from()
.measurement('test')
`
template, err := cli.CreateTemplate(client.CreateTemplateOptions{
ID: id,
Type: ttype,
TICKscript: tick,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Template(template.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got\n%s\nexp\n%s\n", ti.TICKscript, tick)
}
dot := "digraph testTemplateID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
vars := client.Vars{"x": {Value: int64(5), Type: client.VarInt}}
if !reflect.DeepEqual(vars, ti.Vars) {
t.Fatalf("unexpected vars\ngot\n%s\nexp\n%s\n", ti.Vars, vars)
}
// Create conflicting template
if _, err := cli.CreateTemplate(client.CreateTemplateOptions{
ID: newID,
Type: ttype,
TICKscript: tick,
}); err != nil {
t.Fatal(err)
}
if _, err = cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
ID: newID,
}); err == nil {
t.Fatal("expected update template to fail on name conflict")
}
// Can still get old template
ti, err = cli.Template(template.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got\n%s\nexp\n%s\n", ti.TICKscript, tick)
}
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
if !reflect.DeepEqual(vars, ti.Vars) {
t.Fatalf("unexpected vars\ngot\n%s\nexp\n%s\n", ti.Vars, vars)
}
}
func TestServer_UpdateTemplateID_WithTasks_Fail(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTemplateID"
ttype := client.StreamTask
tick := `var x = 5
stream
|from()
.measurement('test')
`
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
template, err := cli.CreateTemplate(client.CreateTemplateOptions{
ID: id,
Type: ttype,
TICKscript: tick,
})
if err != nil {
t.Fatal(err)
}
count := 100
tasks := make([]client.Task, count)
for i := 0; i < count; i++ {
task, err := cli.CreateTask(client.CreateTaskOptions{
TemplateID: template.ID,
DBRPs: dbrps,
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
tasks[i] = task
}
// Create conflicting template
newID := "newTemplateID"
if _, err := cli.CreateTemplate(client.CreateTemplateOptions{
ID: newID,
Type: ttype,
TICKscript: tick,
}); err != nil {
t.Fatal(err)
}
if _, err = cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
ID: newID,
TICKscript: "stream",
}); err == nil {
t.Fatal("expected update template to fail on conflicting name")
}
for _, task := range tasks {
got, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if got.TemplateID != id {
t.Errorf("unexpected task TemplateID got %s exp %s", got.TemplateID, id)
}
if got.TICKscript != tick {
t.Errorf("unexpected task TICKscript got %s exp %s", got.TICKscript, tick)
}
}
}
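// TestServer_DeleteTemplate verifies that a deleted template can no longer be
// fetched.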
func TestServer_DeleteTemplate(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTemplateID"
ttype := client.StreamTask
tick := `stream
|from()
.measurement('test')
`
template, err := cli.CreateTemplate(client.CreateTemplateOptions{
ID: id,
Type: ttype,
TICKscript: tick,
})
if err != nil {
t.Fatal(err)
}
err = cli.DeleteTemplate(template.Link)
if err != nil {
t.Fatal(err)
}
ti, err := cli.Template(template.Link, nil)
if err == nil {
t.Fatal("unexpected template:", ti)
}
}
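// TestServer_CreateTaskFromTemplate creates a task from a template, overriding
// the template's declared var, and checks that the task inherits the
// TICKscript while carrying its own ID, DBRPs, vars, and dot output.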
func TestServer_CreateTaskFromTemplate(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTemplateID"
ttype := client.StreamTask
tick := `// Configurable measurement
var measurement = 'test'
stream
|from()
.measurement(measurement)
`
template, err := cli.CreateTemplate(client.CreateTemplateOptions{
ID: id,
Type: ttype,
TICKscript: tick,
})
if err != nil {
t.Fatal(err)
}
templateInfo, err := cli.Template(template.Link, nil)
if err != nil {
t.Fatal(err)
}
if templateInfo.Error != "" {
t.Fatal(templateInfo.Error)
}
if templateInfo.ID != id {
t.Fatalf("unexpected template.id got %s exp %s", templateInfo.ID, id)
}
if templateInfo.Type != client.StreamTask {
t.Fatalf("unexpected template.type got %v exp %v", templateInfo.Type, client.StreamTask)
}
if templateInfo.TICKscript != tick {
t.Fatalf("unexpected template.TICKscript got %s exp %s", templateInfo.TICKscript, tick)
}
dot := "digraph testTemplateID {\nstream0 -> from1;\n}"
if templateInfo.Dot != dot {
t.Fatalf("unexpected template.dot\ngot\n%s\nexp\n%s\n", templateInfo.Dot, dot)
}
expVars := client.Vars{
"measurement": {
Value: "test",
Type: client.VarString,
Description: "Configurable measurement",
},
}
if got, exp := templateInfo.Vars, expVars; !reflect.DeepEqual(exp, got) {
t.Errorf("unexpected template vars: got %v exp %v", got, exp)
}
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
vars := client.Vars{
"measurement": {
Value: "another_measurement",
Type: client.VarString,
},
}
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: "taskid",
TemplateID: id,
DBRPs: dbrps,
Vars: vars,
})
if err != nil {
t.Fatal(err)
}
taskInfo, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if taskInfo.Error != "" {
t.Fatal(taskInfo.Error)
}
if taskInfo.ID != "taskid" {
t.Fatalf("unexpected task.id got %s exp %s", taskInfo.ID, "taskid")
}
if taskInfo.Type != client.StreamTask {
t.Fatalf("unexpected task.type got %v exp %v", taskInfo.Type, client.StreamTask)
}
if taskInfo.TICKscript != tick {
t.Fatalf("unexpected task.TICKscript got %s exp %s", taskInfo.TICKscript, tick)
}
dot = "digraph taskid {\nstream0 -> from1;\n}"
if taskInfo.Dot != dot {
t.Fatalf("unexpected task.dot\ngot\n%s\nexp\n%s\n", taskInfo.Dot, dot)
}
if taskInfo.Status != client.Disabled {
t.Fatalf("unexpected task.status got %v exp %v", taskInfo.Status, client.Disabled)
}
if !reflect.DeepEqual(taskInfo.DBRPs, dbrps) {
t.Fatalf("unexpected task.dbrps got %s exp %s", taskInfo.DBRPs, dbrps)
}
if !reflect.DeepEqual(taskInfo.Vars, vars) {
t.Fatalf("unexpected task.vars got %s exp %s", taskInfo.Vars, vars)
}
}
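// TestServer_StreamTask exercises the basic stream task lifecycle: create a
// disabled task, enable it, write points, and read the windowed count back
// through the task's httpOut endpoint.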
func TestServer_StreamTask(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testStreamTask"
ttype := client.StreamTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tick := `stream
|from()
.measurement('test')
|window()
.period(10s)
.every(10s)
|count('value')
|httpOut('count')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id)
// Request data before any writes and expect null responses
nullResponse := `{"series":null}`
err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5)
if err != nil {
t.Error(err)
}
points := `test value=1 0000000000
test value=1 0000000001
test value=1 0000000001
test value=1 0000000002
test value=1 0000000002
test value=1 0000000003
test value=1 0000000003
test value=1 0000000004
test value=1 0000000005
test value=1 0000000005
test value=1 0000000005
test value=1 0000000006
test value=1 0000000007
test value=1 0000000008
test value=1 0000000009
test value=1 0000000010
test value=1 0000000011
`
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", points, v)
exp := `{"series":[{"name":"test","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}]}`
err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5)
if err != nil {
t.Error(err)
}
}
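// TestServer_StreamTask_NoRP verifies that writes without an explicit
// retention policy fall back to the configured DefaultRetentionPolicy and
// still reach the task.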
func TestServer_StreamTask_NoRP(t *testing.T) {
conf := NewConfig()
conf.DefaultRetentionPolicy = "myrp"
s := OpenServer(conf)
defer s.Close()
cli := Client(s)
id := "testStreamTask"
ttype := client.StreamTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tick := `stream
|from()
.measurement('test')
|window()
.period(10s)
.every(10s)
|count('value')
|httpOut('count')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id)
// Request data before any writes and expect null responses
nullResponse := `{"series":null}`
err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5)
if err != nil {
t.Error(err)
}
points := `test value=1 0000000000
test value=1 0000000001
test value=1 0000000001
test value=1 0000000002
test value=1 0000000002
test value=1 0000000003
test value=1 0000000003
test value=1 0000000004
test value=1 0000000005
test value=1 0000000005
test value=1 0000000005
test value=1 0000000006
test value=1 0000000007
test value=1 0000000008
test value=1 0000000009
test value=1 0000000010
test value=1 0000000011
`
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "", points, v)
exp := `{"series":[{"name":"test","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}]}`
err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5)
if err != nil {
t.Error(err)
}
}
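// TestServer_StreamTemplateTask creates a task from a template, overriding the
// 'field' var, and checks that the task counts written points using the
// overridden field.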
func TestServer_StreamTemplateTask(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
templateId := "testStreamTemplate"
taskId := "testStreamTask"
ttype := client.StreamTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tick := `
var field = 'nonexistent'
stream
|from()
.measurement('test')
|window()
.period(10s)
.every(10s)
|count(field)
|httpOut('count')
`
if _, err := cli.CreateTemplate(client.CreateTemplateOptions{
ID: templateId,
Type: ttype,
TICKscript: tick,
}); err != nil {
t.Fatal(err)
}
if _, err := cli.CreateTask(client.CreateTaskOptions{
ID: taskId,
TemplateID: templateId,
DBRPs: dbrps,
Status: client.Enabled,
Vars: client.Vars{
"field": {
Value: "value",
Type: client.VarString,
},
},
}); err != nil {
t.Fatal(err)
}
endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), taskId)
// Request data before any writes and expect null responses
nullResponse := `{"series":null}`
if err := s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5); err != nil {
t.Error(err)
}
points := `test value=1 0000000000
test value=1 0000000001
test value=1 0000000001
test value=1 0000000002
test value=1 0000000002
test value=1 0000000003
test value=1 0000000003
test value=1 0000000004
test value=1 0000000005
test value=1 0000000005
test value=1 0000000005
test value=1 0000000006
test value=1 0000000007
test value=1 0000000008
test value=1 0000000009
test value=1 0000000010
test value=1 0000000011
`
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", points, v)
exp := `{"series":[{"name":"test","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}]}`
if err := s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5); err != nil {
t.Error(err)
}
}
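// TestServer_StreamTemplateTask_MissingVar verifies that creating a task from
// a template fails with a descriptive error when a declared var is left
// unset.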
func TestServer_StreamTemplateTask_MissingVar(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
templateId := "testStreamTemplate"
taskId := "testStreamTask"
ttype := client.StreamTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tick := `
var field string
stream
|from()
.measurement('test')
|window()
.period(10s)
.every(10s)
|count(field)
|httpOut('count')
`
if _, err := cli.CreateTemplate(client.CreateTemplateOptions{
ID: templateId,
Type: ttype,
TICKscript: tick,
}); err != nil {
t.Fatal(err)
}
if _, err := cli.CreateTask(client.CreateTaskOptions{
ID: taskId,
TemplateID: templateId,
DBRPs: dbrps,
Status: client.Enabled,
}); err == nil {
t.Error("expected error for missing task vars")
} else if exp, got := "invalid TICKscript: missing value for var \"field\".", err.Error(); got != exp {
t.Errorf("unexpected error message: got %s exp %s", got, exp)
}
}
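// TestServer_StreamTemplateTask_AllTypes exercises template vars of every
// type (bool, int, float, duration, string, regex, lambda, and list),
// templating the where clause, grouping, window period, and count field.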
func TestServer_StreamTemplateTask_AllTypes(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
templateId := "testStreamTemplate"
taskId := "testStreamTask"
ttype := client.StreamTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tick := `
var bool bool
var count_threshold int
var value_threshold float
var window duration
var field string
var tagMatch regex
var match lambda
var eval lambda
var groups list
var secondGroup list
stream
|from()
.measurement('test')
.where(lambda: match AND "tag" =~ tagMatch AND bool AND "value" >= value_threshold)
.groupBy(groups)
|log().prefix('FROM')
|window()
.period(window)
.every(window)
|log().prefix('WINDOW')
|count(field)
|log().prefix('COUNT')
|groupBy(secondGroup)
|sum('count')
.as('count')
|log().prefix('SUM')
|where(lambda: "count" >= count_threshold)
|log().prefix('WHERE')
|eval(eval)
.as('count')
|httpOut('count')
`
if _, err := cli.CreateTemplate(client.CreateTemplateOptions{
ID: templateId,
Type: ttype,
TICKscript: tick,
}); err != nil {
t.Fatal(err)
}
if _, err := cli.CreateTask(client.CreateTaskOptions{
ID: taskId,
TemplateID: templateId,
DBRPs: dbrps,
Status: client.Enabled,
Vars: client.Vars{
"bool": {
Value: true,
Type: client.VarBool,
},
"count_threshold": {
Value: int64(1),
Type: client.VarInt,
},
"value_threshold": {
Value: float64(1.0),
Type: client.VarFloat,
},
"window": {
Value: 10 * time.Second,
Type: client.VarDuration,
},
"field": {
Value: "value",
Type: client.VarString,
},
"tagMatch": {
Value: "^a.*",
Type: client.VarRegex,
},
"match": {
Value: `"value" == 1.0`,
Type: client.VarLambda,
},
"eval": {
Value: `"count" * 2`,
Type: client.VarLambda,
},
"groups": {
Value: []client.Var{{Type: client.VarStar}},
Type: client.VarList,
},
"secondGroup": {
Value: []client.Var{{Value: "tag", Type: client.VarString}},
Type: client.VarList,
},
},
}); err != nil {
t.Fatal(err)
}
endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), taskId)
// Request data before any writes and expect null responses
nullResponse := `{"series":null}`
if err := s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5); err != nil {
t.Error(err)
}
points := `test,tag=abc,other=a value=1 0000000000
test,tag=abc,other=b value=1 0000000000
test,tag=abc,other=a value=1 0000000001
test,tag=bbc,other=b value=1 0000000001
test,tag=abc,other=a value=1 0000000002
test,tag=abc,other=a value=0 0000000002
test,tag=abc,other=b value=1 0000000003
test,tag=abc,other=a value=1 0000000003
test,tag=abc,other=a value=1 0000000004
test,tag=abc,other=b value=1 0000000005
test,tag=abc,other=a value=1 0000000005
test,tag=bbc,other=a value=1 0000000005
test,tag=abc,other=b value=1 0000000006
test,tag=abc,other=a value=1 0000000007
test,tag=abc,other=b value=0 0000000008
test,tag=abc,other=a value=1 0000000009
test,tag=abc,other=a value=1 0000000010
test,tag=abc,other=a value=1 0000000011
test,tag=abc,other=b value=1 0000000011
test,tag=bbc,other=a value=1 0000000011
test,tag=bbc,other=b value=1 0000000011
test,tag=abc,other=a value=1 0000000021
`
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", points, v)
exp := `{"series":[{"name":"test","tags":{"tag":"abc"},"columns":["time","count"],"values":[["1970-01-01T00:00:10Z",24]]}]}`
if err := s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5); err != nil {
t.Error(err)
}
}
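// TestServer_StreamTemplateTaskFromUpdate creates a disabled task from a
// template and verifies it starts processing data once enabled via
// UpdateTask.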
func TestServer_StreamTemplateTaskFromUpdate(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
templateId := "testStreamTemplate"
taskId := "testStreamTask"
ttype := client.StreamTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tick := `
var field = 'nonexistent'
stream
|from()
.measurement('test')
|window()
.period(10s)
.every(10s)
|count(field)
|httpOut('count')
`
if _, err := cli.CreateTemplate(client.CreateTemplateOptions{
ID: templateId,
Type: ttype,
TICKscript: tick,
}); err != nil {
t.Fatal(err)
}
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: taskId,
TemplateID: templateId,
DBRPs: dbrps,
Status: client.Disabled,
Vars: client.Vars{
"field": {
Value: "value",
Type: client.VarString,
},
},
})
if err != nil {
t.Fatal(err)
}
if _, err := cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
}); err != nil {
t.Fatal(err)
}
endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), taskId)
// Request data before any writes and expect null responses
nullResponse := `{"series":null}`
if err := s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5); err != nil {
t.Error(err)
}
points := `test value=1 0000000000
test value=1 0000000001
test value=1 0000000001
test value=1 0000000002
test value=1 0000000002
test value=1 0000000003
test value=1 0000000003
test value=1 0000000004
test value=1 0000000005
test value=1 0000000005
test value=1 0000000005
test value=1 0000000006
test value=1 0000000007
test value=1 0000000008
test value=1 0000000009
test value=1 0000000010
test value=1 0000000011
`
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", points, v)
exp := `{"series":[{"name":"test","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}]}`
if err := s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5); err != nil {
t.Error(err)
}
}
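// TestServer_StreamTemplateTask_UpdateTemplate verifies that updating a
// template's TICKscript propagates to tasks already created from it: the
// task produces correct results once the template is fixed.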
func TestServer_StreamTemplateTask_UpdateTemplate(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
templateId := "testStreamTemplate"
taskId := "testStreamTask"
ttype := client.StreamTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tickWrong := `
stream
|from()
.measurement('test')
|window()
.period(10s)
.every(10s)
|count('wrong')
|httpOut('count')
`
tickCorrect := `
var field string
stream
|from()
.measurement('test')
|window()
.period(10s)
.every(10s)
|count(field)
|httpOut('count')
`
template, err := cli.CreateTemplate(client.CreateTemplateOptions{
ID: templateId,
Type: ttype,
TICKscript: tickWrong,
})
if err != nil {
t.Fatal(err)
}
if _, err = cli.CreateTask(client.CreateTaskOptions{
ID: taskId,
TemplateID: templateId,
DBRPs: dbrps,
Status: client.Enabled,
Vars: client.Vars{
"field": {
Value: "value",
Type: client.VarString,
},
},
}); err != nil {
t.Fatal(err)
}
if _, err := cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
TICKscript: tickCorrect,
}); err != nil {
t.Fatal(err)
}
endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), taskId)
// Request data before any writes and expect null responses
nullResponse := `{"series":null}`
if err := s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5); err != nil {
t.Error(err)
}
points := `test value=1 0000000000
test value=1 0000000001
test value=1 0000000001
test value=1 0000000002
test value=1 0000000002
test value=1 0000000003
test value=1 0000000003
test value=1 0000000004
test value=1 0000000005
test value=1 0000000005
test value=1 0000000005
test value=1 0000000006
test value=1 0000000007
test value=1 0000000008
test value=1 0000000009
test value=1 0000000010
test value=1 0000000011
`
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", points, v)
exp := `{"series":[{"name":"test","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}]}`
if err := s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5); err != nil {
t.Error(err)
}
}
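// TestServer_StreamTemplateTask_UpdateTemplate_Rollback verifies that a
// template update which breaks an associated task (a new var with no value)
// is rolled back, leaving every task on the old script, and that the update
// succeeds once all tasks define the new var.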
func TestServer_StreamTemplateTask_UpdateTemplate_Rollback(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
templateId := "testStreamTemplate"
taskId := "testStreamTask"
ttype := client.StreamTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tickCorrect := `
var field string
stream
|from()
.measurement('test')
|window()
.period(10s)
.every(10s)
|count(field)
|httpOut('count')
`
tickNewVar := `
var field string
var period duration
stream
|from()
.measurement('test')
|window()
.period(period)
.every(period)
|count(field)
|httpOut('count')
`
template, err := cli.CreateTemplate(client.CreateTemplateOptions{
ID: templateId,
Type: ttype,
TICKscript: tickCorrect,
})
if err != nil {
t.Fatal(err)
}
// Create several tasks
count := 5
tasks := make([]client.Task, count)
for i := 0; i < count; i++ {
if task, err := cli.CreateTask(client.CreateTaskOptions{
ID: fmt.Sprintf("%s-%d", taskId, i),
TemplateID: templateId,
DBRPs: dbrps,
Status: client.Enabled,
Vars: client.Vars{
"field": {
Value: "value",
Type: client.VarString,
},
},
}); err != nil {
t.Fatal(err)
} else {
tasks[i] = task
}
}
if _, err := cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
TICKscript: tickNewVar,
}); err == nil {
t.Error("expected error for breaking template update, got nil")
} else if got, exp := err.Error(), `error reloading associated task testStreamTask-0: missing value for var "period".`; exp != got {
t.Errorf("unexpected error for breaking template update, got %s exp %s", got, exp)
}
// Get all tasks and make sure their TICKscript has the original value
for _, task := range tasks {
if gotTask, err := cli.Task(task.Link, &client.TaskOptions{ScriptFormat: "raw"}); err != nil {
t.Fatal(err)
} else if got, exp := gotTask.TICKscript, tickCorrect; got != exp {
t.Errorf("unexpected task TICKscript:\ngot\n%s\nexp\n%s\n", got, exp)
}
}
// Update all tasks with new var
for _, task := range tasks {
if _, err := cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Vars: client.Vars{
"field": {
Value: "value",
Type: client.VarString,
},
"period": {
Value: 10 * time.Second,
Type: client.VarDuration,
},
},
}); err != nil {
t.Fatal(err)
}
}
// Now the template update should succeed since the tasks have been updated too.
if _, err := cli.UpdateTemplate(template.Link, client.UpdateTemplateOptions{
TICKscript: tickNewVar,
}); err != nil {
t.Fatal(err)
}
for _, task := range tasks {
taskId := task.ID
endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), taskId)
// Request data before any writes and expect null responses
nullResponse := `{"series":null}`
if err := s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5); err != nil {
t.Error(err)
}
}
points := `test value=1 0000000000
test value=1 0000000001
test value=1 0000000001
test value=1 0000000002
test value=1 0000000002
test value=1 0000000003
test value=1 0000000003
test value=1 0000000004
test value=1 0000000005
test value=1 0000000005
test value=1 0000000005
test value=1 0000000006
test value=1 0000000007
test value=1 0000000008
test value=1 0000000009
test value=1 0000000010
test value=1 0000000011
`
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", points, v)
for _, task := range tasks {
taskId := task.ID
endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), taskId)
exp := `{"series":[{"name":"test","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}]}`
if err := s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5); err != nil {
t.Error(err)
}
}
}
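// TestServer_UpdateTaskID verifies that renaming a task updates its link and
// dot output while preserving its type, status, DBRPs, and TICKscript.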
func TestServer_UpdateTaskID(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
tick := `stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := "digraph testTaskID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
newID := "newTaskID"
task, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
ID: newID,
})
if err != nil {
t.Fatal(err)
}
if got, exp := task.Link.Href, "/kapacitor/v1/tasks/newTaskID"; got != exp {
t.Fatalf("unexpected task link got %s exp %s", got, exp)
}
ti, err = cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != newID {
t.Fatalf("unexpected id got %s exp %s", ti.ID, newID)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot = "digraph newTaskID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
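// TestServer_UpdateTaskID_Fail verifies that renaming a task to an ID already
// taken by another task is rejected and that the original task remains
// retrievable and unchanged.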
func TestServer_UpdateTaskID_Fail(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
newID := "anotherTaskID"
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
tick := `stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := "digraph testTaskID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
// Create conflicting task
if _, err := cli.CreateTask(client.CreateTaskOptions{
ID: newID,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
}); err != nil {
t.Fatal(err)
}
if _, err := cli.UpdateTask(task.Link, client.UpdateTaskOptions{
ID: newID,
}); err == nil {
t.Fatal("expected error on name conflict")
}
// Can still get old task
ti, err = cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
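// TestServer_UpdateTaskID_Enabled verifies that an enabled, executing task
// keeps executing across a rename.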
func TestServer_UpdateTaskID_Enabled(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testTaskID"
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
tick := `stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Enabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Enabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
if !ti.Executing {
t.Fatal("expected task to be executing")
}
newID := "newTaskID"
task, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
ID: newID,
})
if err != nil {
t.Fatal(err)
}
if got, exp := task.Link.Href, "/kapacitor/v1/tasks/newTaskID"; got != exp {
t.Fatalf("unexpected task link got %s exp %s", got, exp)
}
ti, err = cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != newID {
t.Fatalf("unexpected id got %s exp %s", ti.ID, newID)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Enabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Enabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
if !ti.Executing {
t.Fatal("expected task to be executing")
}
}
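// TestServer_StreamTask_AllMeasurements verifies that a stream task whose
// from() node has no measurement filter receives points from every
// measurement.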
func TestServer_StreamTask_AllMeasurements(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testStreamTask"
ttype := client.StreamTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tick := `stream
|from()
|window()
.period(10s)
.every(10s)
|count('value')
|httpOut('count')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id)
// Request data before any writes and expect null responses
nullResponse := `{"series":null}`
err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5)
if err != nil {
t.Error(err)
}
points := `test0 value=1 0000000000
test1 value=1 0000000001
test0 value=1 0000000001
test1 value=1 0000000002
test0 value=1 0000000002
test1 value=1 0000000003
test0 value=1 0000000003
test1 value=1 0000000004
test0 value=1 0000000005
test1 value=1 0000000005
test0 value=1 0000000005
test1 value=1 0000000006
test0 value=1 0000000007
test1 value=1 0000000008
test0 value=1 0000000009
test1 value=1 0000000010
test0 value=1 0000000011
`
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", points, v)
exp := `{"series":[{"name":"test0","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}]}`
err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5)
if err != nil {
t.Error(err)
}
}
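// TestServer_BatchTask runs a batch task against a mock InfluxDB that parses
// the stop time out of each query's condition and returns two points just
// before it, then checks the resulting count via the task's httpOut endpoint.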
func TestServer_BatchTask(t *testing.T) {
c := NewConfig()
c.InfluxDB[0].Enabled = true
count := 0
stopTimeC := make(chan time.Time, 1)
db := NewInfluxDB(func(q string) *iclient.Response {
stmt, err := influxql.ParseStatement(q)
if err != nil {
return &iclient.Response{Err: err.Error()}
}
slct, ok := stmt.(*influxql.SelectStatement)
if !ok {
return nil
}
cond, ok := slct.Condition.(*influxql.BinaryExpr)
if !ok {
return &iclient.Response{Err: "expected select condition to be binary expression"}
}
stopTimeExpr, ok := cond.RHS.(*influxql.BinaryExpr)
if !ok {
return &iclient.Response{Err: "expected select condition rhs to be binary expression"}
}
stopTL, ok := stopTimeExpr.RHS.(*influxql.StringLiteral)
if !ok {
return &iclient.Response{Err: "expected select condition rhs to be string literal"}
}
count++
switch count {
case 1:
stopTime, err := time.Parse(time.RFC3339Nano, stopTL.Val)
if err != nil {
return &iclient.Response{Err: err.Error()}
}
stopTimeC <- stopTime
return &iclient.Response{
Results: []iclient.Result{{
Series: []imodels.Row{{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
stopTime.Add(-2 * time.Millisecond).Format(time.RFC3339Nano),
1.0,
},
{
stopTime.Add(-1 * time.Millisecond).Format(time.RFC3339Nano),
1.0,
},
},
}},
}},
}
default:
return &iclient.Response{
Results: []iclient.Result{{
Series: []imodels.Row{{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{},
}},
}},
}
}
})
c.InfluxDB[0].URLs = []string{db.URL()}
s := OpenServer(c)
defer s.Close()
cli := Client(s)
id := "testBatchTask"
ttype := client.BatchTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tick := `batch
|query('SELECT value from mydb.myrp.cpu')
.period(5ms)
.every(5ms)
.align()
|count('value')
|where(lambda: "count" == 2)
|httpOut('count')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id)
timeout := time.NewTimer(100 * time.Millisecond)
defer timeout.Stop()
select {
case <-timeout.C:
t.Fatal("timed out waiting for query")
case stopTime := <-stopTimeC:
exp := fmt.Sprintf(`{"series":[{"name":"cpu","columns":["time","count"],"values":[["%s",2]]}]}`, stopTime.Local().Format(time.RFC3339Nano))
err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5)
if err != nil {
t.Error(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
}
}
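// TestServer_BatchTask_InfluxDBConfigUpdate verifies that updating the
// InfluxDB URLs through the config API while a batch task is running reroutes
// its queries from the bad backend (which must have been queried at least
// once) to the good one.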
func TestServer_BatchTask_InfluxDBConfigUpdate(t *testing.T) {
c := NewConfig()
c.InfluxDB[0].Enabled = true
count := 0
stopTimeC := make(chan time.Time, 1)
badCount := 0
dbBad := NewInfluxDB(func(q string) *iclient.Response {
badCount++
// Return empty results
return &iclient.Response{
Results: []iclient.Result{},
}
})
defer dbBad.Close()
db := NewInfluxDB(func(q string) *iclient.Response {
stmt, err := influxql.ParseStatement(q)
if err != nil {
return &iclient.Response{Err: err.Error()}
}
slct, ok := stmt.(*influxql.SelectStatement)
if !ok {
return nil
}
cond, ok := slct.Condition.(*influxql.BinaryExpr)
if !ok {
return &iclient.Response{Err: "expected select condition to be binary expression"}
}
stopTimeExpr, ok := cond.RHS.(*influxql.BinaryExpr)
if !ok {
return &iclient.Response{Err: "expected select condition rhs to be binary expression"}
}
stopTL, ok := stopTimeExpr.RHS.(*influxql.StringLiteral)
if !ok {
return &iclient.Response{Err: "expected select condition rhs to be string literal"}
}
count++
switch count {
case 1:
stopTime, err := time.Parse(time.RFC3339Nano, stopTL.Val)
if err != nil {
return &iclient.Response{Err: err.Error()}
}
stopTimeC <- stopTime
return &iclient.Response{
Results: []iclient.Result{{
Series: []imodels.Row{{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
stopTime.Add(-2 * time.Millisecond).Format(time.RFC3339Nano),
1.0,
},
{
stopTime.Add(-1 * time.Millisecond).Format(time.RFC3339Nano),
1.0,
},
},
}},
}},
}
default:
return &iclient.Response{
Results: []iclient.Result{{
Series: []imodels.Row{{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{},
}},
}},
}
}
})
defer db.Close()
// Set bad URL first
c.InfluxDB[0].URLs = []string{dbBad.URL()}
s := OpenServer(c)
defer s.Close()
cli := Client(s)
id := "testBatchTask"
ttype := client.BatchTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tick := `batch
|query('SELECT value from mydb.myrp.cpu')
.period(5ms)
.every(5ms)
.align()
|count('value')
|where(lambda: "count" == 2)
|httpOut('count')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
// Update the InfluxDB config while the task is running
influxdbDefault := cli.ConfigElementLink("influxdb", "default")
if err := cli.ConfigUpdate(influxdbDefault, client.ConfigUpdateAction{
Set: map[string]interface{}{
"urls": []string{db.URL()},
},
}); err != nil {
t.Fatal(err)
}
endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id)
timeout := time.NewTimer(100 * time.Millisecond)
defer timeout.Stop()
select {
case <-timeout.C:
t.Fatal("timed out waiting for query")
case stopTime := <-stopTimeC:
exp := fmt.Sprintf(`{"series":[{"name":"cpu","columns":["time","count"],"values":[["%s",2]]}]}`, stopTime.Local().Format(time.RFC3339Nano))
err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5)
if err != nil {
t.Error(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
}
if badCount == 0 {
t.Error("expected bad influxdb to be queried at least once")
}
}
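// TestServer_InvalidBatchTask verifies that a batch task querying a
// database/retention policy it has no access to fails to enable with a
// descriptive error and can still be deleted.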
func TestServer_InvalidBatchTask(t *testing.T) {
c := NewConfig()
c.InfluxDB[0].Enabled = true
db := NewInfluxDB(func(q string) *iclient.Response {
return nil
})
c.InfluxDB[0].URLs = []string{db.URL()}
s := OpenServer(c)
defer s.Close()
cli := Client(s)
id := "testInvalidBatchTask"
ttype := client.BatchTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tick := `batch
|query(' SELECT value from unknowndb.unknownrp.cpu ')
.period(5ms)
.every(5ms)
|count('value')
|httpOut('count')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
})
expErr := `batch query is not allowed to request data from "unknowndb"."unknownrp"`
if err == nil || err.Error() != expErr {
t.Fatalf("unexpected err: got %v exp %s", err, expErr)
}
err = cli.DeleteTask(task.Link)
if err != nil {
t.Fatal(err)
}
}
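// TestServer_RecordReplayStream records live stream data for a task, replays
// the recording with the recorded clock, checks the alert log the replay
// produces, and then exercises listing and deleting recordings and replays.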
func TestServer_RecordReplayStream(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testStreamTask"
ttype := client.StreamTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tmpDir, err := ioutil.TempDir("", "testStreamTaskRecording")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tick := `stream
|from()
.measurement('test')
|window()
.period(10s)
.every(10s)
|count('value')
|alert()
.id('test-count')
.message('{{ .ID }} got: {{ index .Fields "count" }}')
.crit(lambda: TRUE)
.log('` + tmpDir + `/alert.log')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
recording, err := cli.RecordStream(client.RecordStreamOptions{
ID: "recordingid",
Task: task.ID,
Stop: time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC),
})
if err != nil {
t.Fatal(err)
}
if exp, got := "/kapacitor/v1/recordings/recordingid", recording.Link.Href; exp != got {
t.Errorf("unexpected recording.Link.Href got %s exp %s", got, exp)
}
points := `test value=1 0000000000
test value=1 0000000001
test value=1 0000000001
test value=1 0000000002
test value=1 0000000002
test value=1 0000000003
test value=1 0000000003
test value=1 0000000004
test value=1 0000000005
test value=1 0000000005
test value=1 0000000005
test value=1 0000000006
test value=1 0000000007
test value=1 0000000008
test value=1 0000000009
test value=1 0000000010
test value=1 0000000011
test value=1 0000000012
`
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", points, v)
retry := 0
for recording.Status == client.Running {
time.Sleep(100 * time.Millisecond)
recording, err = cli.Recording(recording.Link)
if err != nil {
t.Fatal(err)
}
retry++
if retry > 10 {
t.Fatal("failed to finish recording")
}
}
if recording.Status != client.Finished || recording.Error != "" {
t.Errorf("recording failed: %s", recording.Error)
}
replay, err := cli.CreateReplay(client.CreateReplayOptions{
ID: "replayid",
Task: id,
Recording: recording.ID,
Clock: client.Fast,
RecordingTime: true,
})
if err != nil {
t.Fatal(err)
}
if exp, got := "/kapacitor/v1/replays/replayid", replay.Link.Href; exp != got {
t.Errorf("unexpected replay.Link.Href got %s exp %s", got, exp)
}
if exp, got := id, replay.Task; exp != got {
t.Errorf("unexpected replay.Task got %s exp %s", got, exp)
}
retry = 0
for replay.Status == client.Running {
time.Sleep(100 * time.Millisecond)
replay, err = cli.Replay(replay.Link)
if err != nil {
t.Fatal(err)
}
retry++
if retry > 10 {
t.Fatal("failed to finish replay")
}
}
if replay.Status != client.Finished || replay.Error != "" {
t.Errorf("replay failed: %s", replay.Error)
}
f, err := os.Open(filepath.Join(tmpDir, "alert.log"))
if err != nil {
t.Fatal(err)
}
defer f.Close()
type response struct {
ID string `json:"id"`
Message string `json:"message"`
Time time.Time `json:"time"`
Level string `json:"level"`
Data influxql.Result `json:"data"`
}
exp := response{
ID: "test-count",
Message: "test-count got: 15",
Time: time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC),
Level: "CRITICAL",
Data: influxql.Result{
Series: imodels.Rows{
{
Name: "test",
Columns: []string{"time", "count"},
Values: [][]interface{}{
{
time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC).Format(time.RFC3339Nano),
15.0,
},
},
},
},
},
}
got := response{}
d := json.NewDecoder(f)
if err := d.Decode(&got); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(exp, got) {
t.Errorf("unexpected alert log:\ngot %v\nexp %v", got, exp)
}
recordings, err := cli.ListRecordings(nil)
if err != nil {
t.Error(err)
}
if exp, got := 1, len(recordings); exp != got {
t.Fatalf("unexpected recordings list:\ngot %v\nexp %v\nrecordings %v", got, exp, recordings)
}
err = cli.DeleteRecording(recordings[0].Link)
if err != nil {
t.Error(err)
}
recordings, err = cli.ListRecordings(nil)
if err != nil {
t.Error(err)
}
if exp, got := 0, len(recordings); exp != got {
t.Errorf("unexpected recordings list after delete:\ngot %v\nexp %v\nrecordings %v", got, exp, recordings)
}
replays, err := cli.ListReplays(nil)
if err != nil {
t.Error(err)
}
if exp, got := 1, len(replays); exp != got {
t.Fatalf("unexpected replays list:\ngot %v\nexp %v\nreplays %v", got, exp, replays)
}
err = cli.DeleteReplay(replays[0].Link)
if err != nil {
t.Error(err)
}
replays, err = cli.ListReplays(nil)
if err != nil {
t.Error(err)
}
if exp, got := 0, len(replays); exp != got {
t.Errorf("unexpected replays list after delete:\ngot %v\nexp %v\nreplays %v", got, exp, replays)
}
}
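// TestServer_RecordReplayStreamWithPost repeats the record/replay stream flow
// with an additional .post() alert handler in the TICKscript.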
func TestServer_RecordReplayStreamWithPost(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
id := "testStreamTask"
ttype := client.StreamTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tmpDir, err := ioutil.TempDir("", "testStreamTaskRecording")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tick := `stream
|from()
.measurement('test')
|window()
.period(10s)
.every(10s)
|count('value')
|alert()
.id('test-count')
.message('{{ .ID }} got: {{ index .Fields "count" }}')
.crit(lambda: TRUE)
.post('http://localhost:8080')
.log('` + tmpDir + `/alert.log')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
recording, err := cli.RecordStream(client.RecordStreamOptions{
ID: "recordingid",
Task: task.ID,
Stop: time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC),
})
if err != nil {
t.Fatal(err)
}
if exp, got := "/kapacitor/v1/recordings/recordingid", recording.Link.Href; exp != got {
t.Errorf("unexpected recording.Link.Href got %s exp %s", got, exp)
}
points := `test value=1 0000000000
test value=1 0000000001
test value=1 0000000001
test value=1 0000000002
test value=1 0000000002
test value=1 0000000003
test value=1 0000000003
test value=1 0000000004
test value=1 0000000005
test value=1 0000000005
test value=1 0000000005
test value=1 0000000006
test value=1 0000000007
test value=1 0000000008
test value=1 0000000009
test value=1 0000000010
test value=1 0000000011
test value=1 0000000012
`
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", points, v)
retry := 0
for recording.Status == client.Running {
time.Sleep(100 * time.Millisecond)
recording, err = cli.Recording(recording.Link)
if err != nil {
t.Fatal(err)
}
retry++
if retry > 10 {
t.Fatal("failed to finish recording")
}
}
if recording.Status != client.Finished || recording.Error != "" {
t.Errorf("recording failed: %s", recording.Error)
}
replay, err := cli.CreateReplay(client.CreateReplayOptions{
ID: "replayid",
Task: id,
Recording: recording.ID,
Clock: client.Fast,
RecordingTime: true,
})
if err != nil {
t.Fatal(err)
}
if exp, got := "/kapacitor/v1/replays/replayid", replay.Link.Href; exp != got {
t.Errorf("unexpected replay.Link.Href got %s exp %s", got, exp)
}
if exp, got := id, replay.Task; exp != got {
t.Errorf("unexpected replay.Task got %s exp %s", got, exp)
}
retry = 0
for replay.Status == client.Running {
time.Sleep(100 * time.Millisecond)
replay, err = cli.Replay(replay.Link)
if err != nil {
t.Fatal(err)
}
retry++
if retry > 10 {
t.Fatal("failed to finish replay")
}
}
if replay.Status != client.Finished || replay.Error != "" {
t.Errorf("replay failed: %s", replay.Error)
}
f, err := os.Open(filepath.Join(tmpDir, "alert.log"))
if err != nil {
t.Fatal(err)
}
defer f.Close()
type response struct {
ID string `json:"id"`
Message string `json:"message"`
Time time.Time `json:"time"`
Level string `json:"level"`
Data influxql.Result `json:"data"`
}
exp := response{
ID: "test-count",
Message: "test-count got: 15",
Time: time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC),
Level: "CRITICAL",
Data: influxql.Result{
Series: imodels.Rows{
{
Name: "test",
Columns: []string{"time", "count"},
Values: [][]interface{}{
{
time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC).Format(time.RFC3339Nano),
15.0,
},
},
},
},
},
}
got := response{}
d := json.NewDecoder(f)
if err := d.Decode(&got); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(exp, got) {
t.Errorf("unexpected alert log:\ngot %v\nexp %v", got, exp)
}
recordings, err := cli.ListRecordings(nil)
if err != nil {
t.Error(err)
}
if exp, got := 1, len(recordings); exp != got {
t.Fatalf("unexpected recordings list:\ngot %v\nexp %v\nrecordings %v", got, exp, recordings)
}
err = cli.DeleteRecording(recordings[0].Link)
if err != nil {
t.Error(err)
}
recordings, err = cli.ListRecordings(nil)
if err != nil {
t.Error(err)
}
if exp, got := 0, len(recordings); exp != got {
t.Errorf("unexpected recordings list after delete:\ngot %v\nexp %v\nrecordings %v", got, exp, recordings)
}
replays, err := cli.ListReplays(nil)
if err != nil {
t.Error(err)
}
if exp, got := 1, len(replays); exp != got {
t.Fatalf("unexpected replays list:\ngot %v\nexp %v\nreplays %v", got, exp, replays)
}
err = cli.DeleteReplay(replays[0].Link)
if err != nil {
t.Error(err)
}
replays, err = cli.ListReplays(nil)
if err != nil {
t.Error(err)
}
if exp, got := 0, len(replays); exp != got {
t.Errorf("unexpected replays list after delete:\ngot %v\nexp %v\nreplays %v", got, exp, replays)
}
}
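// TestServer_RecordReplayBatch records batch query results from a mock
// InfluxDB, replays them against the task, and checks the two CRITICAL
// alerts written to the log before cleaning up recordings and replays.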
func TestServer_RecordReplayBatch(t *testing.T) {
c := NewConfig()
c.InfluxDB[0].Enabled = true
value := 0
db := NewInfluxDB(func(q string) *iclient.Response {
if len(q) > 6 && q[:6] == "SELECT" {
r := &iclient.Response{
Results: []iclient.Result{{
Series: []imodels.Row{{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, value, 0, time.UTC).Format(time.RFC3339Nano),
float64(value),
},
{
time.Date(1971, 1, 1, 0, 0, value+1, 0, time.UTC).Format(time.RFC3339Nano),
float64(value + 1),
},
},
}},
}},
}
value += 2
return r
}
return nil
})
c.InfluxDB[0].URLs = []string{db.URL()}
s := OpenServer(c)
defer s.Close()
cli := Client(s)
id := "testBatchTask"
ttype := client.BatchTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tmpDir, err := ioutil.TempDir("", "testBatchTaskRecording")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tick := `batch
|query('SELECT value from mydb.myrp.cpu')
.period(2s)
.every(2s)
|alert()
.id('test-batch')
.message('{{ .ID }} got: {{ index .Fields "value" }}')
.crit(lambda: "value" > 2.0)
.log('` + tmpDir + `/alert.log')
`
_, err = cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
recording, err := cli.RecordBatch(client.RecordBatchOptions{
ID: "recordingid",
Task: id,
Start: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC),
Stop: time.Date(1971, 1, 1, 0, 0, 6, 0, time.UTC),
})
if err != nil {
t.Fatal(err)
}
if exp, got := "/kapacitor/v1/recordings/recordingid", recording.Link.Href; exp != got {
t.Errorf("unexpected recording.Link.Href got %s exp %s", got, exp)
}
// Wait for recording to finish.
retry := 0
for recording.Status == client.Running {
time.Sleep(100 * time.Millisecond)
recording, err = cli.Recording(recording.Link)
if err != nil {
t.Fatal(err)
}
retry++
if retry > 10 {
t.Fatal("failed to perfom recording")
}
}
replay, err := cli.CreateReplay(client.CreateReplayOptions{
Task: id,
Recording: recording.ID,
Clock: client.Fast,
RecordingTime: true,
})
if err != nil {
t.Fatal(err)
}
if exp, got := id, replay.Task; exp != got {
t.Errorf("unexpected replay.Task got %s exp %s", got, exp)
}
// Wait for replay to finish.
retry = 0
for replay.Status == client.Running {
time.Sleep(100 * time.Millisecond)
replay, err = cli.Replay(replay.Link)
if err != nil {
t.Fatal(err)
}
retry++
if retry > 10 {
t.Fatal("failed to perform replay")
}
}
f, err := os.Open(filepath.Join(tmpDir, "alert.log"))
if err != nil {
t.Fatal(err)
}
defer f.Close()
type response struct {
ID string `json:"id"`
Message string `json:"message"`
Time time.Time `json:"time"`
Level string `json:"level"`
Data influxql.Result `json:"data"`
}
exp := []response{
{
ID: "test-batch",
Message: "test-batch got: 3",
Time: time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC),
Level: "CRITICAL",
Data: influxql.Result{
Series: imodels.Rows{
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC).Format(time.RFC3339Nano),
2.0,
},
{
time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC).Format(time.RFC3339Nano),
3.0,
},
},
},
},
},
},
{
ID: "test-batch",
Message: "test-batch got: 4",
Time: time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC),
Level: "CRITICAL",
Data: influxql.Result{
Series: imodels.Rows{
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC).Format(time.RFC3339Nano),
4.0,
},
{
time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC).Format(time.RFC3339Nano),
5.0,
},
},
},
},
},
},
}
dec := json.NewDecoder(f)
got := make([]response, 0)
for dec.More() {
g := response{}
if err := dec.Decode(&g); err != nil {
t.Fatal(err)
}
got = append(got, g)
}
if !reflect.DeepEqual(exp, got) {
t.Errorf("unexpected alert log:\ngot %v\nexp %v", got, exp)
// Only dig into individual entries when the shapes match, so a short or
// malformed log cannot panic the test with an index error.
for i := range got {
if i < len(exp) && len(got[i].Data.Series) > 0 && len(exp[i].Data.Series) > 0 {
t.Errorf("unexpected alert log entry %d:\ngot %v\nexp %v", i, got[i].Data.Series[0], exp[i].Data.Series[0])
}
}
}
recordings, err := cli.ListRecordings(nil)
if err != nil {
t.Error(err)
}
if exp, got := 1, len(recordings); exp != got {
t.Fatalf("unexpected recordings list:\ngot %v\nexp %v", got, exp)
}
err = cli.DeleteRecording(recordings[0].Link)
if err != nil {
t.Error(err)
}
recordings, err = cli.ListRecordings(nil)
if err != nil {
t.Error(err)
}
if exp, got := 0, len(recordings); exp != got {
t.Errorf("unexpected recordings list:\ngot %v\nexp %v", got, exp)
}
replays, err := cli.ListReplays(nil)
if err != nil {
t.Error(err)
}
if exp, got := 1, len(replays); exp != got {
t.Fatalf("unexpected replays list:\ngot %v\nexp %v", got, exp)
}
err = cli.DeleteReplay(replays[0].Link)
if err != nil {
t.Error(err)
}
replays, err = cli.ListReplays(nil)
if err != nil {
t.Error(err)
}
if exp, got := 0, len(replays); exp != got {
t.Errorf("unexpected replays list:\ngot %v\nexp %v", got, exp)
}
}
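// TestServer_ReplayBatch replays batch queries directly against a task via
// ReplayBatch, without creating a recording first, and verifies that no
// recording is left behind.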
func TestServer_ReplayBatch(t *testing.T) {
c := NewConfig()
c.InfluxDB[0].Enabled = true
value := 0
db := NewInfluxDB(func(q string) *iclient.Response {
if len(q) > 6 && q[:6] == "SELECT" {
r := &iclient.Response{
Results: []iclient.Result{{
Series: []imodels.Row{{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, value, 0, time.UTC).Format(time.RFC3339Nano),
float64(value),
},
{
time.Date(1971, 1, 1, 0, 0, value+1, 0, time.UTC).Format(time.RFC3339Nano),
float64(value + 1),
},
},
}},
}},
}
value += 2
return r
}
return nil
})
c.InfluxDB[0].URLs = []string{db.URL()}
s := OpenServer(c)
defer s.Close()
cli := Client(s)
id := "testBatchTask"
ttype := client.BatchTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tmpDir, err := ioutil.TempDir("", "testBatchTaskRecording")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tick := `batch
|query('SELECT value from mydb.myrp.cpu')
.period(2s)
.every(2s)
|alert()
.id('test-batch')
.message('{{ .ID }} got: {{ index .Fields "value" }}')
.crit(lambda: "value" > 2.0)
.log('` + tmpDir + `/alert.log')
`
_, err = cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
replay, err := cli.ReplayBatch(client.ReplayBatchOptions{
ID: "replayid",
Task: id,
Start: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC),
Stop: time.Date(1971, 1, 1, 0, 0, 6, 0, time.UTC),
Clock: client.Fast,
RecordingTime: true,
})
if err != nil {
t.Fatal(err)
}
if exp, got := "/kapacitor/v1/replays/replayid", replay.Link.Href; exp != got {
t.Errorf("unexpected replay.Link.Href got %s exp %s", got, exp)
}
// Wait for replay to finish.
retry := 0
for replay.Status == client.Running {
time.Sleep(100 * time.Millisecond)
replay, err = cli.Replay(replay.Link)
if err != nil {
t.Fatal(err)
}
retry++
if retry > 10 {
t.Fatal("failed to perfom replay")
}
}
f, err := os.Open(filepath.Join(tmpDir, "alert.log"))
if err != nil {
t.Fatal(err)
}
defer f.Close()
type response struct {
ID string `json:"id"`
Message string `json:"message"`
Time time.Time `json:"time"`
Level string `json:"level"`
Data influxql.Result `json:"data"`
}
exp := []response{
{
ID: "test-batch",
Message: "test-batch got: 3",
Time: time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC),
Level: "CRITICAL",
Data: influxql.Result{
Series: imodels.Rows{
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC).Format(time.RFC3339Nano),
2.0,
},
{
time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC).Format(time.RFC3339Nano),
3.0,
},
},
},
},
},
},
{
ID: "test-batch",
Message: "test-batch got: 4",
Time: time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC),
Level: "CRITICAL",
Data: influxql.Result{
Series: imodels.Rows{
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC).Format(time.RFC3339Nano),
4.0,
},
{
time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC).Format(time.RFC3339Nano),
5.0,
},
},
},
},
},
},
}
dec := json.NewDecoder(f)
got := make([]response, 0)
for dec.More() {
g := response{}
if err := dec.Decode(&g); err != nil {
t.Fatal(err)
}
got = append(got, g)
}
if !reflect.DeepEqual(exp, got) {
t.Errorf("unexpected alert log:\ngot %v\nexp %v", got, exp)
// Only dig into individual entries when the shapes match, so a short or
// malformed log cannot panic the test with an index error.
for i := range got {
if i < len(exp) && len(got[i].Data.Series) > 0 && len(exp[i].Data.Series) > 0 {
t.Errorf("unexpected alert log entry %d:\ngot %v\nexp %v", i, got[i].Data.Series[0], exp[i].Data.Series[0])
}
}
}
recordings, err := cli.ListRecordings(nil)
if err != nil {
t.Error(err)
}
if exp, got := 0, len(recordings); exp != got {
t.Fatalf("unexpected recordings list:\ngot %v\nexp %v", got, exp)
}
replays, err := cli.ListReplays(nil)
if err != nil {
t.Error(err)
}
if exp, got := 1, len(replays); exp != got {
t.Fatalf("unexpected replays list:\ngot %v\nexp %v", got, exp)
}
err = cli.DeleteReplay(replays[0].Link)
if err != nil {
t.Error(err)
}
replays, err = cli.ListReplays(nil)
if err != nil {
t.Error(err)
}
if exp, got := 0, len(replays); exp != got {
t.Errorf("unexpected replays list:\ngot %v\nexp %v", got, exp)
}
}
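// TestServer_RecordReplayQuery records the result of a raw query via
// RecordQuery, replays it against the task, and also exercises the raw HTTP
// list endpoints for recordings and replays.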
func TestServer_RecordReplayQuery(t *testing.T) {
c := NewConfig()
c.InfluxDB[0].Enabled = true
db := NewInfluxDB(func(q string) *iclient.Response {
if len(q) > 6 && q[:6] == "SELECT" {
r := &iclient.Response{
Results: []iclient.Result{{
Series: []imodels.Row{
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC).Format(time.RFC3339Nano),
0.0,
},
{
time.Date(1971, 1, 1, 0, 0, 1, 0, time.UTC).Format(time.RFC3339Nano),
1.0,
},
},
},
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC).Format(time.RFC3339Nano),
2.0,
},
{
time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC).Format(time.RFC3339Nano),
3.0,
},
},
},
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC).Format(time.RFC3339Nano),
4.0,
},
{
time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC).Format(time.RFC3339Nano),
5.0,
},
},
},
},
}},
}
return r
}
return nil
})
c.InfluxDB[0].URLs = []string{db.URL()}
s := OpenServer(c)
defer s.Close()
cli := Client(s)
id := "testBatchTask"
ttype := client.BatchTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tmpDir, err := ioutil.TempDir("", "testBatchTaskRecording")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tick := `batch
|query('SELECT value from mydb.myrp.cpu')
.period(2s)
.every(2s)
|alert()
.id('test-batch')
.message('{{ .ID }} got: {{ index .Fields "value" }}')
.crit(lambda: "value" > 2.0)
.log('` + tmpDir + `/alert.log')
`
_, err = cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
recording, err := cli.RecordQuery(client.RecordQueryOptions{
ID: "recordingid",
Query: "SELECT value from mydb.myrp.cpu",
Type: client.BatchTask,
})
if err != nil {
t.Fatal(err)
}
if exp, got := "/kapacitor/v1/recordings/recordingid", recording.Link.Href; exp != got {
t.Errorf("unexpected recording.Link.Href got %s exp %s", got, exp)
}
// Wait for recording to finish.
retry := 0
for recording.Status == client.Running {
time.Sleep(100 * time.Millisecond)
recording, err = cli.Recording(recording.Link)
if err != nil {
t.Fatal(err)
}
retry++
if retry > 10 {
t.Fatal("failed to perfom recording")
}
}
replay, err := cli.CreateReplay(client.CreateReplayOptions{
Task: id,
Recording: recording.ID,
Clock: client.Fast,
RecordingTime: true,
})
if err != nil {
t.Fatal(err)
}
if exp, got := id, replay.Task; exp != got {
t.Errorf("unexpected replay.Task got %s exp %s", got, exp)
}
// Wait for replay to finish.
retry = 0
for replay.Status == client.Running {
time.Sleep(100 * time.Millisecond)
replay, err = cli.Replay(replay.Link)
if err != nil {
t.Fatal(err)
}
retry++
if retry > 10 {
t.Fatal("failed to perfom replay")
}
}
f, err := os.Open(filepath.Join(tmpDir, "alert.log"))
if err != nil {
t.Fatal(err)
}
defer f.Close()
type response struct {
ID string `json:"id"`
Message string `json:"message"`
Time time.Time `json:"time"`
Level string `json:"level"`
Data influxql.Result `json:"data"`
}
exp := []response{
{
ID: "test-batch",
Message: "test-batch got: 3",
Time: time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC),
Level: "CRITICAL",
Data: influxql.Result{
Series: imodels.Rows{
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC).Format(time.RFC3339Nano),
2.0,
},
{
time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC).Format(time.RFC3339Nano),
3.0,
},
},
},
},
},
},
{
ID: "test-batch",
Message: "test-batch got: 4",
Time: time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC),
Level: "CRITICAL",
Data: influxql.Result{
Series: imodels.Rows{
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC).Format(time.RFC3339Nano),
4.0,
},
{
time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC).Format(time.RFC3339Nano),
5.0,
},
},
},
},
},
},
}
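// The alert log holds one JSON-encoded alert event per line; decode the full stream before comparing.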
dec := json.NewDecoder(f)
got := make([]response, 0)
for dec.More() {
g := response{}
dec.Decode(&g)
got = append(got, g)
}
if !reflect.DeepEqual(exp, got) {
t.Errorf("unexpected alert log:\ngot %v\nexp %v", got, exp)
t.Errorf("unexpected alert log:\ngot %v\nexp %v", got[0].Data.Series[0], exp[0].Data.Series[0])
t.Errorf("unexpected alert log:\ngot %v\nexp %v", got[1].Data.Series[0], exp[1].Data.Series[0])
}
// ------------
// Test List/Delete Recordings/Replays
recordings, err := cli.ListRecordings(nil)
if err != nil {
t.Error(err)
}
if exp, got := 1, len(recordings); exp != got {
t.Fatalf("unexpected recordings list:\ngot %v\nexp %v", got, exp)
}
// Test List Recordings via direct default URL
resp, err := http.Get(s.URL() + "/recordings")
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if exp, got := http.StatusOK, resp.StatusCode; exp != got {
t.Errorf("unexpected status code, got %d exp %d", got, exp)
}
// Response type
type recResponse struct {
Recordings []client.Recording `json:"recordings"`
}
dec = json.NewDecoder(resp.Body)
recR := recResponse{}
dec.Decode(&recR)
if exp, got := 1, len(recR.Recordings); exp != got {
t.Fatalf("unexpected recordings count, got %d exp %d", got, exp)
}
err = cli.DeleteRecording(recordings[0].Link)
if err != nil {
t.Error(err)
}
recordings, err = cli.ListRecordings(nil)
if err != nil {
t.Error(err)
}
if exp, got := 0, len(recordings); exp != got {
t.Errorf("unexpected recordings list:\ngot %v\nexp %v", got, exp)
}
replays, err := cli.ListReplays(nil)
if err != nil {
t.Error(err)
}
if exp, got := 1, len(replays); exp != got {
t.Fatalf("unexpected replays list:\ngot %v\nexp %v", got, exp)
}
// Test List Replays via direct default URL
resp, err = http.Get(s.URL() + "/replays")
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if exp, got := http.StatusOK, resp.StatusCode; exp != got {
t.Errorf("unexpected status code, got %d exp %d", got, exp)
}
// Response type
type repResponse struct {
Replays []client.Replay `json:"replays"`
}
dec = json.NewDecoder(resp.Body)
repR := repResponse{}
dec.Decode(&repR)
if exp, got := 1, len(repR.Replays); exp != got {
t.Fatalf("unexpected replays count, got %d exp %d", got, exp)
}
err = cli.DeleteReplay(replays[0].Link)
if err != nil {
t.Error(err)
}
replays, err = cli.ListReplays(nil)
if err != nil {
t.Error(err)
}
if exp, got := 0, len(replays); exp != got {
t.Errorf("unexpected replays list:\ngot %v\nexp %v", got, exp)
}
}
func TestServer_ReplayQuery(t *testing.T) {
c := NewConfig()
c.InfluxDB[0].Enabled = true
db := NewInfluxDB(func(q string) *iclient.Response {
if len(q) > 6 && q[:6] == "SELECT" {
r := &iclient.Response{
Results: []iclient.Result{{
Series: []imodels.Row{
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC).Format(time.RFC3339Nano),
0.0,
},
{
time.Date(1971, 1, 1, 0, 0, 1, 0, time.UTC).Format(time.RFC3339Nano),
1.0,
},
},
},
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC).Format(time.RFC3339Nano),
2.0,
},
{
time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC).Format(time.RFC3339Nano),
3.0,
},
},
},
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC).Format(time.RFC3339Nano),
4.0,
},
{
time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC).Format(time.RFC3339Nano),
5.0,
},
},
},
},
}},
}
return r
}
return nil
})
c.InfluxDB[0].URLs = []string{db.URL()}
s := OpenServer(c)
defer s.Close()
cli := Client(s)
id := "testBatchTask"
ttype := client.BatchTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tmpDir, err := ioutil.TempDir("", "testBatchTaskRecording")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tick := `batch
|query('SELECT value from mydb.myrp.cpu')
.period(2s)
.every(2s)
|alert()
.id('test-batch')
.message('{{ .ID }} got: {{ index .Fields "value" }}')
.crit(lambda: "value" > 2.0)
.log('` + tmpDir + `/alert.log')
`
_, err = cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
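// ReplayQuery runs the query and replays its results directly against the task,
// without creating a separate recording (the recordings list is verified to be empty below).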
replay, err := cli.ReplayQuery(client.ReplayQueryOptions{
ID: "replayid",
Query: "SELECT value from mydb.myrp.cpu",
Task: id,
Clock: client.Fast,
RecordingTime: true,
})
if err != nil {
t.Fatal(err)
}
if exp, got := "/kapacitor/v1/replays/replayid", replay.Link.Href; exp != got {
t.Errorf("unexpected replay.Link.Href got %s exp %s", got, exp)
}
// Wait for replay to finish.
retry := 0
for replay.Status == client.Running {
time.Sleep(100 * time.Millisecond)
replay, err = cli.Replay(replay.Link)
if err != nil {
t.Fatal(err)
}
retry++
if retry > 10 {
t.Fatal("failed to perfom replay")
}
}
f, err := os.Open(filepath.Join(tmpDir, "alert.log"))
if err != nil {
t.Fatal(err)
}
defer f.Close()
type response struct {
ID string `json:"id"`
Message string `json:"message"`
Time time.Time `json:"time"`
Level string `json:"level"`
Data influxql.Result `json:"data"`
}
exp := []response{
{
ID: "test-batch",
Message: "test-batch got: 3",
Time: time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC),
Level: "CRITICAL",
Data: influxql.Result{
Series: imodels.Rows{
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC).Format(time.RFC3339Nano),
2.0,
},
{
time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC).Format(time.RFC3339Nano),
3.0,
},
},
},
},
},
},
{
ID: "test-batch",
Message: "test-batch got: 4",
Time: time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC),
Level: "CRITICAL",
Data: influxql.Result{
Series: imodels.Rows{
{
Name: "cpu",
Columns: []string{"time", "value"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC).Format(time.RFC3339Nano),
4.0,
},
{
time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC).Format(time.RFC3339Nano),
5.0,
},
},
},
},
},
},
}
dec := json.NewDecoder(f)
got := make([]response, 0)
for dec.More() {
g := response{}
dec.Decode(&g)
got = append(got, g)
}
if !reflect.DeepEqual(exp, got) {
t.Errorf("unexpected alert log:\ngot %v\nexp %v", got, exp)
t.Errorf("unexpected alert log:\ngot %v\nexp %v", got[0].Data.Series[0], exp[0].Data.Series[0])
t.Errorf("unexpected alert log:\ngot %v\nexp %v", got[1].Data.Series[0], exp[1].Data.Series[0])
}
recordings, err := cli.ListRecordings(nil)
if err != nil {
t.Error(err)
}
if exp, got := 0, len(recordings); exp != got {
t.Fatalf("unexpected recordings list:\ngot %v\nexp %v", got, exp)
}
replays, err := cli.ListReplays(nil)
if err != nil {
t.Error(err)
}
if exp, got := 1, len(replays); exp != got {
t.Fatalf("unexpected replays list:\ngot %v\nexp %v", got, exp)
}
err = cli.DeleteReplay(replays[0].Link)
if err != nil {
t.Error(err)
}
replays, err = cli.ListReplays(nil)
if err != nil {
t.Error(err)
}
if exp, got := 0, len(replays); exp != got {
t.Errorf("unexpected replays list:\ngot %v\nexp %v", got, exp)
}
}
// Test for recording and replaying a stream query where data has missing fields and tags.
func TestServer_RecordReplayQuery_Missing(t *testing.T) {
c := NewConfig()
c.InfluxDB[0].Enabled = true
db := NewInfluxDB(func(q string) *iclient.Response {
if len(q) > 6 && q[:6] == "SELECT" {
r := &iclient.Response{
Results: []iclient.Result{{
Series: []imodels.Row{
{
Name: "m",
Tags: map[string]string{"t1": "", "t2": ""},
Columns: []string{"time", "a", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 1, 0, time.UTC).Format(time.RFC3339Nano),
1.0,
nil,
},
{
time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC).Format(time.RFC3339Nano),
nil,
2.0,
},
{
time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC).Format(time.RFC3339Nano),
nil,
10.0,
},
{
time.Date(1971, 1, 1, 0, 0, 11, 0, time.UTC).Format(time.RFC3339Nano),
11.0,
nil,
},
},
},
{
Name: "m",
Tags: map[string]string{"t1": "", "t2": "4"},
Columns: []string{"time", "a", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC).Format(time.RFC3339Nano),
4.0,
4.0,
},
},
},
{
Name: "m",
Tags: map[string]string{"t1": "", "t2": "7"},
Columns: []string{"time", "a", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 7, 0, time.UTC).Format(time.RFC3339Nano),
nil,
7.0,
},
},
},
{
Name: "m",
Tags: map[string]string{"t1": "3", "t2": ""},
Columns: []string{"time", "a", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC).Format(time.RFC3339Nano),
3.0,
3.0,
},
},
},
{
Name: "m",
Tags: map[string]string{"t1": "5", "t2": ""},
Columns: []string{"time", "a", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC).Format(time.RFC3339Nano),
5.0,
5.0,
},
},
},
{
Name: "m",
Tags: map[string]string{"t1": "6", "t2": ""},
Columns: []string{"time", "a", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 6, 0, time.UTC).Format(time.RFC3339Nano),
nil,
6.0,
},
},
},
{
Name: "m",
Tags: map[string]string{"t1": "8", "t2": ""},
Columns: []string{"time", "a", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 8, 0, time.UTC).Format(time.RFC3339Nano),
nil,
8.0,
},
},
},
{
Name: "m",
Tags: map[string]string{"t1": "9", "t2": ""},
Columns: []string{"time", "a", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 9, 0, time.UTC).Format(time.RFC3339Nano),
nil,
9.0,
},
},
},
},
}},
}
return r
}
return nil
})
c.InfluxDB[0].URLs = []string{db.URL()}
s := OpenServer(c)
defer s.Close()
cli := Client(s)
id := "testStreamQueryRecordReplay"
ttype := client.StreamTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
// setup temp dir for alert.log
tmpDir, err := ioutil.TempDir("", "testStreamTaskRecordingReplay")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tick := `stream
|from()
.measurement('m')
|log()
|alert()
.id('test-stream-query')
.crit(lambda: TRUE)
.details('')
.log('` + tmpDir + `/alert.log')
`
if _, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
}); err != nil {
t.Fatal(err)
}
recording, err := cli.RecordQuery(client.RecordQueryOptions{
ID: "recordingid",
Query: "SELECT * FROM mydb.myrp.m",
Type: client.StreamTask,
})
if err != nil {
t.Fatal(err)
}
if exp, got := "/kapacitor/v1/recordings/recordingid", recording.Link.Href; exp != got {
t.Errorf("unexpected recording.Link.Href got %s exp %s", got, exp)
}
// Wait for recording to finish.
retry := 0
for recording.Status == client.Running {
time.Sleep(100 * time.Millisecond)
recording, err = cli.Recording(recording.Link)
if err != nil {
t.Fatal(err)
}
retry++
if retry > 10 {
t.Fatal("failed to perfom recording")
}
}
replay, err := cli.CreateReplay(client.CreateReplayOptions{
Task: id,
Recording: recording.ID,
Clock: client.Fast,
RecordingTime: true,
})
if err != nil {
t.Fatal(err)
}
if exp, got := id, replay.Task; exp != got {
t.Errorf("unexpected replay.Task got %s exp %s", got, exp)
}
// Wait for replay to finish.
retry = 0
for replay.Status == client.Running {
time.Sleep(100 * time.Millisecond)
replay, err = cli.Replay(replay.Link)
if err != nil {
t.Fatal(err)
}
retry++
if retry > 10 {
t.Fatal("failed to perfom replay")
}
}
// Validate we got the data in the alert.log
f, err := os.Open(filepath.Join(tmpDir, "alert.log"))
if err != nil {
t.Fatal(err)
}
defer f.Close()
exp := []alert.Data{
{
ID: "test-stream-query",
Message: "test-stream-query is CRITICAL",
Time: time.Date(1971, 1, 1, 0, 0, 1, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.OK,
Duration: 0 * time.Second,
Data: models.Result{
Series: models.Rows{
{
Name: "m",
Columns: []string{"time", "a"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 1, 0, time.UTC),
1.0,
},
},
},
},
},
},
{
ID: "test-stream-query",
Message: "test-stream-query is CRITICAL",
Time: time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 1 * time.Second,
Data: models.Result{
Series: models.Rows{
{
Name: "m",
Columns: []string{"time", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC),
2.0,
},
},
},
},
},
},
{
ID: "test-stream-query",
Message: "test-stream-query is CRITICAL",
Time: time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 2 * time.Second,
Data: models.Result{
Series: models.Rows{
{
Name: "m",
Tags: map[string]string{"t1": "3"},
Columns: []string{"time", "a", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC),
3.0,
3.0,
},
},
},
},
},
},
{
ID: "test-stream-query",
Message: "test-stream-query is CRITICAL",
Time: time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 3 * time.Second,
Data: models.Result{
Series: models.Rows{
{
Name: "m",
Tags: map[string]string{"t2": "4"},
Columns: []string{"time", "a", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC),
4.0,
4.0,
},
},
},
},
},
},
{
ID: "test-stream-query",
Message: "test-stream-query is CRITICAL",
Time: time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 4 * time.Second,
Data: models.Result{
Series: models.Rows{
{
Name: "m",
Tags: map[string]string{"t1": "5"},
Columns: []string{"time", "a", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC),
5.0,
5.0,
},
},
},
},
},
},
{
ID: "test-stream-query",
Message: "test-stream-query is CRITICAL",
Time: time.Date(1971, 1, 1, 0, 0, 6, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 5 * time.Second,
Data: models.Result{
Series: models.Rows{
{
Name: "m",
Tags: map[string]string{"t1": "6"},
Columns: []string{"time", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 6, 0, time.UTC),
6.0,
},
},
},
},
},
},
{
ID: "test-stream-query",
Message: "test-stream-query is CRITICAL",
Time: time.Date(1971, 1, 1, 0, 0, 7, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 6 * time.Second,
Data: models.Result{
Series: models.Rows{
{
Name: "m",
Tags: map[string]string{"t2": "7"},
Columns: []string{"time", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 7, 0, time.UTC),
7.0,
},
},
},
},
},
},
{
ID: "test-stream-query",
Message: "test-stream-query is CRITICAL",
Time: time.Date(1971, 1, 1, 0, 0, 8, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 7 * time.Second,
Data: models.Result{
Series: models.Rows{
{
Name: "m",
Tags: map[string]string{"t1": "8"},
Columns: []string{"time", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 8, 0, time.UTC),
8.0,
},
},
},
},
},
},
{
ID: "test-stream-query",
Message: "test-stream-query is CRITICAL",
Time: time.Date(1971, 1, 1, 0, 0, 9, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 8 * time.Second,
Data: models.Result{
Series: models.Rows{
{
Name: "m",
Tags: map[string]string{"t1": "9"},
Columns: []string{"time", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 9, 0, time.UTC),
9.0,
},
},
},
},
},
},
{
ID: "test-stream-query",
Message: "test-stream-query is CRITICAL",
Time: time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 9 * time.Second,
Data: models.Result{
Series: models.Rows{
{
Name: "m",
Columns: []string{"time", "b"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC),
10.0,
},
},
},
},
},
},
{
ID: "test-stream-query",
Message: "test-stream-query is CRITICAL",
Time: time.Date(1971, 1, 1, 0, 0, 11, 0, time.UTC),
Level: alert.Critical,
PreviousLevel: alert.Critical,
Duration: 10 * time.Second,
Data: models.Result{
Series: models.Rows{
{
Name: "m",
Columns: []string{"time", "a"},
Values: [][]interface{}{
{
time.Date(1971, 1, 1, 0, 0, 11, 0, time.UTC),
11.0,
},
},
},
},
},
},
}
dec := json.NewDecoder(f)
var got []alert.Data
for dec.More() {
g := alert.Data{}
dec.Decode(&g)
got = append(got, g)
}
if !reflect.DeepEqual(exp, got) {
t.Errorf("unexpected alert log:\ngot %+v\nexp %+v", got, exp)
}
}
// If this test fails due to missing python dependencies, run 'INSTALL_PREFIX=/usr/local ./install-deps.sh' from the root directory of the
// kapacitor project.
func TestServer_UDFStreamAgents(t *testing.T) {
tdir, err := ioutil.TempDir("", "kapacitor_server_test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tdir)
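// Run the same stream test once per UDF implementation: a compiled Go agent and a Python agent.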
agents := []struct {
buildFunc func() error
config udf.FunctionConfig
}{
// Go
{
buildFunc: func() error {
// Explicitly compile the binary.
// We could just use 'go run' but I ran into race conditions
// where 'go run' was not handing off to the compiled process in time
// and I didn't care to dig into 'go run's specific behavior.
cmd := exec.Command(
"go",
"build",
"-o",
filepath.Join(tdir, "movavg"+ExecutableSuffix),
filepath.Join(udfDir, "agent/examples/moving_avg/moving_avg.go"),
)
out, err := cmd.CombinedOutput()
if err != nil {
t.Log(string(out))
return err
}
return nil
},
config: udf.FunctionConfig{
Prog: filepath.Join(tdir, "movavg"),
Timeout: toml.Duration(time.Minute),
},
},
// Python
{
buildFunc: func() error { return nil },
config: udf.FunctionConfig{
Prog: PythonExecutable,
Args: []string{"-u", filepath.Join(udfDir, "agent/examples/moving_avg/moving_avg.py")},
Timeout: toml.Duration(time.Minute),
Env: map[string]string{
"PYTHONPATH": strings.Join(
[]string{filepath.Join(udfDir, "agent/py"), os.Getenv("PYTHONPATH")},
string(filepath.ListSeparator),
),
},
},
},
}
for _, agent := range agents {
err := agent.buildFunc()
if err != nil {
t.Fatal(err)
}
c := NewConfig()
c.UDF.Functions = map[string]udf.FunctionConfig{
"movingAvg": agent.config,
}
testStreamAgent(t, c)
}
}
func testStreamAgent(t *testing.T, c *server.Config) {
s := NewServer(c)
err := s.Open()
if err != nil {
t.Fatal(err)
}
defer s.Close()
cli := Client(s)
id := "testUDFTask"
ttype := client.StreamTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tick := `stream
|from()
.measurement('test')
.groupBy('group')
@movingAvg()
.field('value')
.size(10)
.as('mean')
|window()
.period(11s)
.every(11s)
|last('mean').as('mean')
|httpOut('moving_avg')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
endpoint := fmt.Sprintf("%s/tasks/%s/moving_avg", s.URL(), id)
// Request data before any writes and expect null responses
nullResponse := `{"series":null}`
err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5)
if err != nil {
t.Error(err)
}
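// Write two groups of points; the last moving average for each group is asserted below.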
points := `test,group=a value=1 0000000000
test,group=b value=2 0000000000
test,group=a value=1 0000000001
test,group=b value=2 0000000001
test,group=a value=1 0000000002
test,group=b value=2 0000000002
test,group=a value=1 0000000003
test,group=b value=2 0000000003
test,group=a value=1 0000000004
test,group=b value=2 0000000004
test,group=a value=1 0000000005
test,group=b value=2 0000000005
test,group=a value=1 0000000006
test,group=b value=2 0000000006
test,group=a value=1 0000000007
test,group=b value=2 0000000007
test,group=a value=1 0000000008
test,group=b value=2 0000000008
test,group=a value=1 0000000009
test,group=b value=2 0000000009
test,group=a value=0 0000000010
test,group=b value=1 0000000010
test,group=a value=0 0000000011
test,group=b value=0 0000000011
`
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", points, v)
exp := `{"series":[{"name":"test","tags":{"group":"a"},"columns":["time","mean"],"values":[["1970-01-01T00:00:11Z",0.9]]},{"name":"test","tags":{"group":"b"},"columns":["time","mean"],"values":[["1970-01-01T00:00:11Z",1.9]]}]}`
err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5)
if err != nil {
t.Error(err)
}
}
// If this test fails due to missing python dependencies, run 'INSTALL_PREFIX=/usr/local ./install-deps.sh' from the root directory of the
// kapacitor project.
func TestServer_UDFStreamAgentsSocket(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("Skipping on windows as unix sockets are not available")
}
tdir, err := ioutil.TempDir("", "kapacitor_server_test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tdir)
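// Each agent is started as an external process listening on a unix socket,
// one implemented in Go and one in Python.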
agents := []struct {
startFunc func() *exec.Cmd
config udf.FunctionConfig
}{
// Go
{
startFunc: func() *exec.Cmd {
cmd := exec.Command(
"go",
"build",
"-o",
filepath.Join(tdir, "mirror"+ExecutableSuffix),
filepath.Join(udfDir, "agent/examples/mirror/mirror.go"),
)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatal(string(out))
}
cmd = exec.Command(
filepath.Join(tdir, "mirror"),
"-socket",
filepath.Join(tdir, "mirror.go.sock"),
)
cmd.Stderr = os.Stderr
return cmd
},
config: udf.FunctionConfig{
Socket: filepath.Join(tdir, "mirror.go.sock"),
Timeout: toml.Duration(time.Minute),
},
},
// Python
{
startFunc: func() *exec.Cmd {
cmd := exec.Command(
PythonExecutable,
"-u",
filepath.Join(udfDir, "agent/examples/mirror/mirror.py"),
filepath.Join(tdir, "mirror.py.sock"),
)
cmd.Stderr = os.Stderr
env := os.Environ()
env = append(env, fmt.Sprintf(
"%s=%s",
"PYTHONPATH",
strings.Join(
[]string{filepath.Join(udfDir, "agent/py"), os.Getenv("PYTHONPATH")},
string(filepath.ListSeparator),
),
))
cmd.Env = env
return cmd
},
config: udf.FunctionConfig{
Socket: filepath.Join(tdir, "mirror.py.sock"),
Timeout: toml.Duration(time.Minute),
},
},
}
for _, agent := range agents {
cmd := agent.startFunc()
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
defer cmd.Process.Signal(os.Interrupt)
c := NewConfig()
c.UDF.Functions = map[string]udf.FunctionConfig{
"mirror": agent.config,
}
testStreamAgentSocket(t, c)
}
}
func testStreamAgentSocket(t *testing.T, c *server.Config) {
s := NewServer(c)
err := s.Open()
if err != nil {
t.Fatal(err)
}
defer s.Close()
cli := Client(s)
id := "testUDFTask"
ttype := client.StreamTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tick := `stream
|from()
.measurement('test')
.groupBy('group')
@mirror()
|window()
.period(10s)
.every(10s)
|count('value')
|httpOut('count')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id)
// Request data before any writes and expect null responses
nullResponse := `{"series":null}`
err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5)
if err != nil {
t.Error(err)
}
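// Write a single group of points; the mirror UDF passes them through unchanged,
// so the windowed count below should equal the number of points in the window.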
points := `test,group=a value=1 0000000000
test,group=a value=1 0000000001
test,group=a value=1 0000000002
test,group=a value=1 0000000003
test,group=a value=1 0000000004
test,group=a value=1 0000000005
test,group=a value=1 0000000006
test,group=a value=1 0000000007
test,group=a value=1 0000000008
test,group=a value=1 0000000009
test,group=a value=0 0000000010
test,group=a value=0 0000000011
`
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", points, v)
exp := `{"series":[{"name":"test","tags":{"group":"a"},"columns":["time","count"],"values":[["1970-01-01T00:00:10Z",10]]}]}`
err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5)
if err != nil {
t.Error(err)
}
}
// If this test fails due to missing python dependencies, run 'INSTALL_PREFIX=/usr/local ./install-deps.sh' from the root directory of the
// kapacitor project.
func TestServer_UDFBatchAgents(t *testing.T) {
tdir, err := ioutil.TempDir("", "kapacitor_server_test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tdir)
agents := []struct {
buildFunc func() error
config udf.FunctionConfig
}{
// Go
{
buildFunc: func() error {
// Explicitly compile the binary.
// We could just use 'go run' but I ran into race conditions
// where 'go run' was not handing off to the compiled process in time
// and I didn't care to dig into 'go run's specific behavior.
cmd := exec.Command(
"go",
"build",
"-o",
filepath.Join(tdir, "outliers"+ExecutableSuffix),
filepath.Join(udfDir, "agent/examples/outliers/outliers.go"),
)
out, err := cmd.CombinedOutput()
if err != nil {
t.Log(string(out))
return err
}
return nil
},
config: udf.FunctionConfig{
Prog: filepath.Join(tdir, "outliers"),
Timeout: toml.Duration(time.Minute),
},
},
// Python
{
buildFunc: func() error { return nil },
config: udf.FunctionConfig{
Prog: PythonExecutable,
Args: []string{"-u", filepath.Join(udfDir, "agent/examples/outliers/outliers.py")},
Timeout: toml.Duration(time.Minute),
Env: map[string]string{
"PYTHONPATH": strings.Join(
[]string{filepath.Join(udfDir, "agent/py"), os.Getenv("PYTHONPATH")},
string(filepath.ListSeparator),
),
},
},
},
}
for _, agent := range agents {
err := agent.buildFunc()
if err != nil {
t.Fatal(err)
}
c := NewConfig()
c.UDF.Functions = map[string]udf.FunctionConfig{
"outliers": agent.config,
}
testBatchAgent(t, c)
}
}
func testBatchAgent(t *testing.T, c *server.Config) {
count := 0
stopTimeC := make(chan time.Time, 2)
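// The mock InfluxDB parses each batch query, extracts its stop time, and serves
// shuffled data for the first two queries; any later queries get no data.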
db := NewInfluxDB(func(q string) *iclient.Response {
stmt, err := influxql.ParseStatement(q)
if err != nil {
return &iclient.Response{Err: err.Error()}
}
slct, ok := stmt.(*influxql.SelectStatement)
if !ok {
return nil
}
cond, ok := slct.Condition.(*influxql.BinaryExpr)
if !ok {
return &iclient.Response{Err: "expected select condition to be binary expression"}
}
stopTimeExpr, ok := cond.RHS.(*influxql.BinaryExpr)
if !ok {
return &iclient.Response{Err: "expected select condition rhs to be binary expression"}
}
stopTL, ok := stopTimeExpr.RHS.(*influxql.StringLiteral)
if !ok {
return &iclient.Response{Err: "expected select condition rhs to be string literal"}
}
count++
switch count {
case 1, 2:
stopTime, err := time.Parse(time.RFC3339Nano, stopTL.Val)
if err != nil {
return &iclient.Response{Err: err.Error()}
}
stopTimeC <- stopTime
data := []float64{
5,
6,
7,
13,
33,
35,
36,
45,
46,
47,
48,
50,
51,
52,
53,
54,
80,
85,
90,
100,
}
// Shuffle data using count as seed.
// Data order should not affect the result.
r := rand.New(rand.NewSource(int64(count)))
for i := range data {
j := r.Intn(i + 1)
data[i], data[j] = data[j], data[i]
}
// Build the set of values, pairing each shuffled datum with a time counting back from the stop time.
values := make([][]interface{}, len(data))
for i, value := range data {
values[i] = []interface{}{
stopTime.Add(time.Duration(i-len(data)) * time.Millisecond).Format(time.RFC3339Nano),
value,
}
}
return &iclient.Response{
Results: []iclient.Result{{
Series: []imodels.Row{{
Name: "cpu",
Columns: []string{"time", "value"},
Tags: map[string]string{
"count": strconv.FormatInt(int64(count%2), 10),
},
Values: values,
}},
}},
}
default:
return nil
}
})
c.InfluxDB[0].URLs = []string{db.URL()}
c.InfluxDB[0].Enabled = true
s := NewServer(c)
err := s.Open()
if err != nil {
t.Fatal(err)
}
defer s.Close()
cli := Client(s)
id := "testUDFTask"
ttype := client.BatchTask
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
tick := `batch
|query(' SELECT value from mydb.myrp.cpu ')
.period(5ms)
.every(5ms)
.groupBy('count')
@outliers()
.field('value')
.scale(1.5)
|count('value')
|httpOut('count')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
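// Collect the stop times of the two batch queries so the expected response can
// reference the exact window boundaries.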
stopTimes := make([]time.Time, 2)
for i := range stopTimes {
timeout := time.NewTicker(100 * time.Millisecond)
defer timeout.Stop()
select {
case <-timeout.C:
t.Fatal("timedout waiting for query")
case stopTime := <-stopTimeC:
stopTimes[i] = stopTime
}
}
endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id)
exp := fmt.Sprintf(
`{"series":[{"name":"cpu","tags":{"count":"1"},"columns":["time","count"],"values":[["%s",5]]},{"name":"cpu","tags":{"count":"0"},"columns":["time","count"],"values":[["%s",5]]}]}`,
stopTimes[0].Format(time.RFC3339Nano),
stopTimes[1].Format(time.RFC3339Nano),
)
err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*50)
if err != nil {
t.Error(err)
}
_, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
}
func TestServer_CreateTask_Defaults(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
baseURL := s.URL()
body := `
{
"id" : "TASK_ID",
"type" : "stream",
"dbrps": [{"db": "DATABASE_NAME", "rp" : "RP_NAME"}],
"script": "stream\n |from()\n .measurement('cpu')\n"
}`
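// Create the task over raw HTTP; fields omitted from the body (such as status)
// should be filled in with server defaults.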
resp, err := http.Post(baseURL+"/tasks", "application/json", strings.NewReader(body))
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if exp, got := http.StatusOK, resp.StatusCode; exp != got {
t.Errorf("unexpected status code, got %d exp %d", got, exp)
}
id := "TASK_ID"
tick := "stream\n |from()\n .measurement('cpu')\n"
dbrps := []client.DBRP{
{
Database: "DATABASE_NAME",
RetentionPolicy: "RP_NAME",
},
}
ti, err := cli.Task(cli.TaskLink(id), nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := "digraph TASK_ID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
func TestServer_ListTask_Defaults(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
baseURL := s.URL()
dbrps := []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}}
id := "task_id"
tick := "stream\n |from()\n"
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: client.StreamTask,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
resp, err := http.Get(baseURL + "/tasks")
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if exp, got := http.StatusOK, resp.StatusCode; exp != got {
t.Errorf("unexpected status code, got %d exp %d", got, exp)
}
// Response type
type response struct {
Tasks []client.Task `json:"tasks"`
}
dec := json.NewDecoder(resp.Body)
tasks := response{}
dec.Decode(&tasks)
if exp, got := 1, len(tasks.Tasks); exp != got {
t.Fatalf("unexpected tasks count, got %d exp %d", got, exp)
}
task = tasks.Tasks[0]
if task.ID != id {
t.Fatalf("unexpected id got %s exp %s", task.ID, id)
}
if task.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", task.Type, client.StreamTask)
}
if task.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", task.Status, client.Disabled)
}
if !reflect.DeepEqual(task.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", task.DBRPs, dbrps)
}
if task.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", task.TICKscript, tick)
}
dot := "digraph task_id {\nstream0 -> from1;\n}"
if task.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", task.Dot, dot)
}
}
func TestServer_CreateTask_ValidIDs(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
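// Task IDs may contain letters (including non-ASCII), numbers, '-', '.', and '_';
// any other character should be rejected with a descriptive error.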
testCases := []struct {
id string
valid bool
}{
{
id: "task_id",
valid: true,
},
{
id: "task_id7",
valid: true,
},
{
id: "task.id7",
valid: true,
},
{
id: "task-id7",
valid: true,
},
{
id: "tásk7",
valid: true,
},
{
id: "invalid id",
valid: false,
},
{
id: "invalid*id",
valid: false,
},
{
id: "task/id7",
valid: false,
},
}
for _, tc := range testCases {
id := tc.id
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
}
tick := `stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if !tc.valid {
exp := fmt.Sprintf("task ID must contain only letters, numbers, '-', '.' and '_'. %q", id)
if err.Error() != exp {
t.Errorf("unexpected error: got %s exp %s", err.Error(), exp)
}
continue
}
if err != nil {
t.Fatal(err)
}
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := "digraph " + id + " {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
}
func TestServer_CreateRecording_ValidIDs(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
}
tick := `stream
|from()
.measurement('test')
`
_, err := cli.CreateTask(client.CreateTaskOptions{
ID: "task_id",
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
testCases := []struct {
id string
valid bool
}{
{
id: "recording_id",
valid: true,
},
{
id: "recording_id7",
valid: true,
},
{
id: "recording.id7",
valid: true,
},
{
id: "recording-id7",
valid: true,
},
{
id: "récording7",
valid: true,
},
{
id: "invalid id",
valid: false,
},
{
id: "invalid*id",
valid: false,
},
{
id: "recording/id7",
valid: false,
},
}
for _, tc := range testCases {
id := tc.id
recording, err := cli.RecordStream(client.RecordStreamOptions{
ID: id,
Task: "task_id",
Stop: time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC),
})
if !tc.valid {
exp := fmt.Sprintf("recording ID must contain only letters, numbers, '-', '.' and '_'. %q", id)
if err.Error() != exp {
t.Errorf("unexpected error: got %s exp %s", err.Error(), exp)
}
continue
}
if err != nil {
t.Fatal(err)
}
recording, err = cli.Recording(recording.Link)
if err != nil {
t.Fatal(err)
}
if exp, got := id, recording.ID; got != exp {
t.Errorf("unexpected recording ID got %s exp %s", got, exp)
}
}
}
func TestServer_CreateReplay_ValidIDs(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
}
tick := `stream
|from()
.measurement('test')
`
_, err := cli.CreateTask(client.CreateTaskOptions{
ID: "task_id",
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
_, err = cli.RecordStream(client.RecordStreamOptions{
ID: "recording_id",
Task: "task_id",
Stop: time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC),
})
if err != nil {
t.Fatal(err)
}
testCases := []struct {
id string
valid bool
}{
{
id: "replay_id",
valid: true,
},
{
id: "replay_id7",
valid: true,
},
{
id: "replay.id7",
valid: true,
},
{
id: "replay-id7",
valid: true,
},
{
id: "réplay7",
valid: true,
},
{
id: "invalid id",
valid: false,
},
{
id: "invalid*id",
valid: false,
},
{
id: "replay/id7",
valid: false,
},
}
for _, tc := range testCases {
id := tc.id
replay, err := cli.CreateReplay(client.CreateReplayOptions{
ID: id,
Task: "task_id",
Recording: "recording_id",
Clock: client.Fast,
RecordingTime: true,
})
if !tc.valid {
exp := fmt.Sprintf("replay ID must contain only letters, numbers, '-', '.' and '_'. %q", id)
if err.Error() != exp {
t.Errorf("unexpected error: got %s exp %s", err.Error(), exp)
}
continue
}
if err != nil {
t.Fatal(err)
}
replay, err = cli.Replay(replay.Link)
if err != nil {
t.Fatal(err)
}
if exp, got := id, replay.ID; got != exp {
t.Errorf("unexpected replay ID got %s exp %s", got, exp)
}
}
}
func TestServer_UpdateConfig(t *testing.T) {
type updateAction struct {
element string
updateAction client.ConfigUpdateAction
expSection client.ConfigSection
expElement client.ConfigElement
}
db := NewInfluxDB(func(q string) *iclient.Response {
return &iclient.Response{}
})
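// Each case sets defaults on the config, verifies the reported section and element,
// then applies a sequence of updates, checking the resulting config after each one.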
testCases := []struct {
section string
element string
setDefaults func(*server.Config)
expDefaultSection client.ConfigSection
expDefaultElement client.ConfigElement
updates []updateAction
}{
{
section: "influxdb",
element: "default",
setDefaults: func(c *server.Config) {
c.InfluxDB[0].Enabled = true
c.InfluxDB[0].Username = "bob"
c.InfluxDB[0].Password = "secret"
c.InfluxDB[0].URLs = []string{db.URL()}
// Set really long timeout since we shouldn't hit it
c.InfluxDB[0].StartUpTimeout = toml.Duration(time.Hour)
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": false,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "http",
"subscription-mode": "cluster",
"subscriptions": nil,
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{db.URL()},
"username": "bob",
},
Redacted: []string{
"password",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": false,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "http",
"subscription-mode": "cluster",
"subscriptions": nil,
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{db.URL()},
"username": "bob",
},
Redacted: []string{
"password",
},
},
updates: []updateAction{
{
// Set an invalid URL to make sure we can fix it without waiting for connection timeouts
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"urls": []string{"http://192.0.2.0:8086"},
},
},
element: "default",
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": false,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "http",
"subscription-mode": "cluster",
"subscriptions": nil,
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{"http://192.0.2.0:8086"},
"username": "bob",
},
Redacted: []string{
"password",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": false,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "http",
"subscription-mode": "cluster",
"subscriptions": nil,
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{"http://192.0.2.0:8086"},
"username": "bob",
},
Redacted: []string{
"password",
},
},
},
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"default": true,
"subscription-protocol": "https",
"subscriptions": map[string][]string{"_internal": []string{"monitor"}},
},
},
element: "default",
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": true,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "https",
"subscription-mode": "cluster",
"subscriptions": map[string]interface{}{"_internal": []interface{}{"monitor"}},
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{"http://192.0.2.0:8086"},
"username": "bob",
},
Redacted: []string{
"password",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": true,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "https",
"subscription-mode": "cluster",
"subscriptions": map[string]interface{}{"_internal": []interface{}{"monitor"}},
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{"http://192.0.2.0:8086"},
"username": "bob",
},
Redacted: []string{
"password",
},
},
},
{
updateAction: client.ConfigUpdateAction{
Delete: []string{"urls"},
},
element: "default",
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": true,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "https",
"subscription-mode": "cluster",
"subscriptions": map[string]interface{}{"_internal": []interface{}{"monitor"}},
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{db.URL()},
"username": "bob",
},
Redacted: []string{
"password",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": true,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "https",
"subscription-mode": "cluster",
"subscriptions": map[string]interface{}{"_internal": []interface{}{"monitor"}},
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{db.URL()},
"username": "bob",
},
Redacted: []string{
"password",
},
},
},
{
updateAction: client.ConfigUpdateAction{
Add: map[string]interface{}{
"name": "new",
"urls": []string{db.URL()},
},
},
element: "new",
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb"},
Elements: []client.ConfigElement{
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/default"},
Options: map[string]interface{}{
"default": true,
"disable-subscriptions": false,
"enabled": true,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "default",
"password": true,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "1h0m0s",
"subscription-protocol": "https",
"subscription-mode": "cluster",
"subscriptions": map[string]interface{}{"_internal": []interface{}{"monitor"}},
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{db.URL()},
"username": "bob",
},
Redacted: []string{
"password",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/new"},
Options: map[string]interface{}{
"default": false,
"disable-subscriptions": false,
"enabled": false,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "new",
"password": false,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "5m0s",
"subscription-protocol": "http",
"subscription-mode": "cluster",
"subscriptions": nil,
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{db.URL()},
"username": "",
},
Redacted: []string{
"password",
},
},
},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/influxdb/new"},
Options: map[string]interface{}{
"default": false,
"disable-subscriptions": false,
"enabled": false,
"excluded-subscriptions": map[string]interface{}{"_kapacitor": []interface{}{"autogen"}},
"http-port": float64(0),
"insecure-skip-verify": false,
"kapacitor-hostname": "",
"name": "new",
"password": false,
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"startup-timeout": "5m0s",
"subscription-protocol": "http",
"subscriptions": nil,
"subscription-mode": "cluster",
"subscriptions-sync-interval": "1m0s",
"timeout": "0s",
"udp-bind": "",
"udp-buffer": float64(1e3),
"udp-read-buffer": float64(0),
"urls": []interface{}{db.URL()},
"username": "",
},
Redacted: []string{
"password",
},
},
},
},
},
{
section: "alerta",
setDefaults: func(c *server.Config) {
c.Alerta.URL = "http://alerta.example.com"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alerta"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alerta/"},
Options: map[string]interface{}{
"enabled": false,
"environment": "",
"origin": "",
"token": false,
"token-prefix": "",
"url": "http://alerta.example.com",
"insecure-skip-verify": false,
"timeout": "0s",
},
Redacted: []string{
"token",
}},
},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alerta/"},
Options: map[string]interface{}{
"enabled": false,
"environment": "",
"origin": "",
"token": false,
"token-prefix": "",
"url": "http://alerta.example.com",
"insecure-skip-verify": false,
"timeout": "0s",
},
Redacted: []string{
"token",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"token": "token",
"origin": "kapacitor",
"timeout": "3h",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alerta"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alerta/"},
Options: map[string]interface{}{
"enabled": false,
"environment": "",
"origin": "kapacitor",
"token": true,
"token-prefix": "",
"url": "http://alerta.example.com",
"insecure-skip-verify": false,
"timeout": "3h0m0s",
},
Redacted: []string{
"token",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/alerta/"},
Options: map[string]interface{}{
"enabled": false,
"environment": "",
"origin": "kapacitor",
"token": true,
"token-prefix": "",
"url": "http://alerta.example.com",
"insecure-skip-verify": false,
"timeout": "3h0m0s",
},
Redacted: []string{
"token",
},
},
},
},
},
{
section: "httppost",
element: "test",
setDefaults: func(c *server.Config) {
apc := httppost.Config{
Endpoint: "test",
URL: "http://httppost.example.com",
Headers: map[string]string{
"testing": "works",
},
}
c.HTTPPost = httppost.Configs{apc}
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/httppost"},
Elements: []client.ConfigElement{
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/httppost/test"},
Options: map[string]interface{}{
"endpoint": "test",
"url": "http://httppost.example.com",
"headers": map[string]interface{}{
"testing": "works",
},
"basic-auth": false,
"alert-template": "",
"alert-template-file": "",
"row-template": "",
"row-template-file": "",
},
Redacted: []string{
"basic-auth",
}},
},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/httppost/test"},
Options: map[string]interface{}{
"endpoint": "test",
"url": "http://httppost.example.com",
"headers": map[string]interface{}{
"testing": "works",
},
"basic-auth": false,
"alert-template": "",
"alert-template-file": "",
"row-template": "",
"row-template-file": "",
},
Redacted: []string{
"basic-auth",
},
},
updates: []updateAction{
{
element: "test",
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"headers": map[string]string{
"testing": "more",
},
"basic-auth": httppost.BasicAuth{
Username: "usr",
Password: "pass",
},
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/httppost"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/httppost/test"},
Options: map[string]interface{}{
"endpoint": "test",
"url": "http://httppost.example.com",
"headers": map[string]interface{}{
"testing": "more",
},
"basic-auth": true,
"alert-template": "",
"alert-template-file": "",
"row-template": "",
"row-template-file": "",
},
Redacted: []string{
"basic-auth",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/httppost/test"},
Options: map[string]interface{}{
"endpoint": "test",
"url": "http://httppost.example.com",
"headers": map[string]interface{}{
"testing": "more",
},
"basic-auth": true,
"alert-template": "",
"alert-template-file": "",
"row-template": "",
"row-template-file": "",
},
Redacted: []string{
"basic-auth",
},
},
},
},
},
{
section: "pushover",
setDefaults: func(c *server.Config) {
c.Pushover.URL = "http://pushover.example.com"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pushover"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pushover/"},
Options: map[string]interface{}{
"enabled": false,
"token": false,
"user-key": false,
"url": "http://pushover.example.com",
},
Redacted: []string{
"token",
"user-key",
}},
},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pushover/"},
Options: map[string]interface{}{
"enabled": false,
"token": false,
"user-key": false,
"url": "http://pushover.example.com",
},
Redacted: []string{
"token",
"user-key",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"token": "token",
"user-key": "kapacitor",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pushover"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pushover/"},
Options: map[string]interface{}{
"enabled": false,
"user-key": true,
"token": true,
"url": "http://pushover.example.com",
},
Redacted: []string{
"token",
"user-key",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pushover/"},
Options: map[string]interface{}{
"enabled": false,
"user-key": true,
"token": true,
"url": "http://pushover.example.com",
},
Redacted: []string{
"token",
"user-key",
},
},
},
},
},
{
section: "kubernetes",
setDefaults: func(c *server.Config) {
c.Kubernetes = k8s.Configs{k8s.NewConfig()}
c.Kubernetes[0].APIServers = []string{"http://localhost:80001"}
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes/"},
Options: map[string]interface{}{
"id": "",
"api-servers": []interface{}{"http://localhost:80001"},
"ca-path": "",
"enabled": false,
"in-cluster": false,
"namespace": "",
"token": false,
"resource": "",
},
Redacted: []string{
"token",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes/"},
Options: map[string]interface{}{
"id": "",
"api-servers": []interface{}{"http://localhost:80001"},
"ca-path": "",
"enabled": false,
"in-cluster": false,
"namespace": "",
"token": false,
"resource": "",
},
Redacted: []string{
"token",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"token": "secret",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes/"},
Options: map[string]interface{}{
"id": "",
"api-servers": []interface{}{"http://localhost:80001"},
"ca-path": "",
"enabled": false,
"in-cluster": false,
"namespace": "",
"token": true,
"resource": "",
},
Redacted: []string{
"token",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/kubernetes/"},
Options: map[string]interface{}{
"id": "",
"api-servers": []interface{}{"http://localhost:80001"},
"ca-path": "",
"enabled": false,
"in-cluster": false,
"namespace": "",
"token": true,
"resource": "",
},
Redacted: []string{
"token",
},
},
},
},
},
{
section: "hipchat",
setDefaults: func(c *server.Config) {
c.HipChat.URL = "http://hipchat.example.com"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat/"},
Options: map[string]interface{}{
"enabled": false,
"global": false,
"room": "",
"state-changes-only": false,
"token": false,
"url": "http://hipchat.example.com",
},
Redacted: []string{
"token",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat/"},
Options: map[string]interface{}{
"enabled": false,
"global": false,
"room": "",
"state-changes-only": false,
"token": false,
"url": "http://hipchat.example.com",
},
Redacted: []string{
"token",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"token": "token",
"room": "kapacitor",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat/"},
Options: map[string]interface{}{
"enabled": false,
"global": false,
"room": "kapacitor",
"state-changes-only": false,
"token": true,
"url": "http://hipchat.example.com",
},
Redacted: []string{
"token",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/hipchat/"},
Options: map[string]interface{}{
"enabled": false,
"global": false,
"room": "kapacitor",
"state-changes-only": false,
"token": true,
"url": "http://hipchat.example.com",
},
Redacted: []string{
"token",
},
},
},
},
},
{
section: "mqtt",
setDefaults: func(c *server.Config) {
c.MQTT = mqtt.Configs{mqtt.Config{
Name: "default",
URL: "tcp://mqtt.example.com:1883",
NewClientF: mqtttest.NewClient,
}}
},
element: "default",
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/mqtt"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/mqtt/default"},
Options: map[string]interface{}{
"enabled": false,
"name": "default",
"default": false,
"url": "tcp://mqtt.example.com:1883",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
"client-id": "",
"username": "",
"password": false,
},
Redacted: []string{
"password",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/mqtt/default"},
Options: map[string]interface{}{
"enabled": false,
"name": "default",
"default": false,
"url": "tcp://mqtt.example.com:1883",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
"client-id": "",
"username": "",
"password": false,
},
Redacted: []string{
"password",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"client-id": "kapacitor-default",
"password": "super secret",
},
},
element: "default",
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/mqtt"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/mqtt/default"},
Options: map[string]interface{}{
"enabled": false,
"name": "default",
"default": false,
"url": "tcp://mqtt.example.com:1883",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
"client-id": "kapacitor-default",
"username": "",
"password": true,
},
Redacted: []string{
"password",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/mqtt/default"},
Options: map[string]interface{}{
"enabled": false,
"name": "default",
"default": false,
"url": "tcp://mqtt.example.com:1883",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
"client-id": "kapacitor-default",
"username": "",
"password": true,
},
Redacted: []string{
"password",
},
},
},
},
},
{
section: "opsgenie",
setDefaults: func(c *server.Config) {
c.OpsGenie.URL = "http://opsgenie.example.com"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie/"},
Options: map[string]interface{}{
"api-key": false,
"enabled": false,
"global": false,
"recipients": nil,
"recovery_url": opsgenie.DefaultOpsGenieRecoveryURL,
"teams": nil,
"url": "http://opsgenie.example.com",
},
Redacted: []string{
"api-key",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie/"},
Options: map[string]interface{}{
"api-key": false,
"enabled": false,
"global": false,
"recipients": nil,
"recovery_url": opsgenie.DefaultOpsGenieRecoveryURL,
"teams": nil,
"url": "http://opsgenie.example.com",
},
Redacted: []string{
"api-key",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"api-key": "token",
"global": true,
"teams": []string{"teamA", "teamB"},
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie/"},
Options: map[string]interface{}{
"api-key": true,
"enabled": false,
"global": true,
"recipients": nil,
"recovery_url": opsgenie.DefaultOpsGenieRecoveryURL,
"teams": []interface{}{"teamA", "teamB"},
"url": "http://opsgenie.example.com",
},
Redacted: []string{
"api-key",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/opsgenie/"},
Options: map[string]interface{}{
"api-key": true,
"enabled": false,
"global": true,
"recipients": nil,
"recovery_url": opsgenie.DefaultOpsGenieRecoveryURL,
"teams": []interface{}{"teamA", "teamB"},
"url": "http://opsgenie.example.com",
},
Redacted: []string{
"api-key",
},
},
},
},
},
{
section: "pagerduty",
setDefaults: func(c *server.Config) {
c.PagerDuty.ServiceKey = "secret"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty/"},
Options: map[string]interface{}{
"enabled": false,
"global": false,
"service-key": true,
"url": pagerduty.DefaultPagerDutyAPIURL,
},
Redacted: []string{
"service-key",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty/"},
Options: map[string]interface{}{
"enabled": false,
"global": false,
"service-key": true,
"url": pagerduty.DefaultPagerDutyAPIURL,
},
Redacted: []string{
"service-key",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"service-key": "",
"enabled": true,
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty/"},
Options: map[string]interface{}{
"enabled": true,
"global": false,
"service-key": false,
"url": pagerduty.DefaultPagerDutyAPIURL,
},
Redacted: []string{
"service-key",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/pagerduty/"},
Options: map[string]interface{}{
"enabled": true,
"global": false,
"service-key": false,
"url": pagerduty.DefaultPagerDutyAPIURL,
},
Redacted: []string{
"service-key",
},
},
},
},
},
{
section: "smtp",
setDefaults: func(c *server.Config) {
c.SMTP.Host = "smtp.example.com"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp/"},
Options: map[string]interface{}{
"enabled": false,
"from": "",
"global": false,
"host": "smtp.example.com",
"idle-timeout": "30s",
"no-verify": false,
"password": false,
"port": float64(25),
"state-changes-only": false,
"to": nil,
"username": "",
},
Redacted: []string{
"password",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp/"},
Options: map[string]interface{}{
"enabled": false,
"from": "",
"global": false,
"host": "smtp.example.com",
"idle-timeout": "30s",
"no-verify": false,
"password": false,
"port": float64(25),
"state-changes-only": false,
"to": nil,
"username": "",
},
Redacted: []string{
"password",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"idle-timeout": "1m0s",
"global": true,
"password": "secret",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp/"},
Options: map[string]interface{}{
"enabled": false,
"from": "",
"global": true,
"host": "smtp.example.com",
"idle-timeout": "1m0s",
"no-verify": false,
"password": true,
"port": float64(25),
"state-changes-only": false,
"to": nil,
"username": "",
},
Redacted: []string{
"password",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/smtp/"},
Options: map[string]interface{}{
"enabled": false,
"from": "",
"global": true,
"host": "smtp.example.com",
"idle-timeout": "1m0s",
"no-verify": false,
"password": true,
"port": float64(25),
"state-changes-only": false,
"to": nil,
"username": "",
},
Redacted: []string{
"password",
},
},
},
},
},
{
section: "sensu",
setDefaults: func(c *server.Config) {
c.Sensu.Addr = "sensu.example.com:3000"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu/"},
Options: map[string]interface{}{
"addr": "sensu.example.com:3000",
"enabled": false,
"source": "Kapacitor",
"handlers": nil,
},
Redacted: nil,
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu/"},
Options: map[string]interface{}{
"addr": "sensu.example.com:3000",
"enabled": false,
"source": "Kapacitor",
"handlers": nil,
},
Redacted: nil,
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"addr": "sensu.local:3000",
"enabled": true,
"source": "Kapacitor",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu/"},
Options: map[string]interface{}{
"addr": "sensu.local:3000",
"enabled": true,
"source": "Kapacitor",
"handlers": nil,
},
Redacted: nil,
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/sensu/"},
Options: map[string]interface{}{
"addr": "sensu.local:3000",
"enabled": true,
"source": "Kapacitor",
"handlers": nil,
},
Redacted: nil,
},
},
},
},
{
section: "slack",
setDefaults: func(c *server.Config) {
c.Slack.Global = true
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/"},
Options: map[string]interface{}{
"channel": "",
"enabled": false,
"global": true,
"icon-emoji": "",
"state-changes-only": false,
"url": false,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/"},
Options: map[string]interface{}{
"channel": "",
"enabled": false,
"global": true,
"icon-emoji": "",
"state-changes-only": false,
"url": false,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"enabled": true,
"global": false,
"channel": "#general",
"url": "http://slack.example.com/secret-token",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/"},
Options: map[string]interface{}{
"channel": "#general",
"enabled": true,
"global": false,
"icon-emoji": "",
"state-changes-only": false,
"url": true,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/slack/"},
Options: map[string]interface{}{
"channel": "#general",
"enabled": true,
"global": false,
"icon-emoji": "",
"state-changes-only": false,
"url": true,
"username": "kapacitor",
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: []string{
"url",
},
},
},
},
},
{
section: "snmptrap",
setDefaults: func(c *server.Config) {
c.SNMPTrap.Community = "test"
c.SNMPTrap.Retries = 2.0
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/snmptrap"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/snmptrap/"},
Options: map[string]interface{}{
"addr": "localhost:162",
"enabled": false,
"community": true,
"retries": 2.0,
},
Redacted: []string{
"community",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/snmptrap/"},
Options: map[string]interface{}{
"addr": "localhost:162",
"enabled": false,
"community": true,
"retries": 2.0,
},
Redacted: []string{
"community",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"enabled": true,
"addr": "snmptrap.example.com:162",
"community": "public",
"retries": 1.0,
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/snmptrap"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/snmptrap/"},
Options: map[string]interface{}{
"addr": "snmptrap.example.com:162",
"enabled": true,
"community": true,
"retries": 1.0,
},
Redacted: []string{
"community",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/snmptrap/"},
Options: map[string]interface{}{
"addr": "snmptrap.example.com:162",
"enabled": true,
"community": true,
"retries": 1.0,
},
Redacted: []string{
"community",
},
},
},
},
},
{
section: "swarm",
setDefaults: func(c *server.Config) {
c.Swarm = swarm.Configs{swarm.Config{
Servers: []string{"http://localhost:80001"},
}}
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/swarm"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/swarm/"},
Options: map[string]interface{}{
"id": "",
"enabled": false,
"servers": []interface{}{"http://localhost:80001"},
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: nil,
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/swarm/"},
Options: map[string]interface{}{
"id": "",
"enabled": false,
"servers": []interface{}{"http://localhost:80001"},
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: nil,
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"enabled": true,
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/swarm"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/swarm/"},
Options: map[string]interface{}{
"id": "",
"enabled": true,
"servers": []interface{}{"http://localhost:80001"},
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: nil,
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/swarm/"},
Options: map[string]interface{}{
"id": "",
"enabled": true,
"servers": []interface{}{"http://localhost:80001"},
"ssl-ca": "",
"ssl-cert": "",
"ssl-key": "",
"insecure-skip-verify": false,
},
Redacted: nil,
},
},
},
},
{
section: "talk",
setDefaults: func(c *server.Config) {
c.Talk.AuthorName = "Kapacitor"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk/"},
Options: map[string]interface{}{
"enabled": false,
"url": false,
"author_name": "Kapacitor",
},
Redacted: []string{
"url",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk/"},
Options: map[string]interface{}{
"enabled": false,
"url": false,
"author_name": "Kapacitor",
},
Redacted: []string{
"url",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"enabled": true,
"url": "http://talk.example.com/secret-token",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk/"},
Options: map[string]interface{}{
"enabled": true,
"url": true,
"author_name": "Kapacitor",
},
Redacted: []string{
"url",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/talk/"},
Options: map[string]interface{}{
"enabled": true,
"url": true,
"author_name": "Kapacitor",
},
Redacted: []string{
"url",
},
},
},
},
},
{
section: "telegram",
setDefaults: func(c *server.Config) {
c.Telegram.ChatId = "kapacitor"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram/"},
Options: map[string]interface{}{
"chat-id": "kapacitor",
"disable-notification": false,
"disable-web-page-preview": false,
"enabled": false,
"global": false,
"parse-mode": "",
"state-changes-only": false,
"token": false,
"url": telegram.DefaultTelegramURL,
},
Redacted: []string{
"token",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram/"},
Options: map[string]interface{}{
"chat-id": "kapacitor",
"disable-notification": false,
"disable-web-page-preview": false,
"enabled": false,
"global": false,
"parse-mode": "",
"state-changes-only": false,
"token": false,
"url": telegram.DefaultTelegramURL,
},
Redacted: []string{
"token",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"enabled": true,
"token": "token",
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram/"},
Options: map[string]interface{}{
"chat-id": "kapacitor",
"disable-notification": false,
"disable-web-page-preview": false,
"enabled": true,
"global": false,
"parse-mode": "",
"state-changes-only": false,
"token": true,
"url": telegram.DefaultTelegramURL,
},
Redacted: []string{
"token",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/telegram/"},
Options: map[string]interface{}{
"chat-id": "kapacitor",
"disable-notification": false,
"disable-web-page-preview": false,
"enabled": true,
"global": false,
"parse-mode": "",
"state-changes-only": false,
"token": true,
"url": telegram.DefaultTelegramURL,
},
Redacted: []string{
"token",
},
},
},
},
},
{
section: "victorops",
setDefaults: func(c *server.Config) {
c.VictorOps.RoutingKey = "test"
c.VictorOps.APIKey = "secret"
},
expDefaultSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops/"},
Options: map[string]interface{}{
"api-key": true,
"enabled": false,
"global": false,
"routing-key": "test",
"url": victorops.DefaultVictorOpsAPIURL,
},
Redacted: []string{
"api-key",
},
}},
},
expDefaultElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops/"},
Options: map[string]interface{}{
"api-key": true,
"enabled": false,
"global": false,
"routing-key": "test",
"url": victorops.DefaultVictorOpsAPIURL,
},
Redacted: []string{
"api-key",
},
},
updates: []updateAction{
{
updateAction: client.ConfigUpdateAction{
Set: map[string]interface{}{
"api-key": "",
"global": true,
},
},
expSection: client.ConfigSection{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops"},
Elements: []client.ConfigElement{{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops/"},
Options: map[string]interface{}{
"api-key": false,
"enabled": false,
"global": true,
"routing-key": "test",
"url": victorops.DefaultVictorOpsAPIURL,
},
Redacted: []string{
"api-key",
},
}},
},
expElement: client.ConfigElement{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/config/victorops/"},
Options: map[string]interface{}{
"api-key": false,
"enabled": false,
"global": true,
"routing-key": "test",
"url": victorops.DefaultVictorOpsAPIURL,
},
Redacted: []string{
"api-key",
},
},
},
},
},
}
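// compareElements reports whether two config elements match: links must be
// equal, the options maps must contain exactly the same keys and values, and
// the redacted lists must be equal up to ordering.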
compareElements := func(got, exp client.ConfigElement) error {
if got.Link != exp.Link {
return fmt.Errorf("elements have different links, got %v exp %v", got.Link, exp.Link)
}
for k, v := range exp.Options {
if g, ok := got.Options[k]; !ok {
return fmt.Errorf("missing option %q", k)
} else if !reflect.DeepEqual(g, v) {
return fmt.Errorf("unexpected config option %q got %#v exp %#v types: got %T exp %T", k, g, v, g, v)
}
}
for k, v := range got.Options {
if _, ok := exp.Options[k]; !ok {
return fmt.Errorf("extra option %q with value %#v", k, v)
}
}
if len(got.Redacted) != len(exp.Redacted) {
return fmt.Errorf("unexpected element redacted lists: got %v exp %v", got.Redacted, exp.Redacted)
}
sort.Strings(got.Redacted)
sort.Strings(exp.Redacted)
for i := range exp.Redacted {
if got.Redacted[i] != exp.Redacted[i] {
return fmt.Errorf("unexpected element redacted lists: got %v exp %v", got.Redacted, exp.Redacted)
}
}
return nil
}
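// compareSections reports whether two config sections match by comparing
// their links and then each element pairwise.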
compareSections := func(got, exp client.ConfigSection) error {
if got.Link != exp.Link {
return fmt.Errorf("sections have different links, got %v exp %v", got.Link, exp.Link)
}
if len(got.Elements) != len(exp.Elements) {
return fmt.Errorf("sections are different lengths, got %d exp %d", len(got.Elements), len(exp.Elements))
}
for i := range exp.Elements {
if err := compareElements(got.Elements[i], exp.Elements[i]); err != nil {
return errors.Wrapf(err, "section elements at index %d are not equal", i)
}
}
return nil
}
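// validate fetches the configuration three ways (all sections, a single
// section, a single element) and checks each response against the expected
// section and element.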
validate := func(
cli *client.Client,
section,
element string,
expSection client.ConfigSection,
expElement client.ConfigElement,
) error {
// Get all sections
if config, err := cli.ConfigSections(); err != nil {
return errors.Wrap(err, "failed to get sections")
} else {
if err := compareSections(config.Sections[section], expSection); err != nil {
return fmt.Errorf("%s: %v", section, err)
}
}
// Get the specific section
sectionLink := cli.ConfigSectionLink(section)
if got, err := cli.ConfigSection(sectionLink); err != nil {
return err
} else {
if err := compareSections(got, expSection); err != nil {
return fmt.Errorf("%s: %v", section, err)
}
}
elementLink := cli.ConfigElementLink(section, element)
// Get the specific element
if got, err := cli.ConfigElement(elementLink); err != nil {
return err
} else {
if err := compareElements(got, expElement); err != nil {
return fmt.Errorf("%s/%s: %v", section, element, err)
}
}
return nil
}
for i, tc := range testCases {
t.Run(fmt.Sprintf("%s/%s-%d", tc.section, tc.element, i), func(t *testing.T) {
// Create default config
c := NewConfig()
if tc.setDefaults != nil {
tc.setDefaults(c)
}
s := OpenServer(c)
cli := Client(s)
defer s.Close()
if err := validate(cli, tc.section, tc.element, tc.expDefaultSection, tc.expDefaultElement); err != nil {
t.Errorf("unexpected defaults for %s/%s: %v", tc.section, tc.element, err)
}
for i, ua := range tc.updates {
link := cli.ConfigElementLink(tc.section, ua.element)
if len(ua.updateAction.Add) > 0 ||
len(ua.updateAction.Remove) > 0 {
link = cli.ConfigSectionLink(tc.section)
}
if err := cli.ConfigUpdate(link, ua.updateAction); err != nil {
t.Fatal(err)
}
if err := validate(cli, tc.section, ua.element, ua.expSection, ua.expElement); err != nil {
t.Errorf("unexpected update result %d for %s/%s: %v", i, tc.section, ua.element, err)
}
}
})
}
}
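// TestServer_ListServiceTests verifies that the service-tests endpoint lists
// every testable service together with its default test options.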
func TestServer_ListServiceTests(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
serviceTests, err := cli.ListServiceTests(nil)
if err != nil {
t.Fatal(err)
}
expServiceTests := client.ServiceTests{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests"},
Services: []client.ServiceTest{
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/alerta"},
Name: "alerta",
Options: client.ServiceTestOptions{
"resource": "testResource",
"event": "testEvent",
"environment": "",
"severity": "critical",
"group": "testGroup",
"value": "testValue",
"message": "test alerta message",
"origin": "",
"service": []interface{}{
"testServiceA",
"testServiceB",
},
"timeout": "24h0m0s",
},
},
{
Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/azure"},
Name: "azure",
Options: client.ServiceTestOptions{
"id": "",
},
},
{
Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/consul"},
Name: "consul",
Options: client.ServiceTestOptions{
"id": "",
},
},
{
Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/dns"},
Name: "dns",
Options: client.ServiceTestOptions{
"id": ""},
},
{
Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/ec2"},
Name: "ec2",
Options: client.ServiceTestOptions{
"id": "",
},
},
{
Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/file-discovery"},
Name: "file-discovery",
Options: client.ServiceTestOptions{
"id": "",
},
},
{
Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/gce"},
Name: "gce",
Options: client.ServiceTestOptions{
"id": "",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/hipchat"},
Name: "hipchat",
Options: client.ServiceTestOptions{
"room": "",
"message": "test hipchat message",
"level": "CRITICAL",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/httppost"},
Name: "httppost",
Options: client.ServiceTestOptions{
"endpoint": "example",
"url": "http://localhost:3000/",
"headers": map[string]interface{}{"Auth": "secret"},
"timeout": float64(0),
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/influxdb"},
Name: "influxdb",
Options: client.ServiceTestOptions{
"cluster": "",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/kubernetes"},
Name: "kubernetes",
Options: client.ServiceTestOptions{
"id": "",
},
},
{
Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/marathon"},
Name: "marathon",
Options: client.ServiceTestOptions{
"id": "",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/mqtt"},
Name: "mqtt",
Options: client.ServiceTestOptions{
"broker-name": "",
"topic": "",
"message": "test MQTT message",
"qos": "at-most-once",
"retained": false,
},
},
{
Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/nerve"},
Name: "nerve",
Options: client.ServiceTestOptions{
"id": "",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/opsgenie"},
Name: "opsgenie",
Options: client.ServiceTestOptions{
"teams": nil,
"recipients": nil,
"message-type": "CRITICAL",
"message": "test opsgenie message",
"entity-id": "testEntityID",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/pagerduty"},
Name: "pagerduty",
Options: client.ServiceTestOptions{
"incident-key": "testIncidentKey",
"description": "test pagerduty message",
"level": "CRITICAL",
"details": "",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/pushover"},
Name: "pushover",
Options: client.ServiceTestOptions{
"user-key": "", //gohere
"message": "test pushover message",
"device": "",
"title": "",
"url": "",
"url-title": "",
"sound": "",
"level": "CRITICAL",
},
},
{
Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/scraper"},
Name: "scraper",
Options: client.ServiceTestOptions{
"name": "",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/sensu"},
Name: "sensu",
Options: client.ServiceTestOptions{
"name": "testName",
"output": "testOutput",
"source": "Kapacitor",
"handlers": []interface{}{},
"level": "CRITICAL",
},
},
{
Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/serverset"},
Name: "serverset",
Options: client.ServiceTestOptions{
"id": "",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/slack"},
Name: "slack",
Options: client.ServiceTestOptions{
"channel": "",
"icon-emoji": "",
"level": "CRITICAL",
"message": "test slack message",
"username": "",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/smtp"},
Name: "smtp",
Options: client.ServiceTestOptions{
"to": nil,
"subject": "test subject",
"body": "test body",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/snmptrap"},
Name: "snmptrap",
Options: client.ServiceTestOptions{
"trap-oid": "1.1.1.1",
"data-list": []interface{}{
map[string]interface{}{
"oid": "1.1.1.1.2",
"type": "s",
"value": "test snmptrap message",
},
},
},
},
{
Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/static-discovery"},
Name: "static-discovery",
Options: client.ServiceTestOptions{
"id": "",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/swarm"},
Name: "swarm",
Options: client.ServiceTestOptions{
"id": "",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/talk"},
Name: "talk",
Options: client.ServiceTestOptions{
"title": "testTitle",
"text": "test talk text",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/telegram"},
Name: "telegram",
Options: client.ServiceTestOptions{
"chat-id": "",
"parse-mode": "",
"message": "test telegram message",
"disable-web-page-preview": false,
"disable-notification": false,
},
},
{
Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/triton"},
Name: "triton",
Options: client.ServiceTestOptions{
"id": "",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/victorops"},
Name: "victorops",
Options: client.ServiceTestOptions{
"routingKey": "",
"messageType": "CRITICAL",
"message": "test victorops message",
"entityID": "testEntityID",
},
},
},
}
if got, exp := serviceTests.Link.Href, expServiceTests.Link.Href; got != exp {
t.Errorf("unexpected service tests link.href: got %s exp %s", got, exp)
}
if got, exp := len(serviceTests.Services), len(expServiceTests.Services); got != exp {
t.Fatalf("unexpected length of services: got %d exp %d", got, exp)
}
for i := range expServiceTests.Services {
exp := expServiceTests.Services[i]
got := serviceTests.Services[i]
if !reflect.DeepEqual(got, exp) {
t.Errorf("unexpected server test %s:\ngot\n%#v\nexp\n%#v\n", exp.Name, got, exp)
}
}
}
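// TestServer_ListServiceTests_WithPattern verifies that service tests can be
// filtered by a glob pattern; "s*" should match only services whose names
// begin with "s".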
func TestServer_ListServiceTests_WithPattern(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
serviceTests, err := cli.ListServiceTests(&client.ListServiceTestsOptions{
Pattern: "s*",
})
if err != nil {
t.Fatal(err)
}
expServiceTests := client.ServiceTests{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests"},
Services: []client.ServiceTest{
{
Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/scraper"},
Name: "scraper",
Options: client.ServiceTestOptions{
"name": "",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/sensu"},
Name: "sensu",
Options: client.ServiceTestOptions{
"name": "testName",
"output": "testOutput",
"source": "Kapacitor",
"handlers": []interface{}{},
"level": "CRITICAL",
},
},
{
Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/serverset"},
Name: "serverset",
Options: client.ServiceTestOptions{
"id": "",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/slack"},
Name: "slack",
Options: client.ServiceTestOptions{
"channel": "",
"icon-emoji": "",
"level": "CRITICAL",
"message": "test slack message",
"username": "",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/smtp"},
Name: "smtp",
Options: client.ServiceTestOptions{
"to": nil,
"subject": "test subject",
"body": "test body",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/snmptrap"},
Name: "snmptrap",
Options: client.ServiceTestOptions{
"trap-oid": "1.1.1.1",
"data-list": []interface{}{
map[string]interface{}{
"oid": "1.1.1.1.2",
"type": "s",
"value": "test snmptrap message",
},
},
},
},
{
Link: client.Link{Relation: "self", Href: "/kapacitor/v1/service-tests/static-discovery"},
Name: "static-discovery",
Options: client.ServiceTestOptions{
"id": "",
},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/service-tests/swarm"},
Name: "swarm",
Options: client.ServiceTestOptions{
"id": "",
},
},
},
}
if got, exp := serviceTests.Link.Href, expServiceTests.Link.Href; got != exp {
t.Errorf("unexpected service tests link.href: got %s exp %s", got, exp)
}
if got, exp := len(serviceTests.Services), len(expServiceTests.Services); got != exp {
t.Fatalf("unexpected length of services: got %d exp %d", got, exp)
}
for i := range expServiceTests.Services {
exp := expServiceTests.Services[i]
got := serviceTests.Services[i]
if !reflect.DeepEqual(got, exp) {
t.Errorf("unexpected server test %s:\ngot\n%#v\nexp\n%#v\n", exp.Name, got, exp)
}
}
}
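// TestServer_DoServiceTest runs the service test endpoint for a variety of
// services. Most cases expect failure because the service is not enabled;
// the enabled influxdb case runs against a mock InfluxDB server and expects
// success.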
func TestServer_DoServiceTest(t *testing.T) {
db := NewInfluxDB(func(q string) *iclient.Response {
return &iclient.Response{}
})
testCases := []struct {
service string
setDefaults func(*server.Config)
options client.ServiceTestOptions
exp client.ServiceTestResult
}{
{
service: "alerta",
options: client.ServiceTestOptions{},
exp: client.ServiceTestResult{
Success: false,
Message: "service is not enabled",
},
},
{
service: "hipchat",
options: client.ServiceTestOptions{},
exp: client.ServiceTestResult{
Success: false,
Message: "service is not enabled",
},
},
{
service: "influxdb",
setDefaults: func(c *server.Config) {
c.InfluxDB[0].Enabled = true
c.InfluxDB[0].Name = "default"
c.InfluxDB[0].URLs = []string{db.URL()}
},
options: client.ServiceTestOptions{
"cluster": "default",
},
exp: client.ServiceTestResult{
Success: true,
Message: "",
},
},
{
service: "influxdb",
options: client.ServiceTestOptions{
"cluster": "default",
},
exp: client.ServiceTestResult{
Success: false,
Message: "cluster \"default\" is not enabled or does not exist",
},
},
{
service: "kubernetes",
options: client.ServiceTestOptions{
"id": "default",
},
exp: client.ServiceTestResult{
Success: false,
Message: "unknown kubernetes cluster \"default\"",
},
},
{
service: "mqtt",
options: client.ServiceTestOptions{
"broker-name": "default",
"topic": "test",
},
exp: client.ServiceTestResult{
Success: false,
Message: "unknown MQTT broker \"default\"",
},
},
{
service: "opsgenie",
options: client.ServiceTestOptions{},
exp: client.ServiceTestResult{
Success: false,
Message: "service is not enabled",
},
},
{
service: "pagerduty",
options: client.ServiceTestOptions{},
exp: client.ServiceTestResult{
Success: false,
Message: "service is not enabled",
},
},
{
service: "pushover",
options: client.ServiceTestOptions{},
exp: client.ServiceTestResult{
Success: false,
Message: "service is not enabled",
},
},
{
service: "sensu",
options: client.ServiceTestOptions{},
exp: client.ServiceTestResult{
Success: false,
Message: "service is not enabled",
},
},
{
service: "slack",
options: client.ServiceTestOptions{},
exp: client.ServiceTestResult{
Success: false,
Message: "service is not enabled",
},
},
{
service: "smtp",
options: client.ServiceTestOptions{},
exp: client.ServiceTestResult{
Success: false,
Message: "service is not enabled",
},
},
{
service: "snmptrap",
options: client.ServiceTestOptions{},
exp: client.ServiceTestResult{
Success: false,
Message: "service is not enabled",
},
},
{
service: "swarm",
options: client.ServiceTestOptions{},
exp: client.ServiceTestResult{
Success: false,
Message: "unknown swarm cluster \"\"",
},
},
{
service: "talk",
options: client.ServiceTestOptions{},
exp: client.ServiceTestResult{
Success: false,
Message: "service is not enabled",
},
},
{
service: "telegram",
options: client.ServiceTestOptions{},
exp: client.ServiceTestResult{
Success: false,
Message: "service is not enabled",
},
},
{
service: "victorops",
options: client.ServiceTestOptions{},
exp: client.ServiceTestResult{
Success: false,
Message: "service is not enabled",
},
},
}
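// Run each service test against a freshly configured server and compare the
// result with the expected outcome.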
for _, tc := range testCases {
// Create default config
c := NewConfig()
if tc.setDefaults != nil {
tc.setDefaults(c)
}
s := OpenServer(c)
cli := Client(s)
defer s.Close()
tr, err := cli.DoServiceTest(cli.ServiceTestLink(tc.service), tc.options)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(tr, tc.exp) {
t.Log("Options", tc.options)
t.Errorf("unexpected service test result for %s:\ngot\n%#v\nexp\n%#v\n", tc.service, tr, tc.exp)
}
}
}
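// TestServer_AlertHandlers_CRUD exercises the full lifecycle of a topic
// handler: create, patch, replace (put), persistence across a server
// restart, and delete.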
func TestServer_AlertHandlers_CRUD(t *testing.T) {
testCases := []struct {
topic string
create client.TopicHandlerOptions
expCreate client.TopicHandler
patch client.JSONPatch
expPatch client.TopicHandler
put client.TopicHandlerOptions
expPut client.TopicHandler
}{
{
topic: "system",
create: client.TopicHandlerOptions{
ID: "myhandler",
Kind: "slack",
Options: map[string]interface{}{
"channel": "#test",
},
},
expCreate: client.TopicHandler{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system/handlers/myhandler"},
ID: "myhandler",
Kind: "slack",
Options: map[string]interface{}{
"channel": "#test",
},
},
patch: client.JSONPatch{
{
Path: "/kind",
Operation: "replace",
Value: "log",
},
{
Path: "/options/channel",
Operation: "remove",
},
{
Path: "/options/path",
Operation: "add",
Value: AlertLogPath,
},
},
expPatch: client.TopicHandler{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system/handlers/myhandler"},
ID: "myhandler",
Kind: "log",
Options: map[string]interface{}{
"path": AlertLogPath,
},
},
put: client.TopicHandlerOptions{
ID: "newid",
Kind: "smtp",
Options: map[string]interface{}{
"to": []string{"[email protected]"},
},
},
expPut: client.TopicHandler{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system/handlers/newid"},
ID: "newid",
Kind: "smtp",
Options: map[string]interface{}{
"to": []interface{}{"[email protected]"},
},
},
},
}
for _, tc := range testCases {
// Create default config
c := NewConfig()
s := OpenServer(c)
cli := Client(s)
defer s.Close()
h, err := cli.CreateTopicHandler(cli.TopicHandlersLink(tc.topic), tc.create)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(h, tc.expCreate) {
t.Errorf("unexpected handler created:\ngot\n%#v\nexp\n%#v\n", h, tc.expCreate)
}
h, err = cli.PatchTopicHandler(h.Link, tc.patch)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(h, tc.expPatch) {
t.Errorf("unexpected handler patched:\ngot\n%#v\nexp\n%#v\n", h, tc.expPatch)
}
h, err = cli.ReplaceTopicHandler(h.Link, tc.put)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(h, tc.expPut) {
t.Errorf("unexpected handler put:\ngot\n%#v\nexp\n%#v\n", h, tc.expPut)
}
// Restart server
s.Restart()
rh, err := cli.TopicHandler(h.Link)
if err != nil {
t.Fatalf("could not find handler after restart: %v", err)
}
if got, exp := rh, h; !reflect.DeepEqual(got, exp) {
t.Errorf("unexpected handler after restart:\ngot\n%#v\nexp\n%#v\n", got, exp)
}
err = cli.DeleteTopicHandler(h.Link)
if err != nil {
t.Fatal(err)
}
_, err = cli.TopicHandler(h.Link)
if err == nil {
t.Errorf("expected handler to be deleted")
}
}
}
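// TestServer_AlertHandlers triggers a CRITICAL alert through a stream task
// and verifies that each handler kind delivers the event to its service,
// using mock servers and fake clients to capture the outgoing requests.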
func TestServer_AlertHandlers(t *testing.T) {
resultJSON := `{"series":[{"name":"alert","columns":["time","value"],"values":[["1970-01-01T00:00:00Z",1]]}]}`
alertData := alert.Data{
ID: "id",
Message: "message",
Details: "details",
Time: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
Level: alert.Critical,
Data: models.Result{
Series: models.Rows{
{
Name: "alert",
Columns: []string{"time", "value"},
Values: [][]interface{}{{
time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
1.0,
}},
},
},
},
}
adJSON, err := json.Marshal(alertData)
if err != nil {
t.Fatal(err)
}
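// Each case provides a setup function that enables the service and stashes
// its test fixtures (mock servers, fake clients) in the returned context,
// and a result function that retrieves those fixtures and verifies the
// requests the handler produced.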
testCases := []struct {
handler client.TopicHandler
setup func(*server.Config, *client.TopicHandler) (context.Context, error)
result func(context.Context) error
}{
{
handler: client.TopicHandler{
Kind: "alerta",
Options: map[string]interface{}{
"token": "testtoken1234567",
"token-prefix": "Bearer",
"origin": "kapacitor",
"group": "test",
"environment": "env",
"timeout": time.Duration(24 * time.Hour),
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := alertatest.NewServer()
ctxt := context.WithValue(context.Background(), "server", ts)
c.Alerta.Enabled = true
c.Alerta.URL = ts.URL
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*alertatest.Server)
ts.Close()
got := ts.Requests()
exp := []alertatest.Request{{
URL: "/alert",
Authorization: "Bearer testtoken1234567",
PostData: alertatest.PostData{
Resource: "alert",
Event: "id",
Group: "test",
Environment: "env",
Text: "message",
Origin: "kapacitor",
Service: []string{"alert"},
Timeout: 86400,
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected alerta request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "exec",
Options: map[string]interface{}{
"prog": "/bin/alert-handler.sh",
"args": []string{"arg1", "arg2", "arg3"},
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
te := alerttest.NewExec()
ctxt := context.WithValue(context.Background(), "exec", te)
c.Commander = te.Commander
return ctxt, nil
},
result: func(ctxt context.Context) error {
te := ctxt.Value("exec").(*alerttest.Exec)
expData := []*commandtest.Command{{
Spec: command.Spec{
Prog: "/bin/alert-handler.sh",
Args: []string{"arg1", "arg2", "arg3"},
},
Started: true,
Waited: true,
Killed: false,
StdinData: append(adJSON, '\n'),
}}
cmds := te.Commands()
if got, exp := len(cmds), len(expData); got != exp {
return fmt.Errorf("unexpected commands length: got %d exp %d", got, exp)
}
for i := range expData {
if err := expData[i].Compare(cmds[i]); err != nil {
return fmt.Errorf("unexpected command %d: %v", i, err)
}
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "hipchat",
Options: map[string]interface{}{
"token": "testtoken1234567",
"room": "1234567",
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := hipchattest.NewServer()
ctxt := context.WithValue(context.Background(), "server", ts)
c.HipChat.Enabled = true
c.HipChat.URL = ts.URL
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*hipchattest.Server)
ts.Close()
got := ts.Requests()
exp := []hipchattest.Request{{
URL: "/1234567/notification?auth_token=testtoken1234567",
PostData: hipchattest.PostData{
From: "kapacitor",
Message: "message",
Color: "red",
Notify: true,
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected hipchat request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "log",
Options: map[string]interface{}{
"mode": 0604,
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
tdir := MustTempDir()
p := filepath.Join(tdir, "alert.log")
ha.Options["path"] = p
l := alerttest.NewLog(p)
ctxt := context.WithValue(context.Background(), "tdir", tdir)
ctxt = context.WithValue(ctxt, "log", l)
return ctxt, nil
},
result: func(ctxt context.Context) error {
tdir := ctxt.Value("tdir").(string)
defer os.RemoveAll(tdir)
l := ctxt.Value("log").(*alerttest.Log)
expData := []alert.Data{alertData}
expMode := os.FileMode(LogFileExpectedMode)
m, err := l.Mode()
if err != nil {
return err
}
if got, exp := m, expMode; exp != got {
return fmt.Errorf("unexpected file mode: got %v exp %v", got, exp)
}
data, err := l.Data()
if err != nil {
return err
}
if got, exp := data, expData; !reflect.DeepEqual(got, exp) {
return fmt.Errorf("unexpected alert data written to log:\ngot\n%+v\nexp\n%+v\n", got, exp)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "mqtt",
Options: map[string]interface{}{
"topic": "test",
"qos": "at-least-once",
"retained": true,
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
cc := new(mqtttest.ClientCreator)
ctxt := context.WithValue(context.Background(), "clientCreator", cc)
c.MQTT = mqtt.Configs{
mqtt.Config{
Enabled: true,
Name: "test",
URL: "tcp://mqtt.example.com:1883",
NewClientF: cc.NewClient,
},
}
return ctxt, nil
},
result: func(ctxt context.Context) error {
s := ctxt.Value("clientCreator").(*mqtttest.ClientCreator)
if got, exp := len(s.Clients), 1; got != exp {
return fmt.Errorf("unexpected number of clients created: exp %d got %d", exp, got)
}
if got, exp := len(s.Configs), 1; got != exp {
return fmt.Errorf("unexpected number of configs received: exp %d got %d", exp, got)
}
if got, exp := s.Configs[0].URL, "tcp://mqtt.example.com:1883"; exp != got {
return fmt.Errorf("unexpected config URL: exp %q got %q", exp, got)
}
got := s.Clients[0].PublishData
exp := []mqtttest.PublishData{{
Topic: "test",
QoS: mqtt.AtLeastOnce,
Retained: true,
Message: []byte("message"),
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected mqtt publish data:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "opsgenie",
Options: map[string]interface{}{
"teams-list": []string{"A team", "B team"},
"recipients-list": []string{"test_recipient1", "test_recipient2"},
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := opsgenietest.NewServer()
ctxt := context.WithValue(context.Background(), "server", ts)
c.OpsGenie.Enabled = true
c.OpsGenie.URL = ts.URL
c.OpsGenie.APIKey = "api_key"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*opsgenietest.Server)
ts.Close()
got := ts.Requests()
exp := []opsgenietest.Request{{
URL: "/",
PostData: opsgenietest.PostData{
ApiKey: "api_key",
Message: "message",
Entity: "id",
Alias: "id",
Note: "",
Details: map[string]interface{}{
"Level": "CRITICAL",
"Monitoring Tool": "Kapacitor",
},
Description: resultJSON,
Teams: []string{"A team", "B team"},
Recipients: []string{"test_recipient1", "test_recipient2"},
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected opsgenie request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "pagerduty",
Options: map[string]interface{}{
"service-key": "service_key",
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := pagerdutytest.NewServer()
ctxt := context.WithValue(context.Background(), "server", ts)
c.PagerDuty.Enabled = true
c.PagerDuty.URL = ts.URL
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*pagerdutytest.Server)
kapacitorURL := ctxt.Value("kapacitorURL").(string)
ts.Close()
got := ts.Requests()
exp := []pagerdutytest.Request{{
URL: "/",
PostData: pagerdutytest.PostData{
ServiceKey: "service_key",
EventType: "trigger",
Description: "message",
Client: "kapacitor",
ClientURL: kapacitorURL,
Details: "details",
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected pagerduty request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "post",
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := alerttest.NewPostServer()
ha.Options = map[string]interface{}{"url": ts.URL}
ctxt := context.WithValue(context.Background(), "server", ts)
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*alerttest.PostServer)
ts.Close()
exp := []alert.Data{alertData}
got := ts.Data()
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected post request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "post",
Options: map[string]interface{}{
"endpoint": "test",
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := httpposttest.NewAlertServer(nil, true)
ctxt := context.WithValue(context.Background(), "server", ts)
c.HTTPPost = httppost.Configs{{
Endpoint: "test",
URL: ts.URL,
AlertTemplate: `{{.Message}}`,
}}
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*httpposttest.AlertServer)
exp := []httpposttest.AlertRequest{{
MatchingHeaders: true,
Raw: []byte("message"),
}}
got := ts.Data()
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected httppost alert request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "pushover",
Options: map[string]interface{}{},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := pushovertest.NewServer()
ctxt := context.WithValue(context.Background(), "server", ts)
c.Pushover.Enabled = true
c.Pushover.URL = ts.URL
c.Pushover.Token = "api_key"
c.Pushover.UserKey = "user"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*pushovertest.Server)
ts.Close()
got := ts.Requests()
exp := []pushovertest.Request{{
PostData: pushovertest.PostData{
Token: "api_key",
UserKey: "user",
Message: "message",
Priority: 1,
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected pushover request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "sensu",
Options: map[string]interface{}{
"source": "Kapacitor",
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts, err := sensutest.NewServer()
if err != nil {
return nil, err
}
ctxt := context.WithValue(context.Background(), "server", ts)
c.Sensu.Enabled = true
c.Sensu.Addr = ts.Addr
c.Sensu.Source = "Kapacitor"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*sensutest.Server)
ts.Close()
exp := []sensutest.Request{{
Source: "Kapacitor",
Output: "message",
Name: "id",
Status: 2,
}}
got := ts.Requests()
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected sensu request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "slack",
Options: map[string]interface{}{
"channel": "#test",
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := slacktest.NewServer()
ctxt := context.WithValue(context.Background(), "server", ts)
c.Slack.Enabled = true
c.Slack.URL = ts.URL + "/test/slack/url"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*slacktest.Server)
ts.Close()
got := ts.Requests()
exp := []slacktest.Request{{
URL: "/test/slack/url",
PostData: slacktest.PostData{
Channel: "#test",
Username: "kapacitor",
Text: "",
Attachments: []slacktest.Attachment{
{
Fallback: "message",
Color: "danger",
Text: "message",
Mrkdwn_in: []string{"text"},
},
},
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected slack request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "smtp",
Options: map[string]interface{}{
"to": []string{"[email protected]", "[email protected]"},
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts, err := smtptest.NewServer()
if err != nil {
return nil, err
}
ctxt := context.WithValue(context.Background(), "server", ts)
c.SMTP.Enabled = true
c.SMTP.Host = ts.Host
c.SMTP.Port = ts.Port
c.SMTP.From = "[email protected]"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*smtptest.Server)
ts.Close()
smtpErrors := ts.Errors()
if len(smtpErrors) != 0 {
return fmt.Errorf("unexpected smtp errors (%d): %v", len(smtpErrors), smtpErrors)
}
expMail := []*smtptest.Message{{
Header: mail.Header{
"Mime-Version": []string{"1.0"},
"Content-Type": []string{"text/html; charset=UTF-8"},
"Content-Transfer-Encoding": []string{"quoted-printable"},
"To": []string{"[email protected], [email protected]"},
"From": []string{"[email protected]"},
"Subject": []string{"message"},
},
Body: "details\n",
}}
msgs := ts.SentMessages()
if got, exp := len(msgs), len(expMail); got != exp {
return fmt.Errorf("unexpected number of messages sent: got %d exp %d", got, exp)
}
for i, exp := range expMail {
got := msgs[i]
if err := exp.Compare(got); err != nil {
return fmt.Errorf("unexpected message %d: %v", i, err)
}
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "snmptrap",
Options: map[string]interface{}{
"trap-oid": "1.1.2",
"data-list": []map[string]string{
{
"oid": "1.1.2.1",
"type": "s",
"value": "{{.Message}}",
},
{
"oid": "1.1.2.2",
"type": "s",
"value": "{{.Level}}",
},
},
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts, err := snmptraptest.NewServer()
if err != nil {
return nil, err
}
ctxt := context.WithValue(context.Background(), "server", ts)
c.SNMPTrap.Enabled = true
c.SNMPTrap.Addr = ts.Addr
c.SNMPTrap.Community = ts.Community
c.SNMPTrap.Retries = 3
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*snmptraptest.Server)
ts.Close()
got := ts.Traps()
exp := []snmptraptest.Trap{{
Pdu: snmptraptest.Pdu{
Type: snmpgo.SNMPTrapV2,
ErrorStatus: snmpgo.NoError,
VarBinds: snmptraptest.VarBinds{
{
Oid: "1.3.6.1.2.1.1.3.0",
Value: "1000",
Type: "TimeTicks",
},
{
Oid: "1.3.6.1.6.3.1.1.4.1.0",
Value: "1.1.2",
Type: "Oid",
},
{
Oid: "1.1.2.1",
Value: "message",
Type: "OctetString",
},
{
Oid: "1.1.2.2",
Value: "CRITICAL",
Type: "OctetString",
},
},
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected snmptrap request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "talk",
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := talktest.NewServer()
ctxt := context.WithValue(context.Background(), "server", ts)
c.Talk.Enabled = true
c.Talk.URL = ts.URL
c.Talk.AuthorName = "Kapacitor"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*talktest.Server)
ts.Close()
got := ts.Requests()
exp := []talktest.Request{{
URL: "/",
PostData: talktest.PostData{
AuthorName: "Kapacitor",
Text: "message",
Title: "id",
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected talk request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "tcp",
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts, err := alerttest.NewTCPServer()
if err != nil {
return nil, err
}
ha.Options = map[string]interface{}{"address": ts.Addr}
ctxt := context.WithValue(context.Background(), "server", ts)
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*alerttest.TCPServer)
ts.Close()
exp := []alert.Data{alertData}
got := ts.Data()
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected tcp request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "telegram",
Options: map[string]interface{}{
"chat-id": "chat id",
"disable-web-page-preview": true,
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := telegramtest.NewServer()
ctxt := context.WithValue(context.Background(), "server", ts)
c.Telegram.Enabled = true
c.Telegram.URL = ts.URL + "/bot"
c.Telegram.Token = "TOKEN:AUTH"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*telegramtest.Server)
ts.Close()
got := ts.Requests()
exp := []telegramtest.Request{{
URL: "/botTOKEN:AUTH/sendMessage",
PostData: telegramtest.PostData{
ChatId: "chat id",
Text: "message",
ParseMode: "",
DisableWebPagePreview: true,
DisableNotification: false,
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected telegram request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
{
handler: client.TopicHandler{
Kind: "victorops",
Options: map[string]interface{}{
"routing-key": "key",
},
},
setup: func(c *server.Config, ha *client.TopicHandler) (context.Context, error) {
ts := victoropstest.NewServer()
ctxt := context.WithValue(context.Background(), "server", ts)
c.VictorOps.Enabled = true
c.VictorOps.URL = ts.URL
c.VictorOps.APIKey = "api_key"
return ctxt, nil
},
result: func(ctxt context.Context) error {
ts := ctxt.Value("server").(*victoropstest.Server)
ts.Close()
got := ts.Requests()
exp := []victoropstest.Request{{
URL: "/api_key/key",
PostData: victoropstest.PostData{
MessageType: "CRITICAL",
EntityID: "id",
StateMessage: "message",
Timestamp: 0,
MonitoringTool: "kapacitor",
Data: resultJSON,
},
}}
if !reflect.DeepEqual(exp, got) {
return fmt.Errorf("unexpected victorops request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
return nil
},
},
}
for i, tc := range testCases {
t.Run(fmt.Sprintf("%s-%d", tc.handler.Kind, i), func(t *testing.T) {
kind := tc.handler.Kind
// Create default config
c := NewConfig()
var ctxt context.Context
if tc.setup != nil {
var err error
ctxt, err = tc.setup(c, &tc.handler)
if err != nil {
t.Fatal(err)
}
}
s := OpenServer(c)
cli := Client(s)
closed := false
defer func() {
if !closed {
s.Close()
}
}()
if ctxt == nil {
ctxt = context.Background()
}
ctxt = context.WithValue(ctxt, "kapacitorURL", s.URL())
if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink("test"), client.TopicHandlerOptions{
ID: "testAlertHandlers",
Kind: tc.handler.Kind,
Options: tc.handler.Options,
}); err != nil {
t.Fatalf("%s: %v", kind, err)
}
tick := `
stream
|from()
.measurement('alert')
|alert()
.topic('test')
.id('id')
.message('message')
.details('details')
.crit(lambda: TRUE)
`
if _, err := cli.CreateTask(client.CreateTaskOptions{
ID: "testAlertHandlers",
Type: client.StreamTask,
DBRPs: []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}},
TICKscript: tick,
Status: client.Enabled,
}); err != nil {
t.Fatalf("%s: %v", kind, err)
}
point := "alert value=1 0000000000"
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", point, v)
// Close the entire server to ensure all data is processed
s.Close()
closed = true
if err := tc.result(ctxt); err != nil {
t.Errorf("%s: %v", kind, err)
}
})
}
}
func TestServer_Alert_Duration(t *testing.T) {
// Setup test TCP server
ts, err := alerttest.NewTCPServer()
if err != nil {
t.Fatal(err)
}
defer ts.Close()
// Create default config
c := NewConfig()
s := OpenServer(c)
cli := Client(s)
defer s.Close()
tick := `
stream
|from()
.measurement('alert')
|alert()
.id('id')
.message('message')
.details('details')
.crit(lambda: "value" > 1.0)
.tcp('` + ts.Addr + `')
`
if _, err := cli.CreateTask(client.CreateTaskOptions{
ID: "testAlertHandlers",
Type: client.StreamTask,
DBRPs: []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}},
TICKscript: tick,
Status: client.Enabled,
}); err != nil {
t.Fatal(err)
}
// Write point
point := "alert value=2 0000000000"
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", point, v)
// Restart the server
s.Restart()
topic := "main:testAlertHandlers:alert2"
l := cli.TopicEventsLink(topic)
expTopicEvents := client.TopicEvents{
Link: l,
Topic: topic,
Events: []client.TopicEvent{{
Link: client.Link{Relation: client.Self, Href: fmt.Sprintf("/kapacitor/v1preview/alerts/topics/%s/events/id", topic)},
ID: "id",
State: client.EventState{
Message: "message",
Details: "details",
Time: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
Duration: 0,
Level: "CRITICAL",
},
}},
}
te, err := cli.ListTopicEvents(l, nil)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(te, expTopicEvents) {
t.Errorf("unexpected topic events for anonymous topic:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents)
}
event, err := cli.TopicEvent(expTopicEvents.Events[0].Link)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(event, expTopicEvents.Events[0]) {
t.Errorf("unexpected topic event for anonymous topic:\ngot\n%+v\nexp\n%+v\n", event, expTopicEvents.Events[0])
}
// Write point
point = "alert value=3 0000000001"
v = url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", point, v)
// Restart the server
s.Restart()
expTopicEvents = client.TopicEvents{
Link: l,
Topic: topic,
Events: []client.TopicEvent{{
Link: client.Link{Relation: client.Self, Href: fmt.Sprintf("/kapacitor/v1preview/alerts/topics/%s/events/id", topic)},
ID: "id",
State: client.EventState{
Message: "message",
Details: "details",
Time: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
Duration: client.Duration(time.Second),
Level: "CRITICAL",
},
}},
}
te, err = cli.ListTopicEvents(l, nil)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(te, expTopicEvents) {
t.Errorf("unexpected topic events for anonymous topic after second point:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents)
}
}
func TestServer_Alert_Aggregate(t *testing.T) {
// Setup test TCP server
ts, err := alerttest.NewTCPServer()
if err != nil {
t.Fatal(err)
}
defer ts.Close()
// Create default config
c := NewConfig()
s := OpenServer(c)
cli := Client(s)
defer s.Close()
aggTopic := "agg"
// Create task for alert
tick := `
stream
|from()
.measurement('alert')
|alert()
.id('id')
.message('message')
.details('details')
.crit(lambda: "value" > 1.0)
.topic('` + aggTopic + `')
`
if _, err := cli.CreateTask(client.CreateTaskOptions{
ID: "agg_task",
Type: client.StreamTask,
DBRPs: []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}},
TICKscript: tick,
Status: client.Enabled,
}); err != nil {
t.Fatal(err)
}
// Create tcp handler on tcp topic
tcpTopic := "tcp"
if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink(tcpTopic), client.TopicHandlerOptions{
ID: "tcp_handler",
Kind: "tcp",
Options: map[string]interface{}{
"address": ts.Addr,
},
}); err != nil {
t.Fatal(err)
}
// Create aggregate handler on agg topic
if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink(aggTopic), client.TopicHandlerOptions{
ID: "aggregate_handler",
Kind: "aggregate",
Options: map[string]interface{}{
"id": "id-agg",
"interval": 100 * time.Millisecond,
"topic": "tcp",
},
}); err != nil {
t.Fatal(err)
}
// Write points
point := `alert value=3 0000000000000
alert value=4 0000000000001
alert value=2 0000000000002
`
v := url.Values{}
v.Add("precision", "ms")
s.MustWrite("mydb", "myrp", point, v)
time.Sleep(110 * time.Millisecond)
// Check TCP handler got event
alertData := alert.Data{
ID: "id-agg",
Message: "Received 3 events in the last 100ms.",
Details: "message\nmessage\nmessage",
Time: time.Date(1970, 1, 1, 0, 0, 0, 2000000, time.UTC),
Level: alert.Critical,
Duration: 2 * time.Millisecond,
Data: models.Result{
Series: models.Rows{
{
Name: "alert",
Columns: []string{"time", "value"},
Values: [][]interface{}{[]interface{}{
time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
3.0,
}},
},
{
Name: "alert",
Columns: []string{"time", "value"},
Values: [][]interface{}{[]interface{}{
time.Date(1970, 1, 1, 0, 0, 0, 1000000, time.UTC),
4.0,
}},
},
{
Name: "alert",
Columns: []string{"time", "value"},
Values: [][]interface{}{[]interface{}{
time.Date(1970, 1, 1, 0, 0, 0, 2000000, time.UTC),
2.0,
}},
},
},
},
}
ts.Close()
exp := []alert.Data{alertData}
got := ts.Data()
if !reflect.DeepEqual(exp, got) {
t.Errorf("unexpected tcp request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
// Check event on topic
l := cli.TopicEventsLink(tcpTopic)
expTopicEvents := client.TopicEvents{
Link: l,
Topic: tcpTopic,
Events: []client.TopicEvent{{
Link: client.Link{Relation: client.Self, Href: fmt.Sprintf("/kapacitor/v1preview/alerts/topics/%s/events/id-agg", tcpTopic)},
ID: "id-agg",
State: client.EventState{
Message: "Received 3 events in the last 100ms.",
Details: "message\nmessage\nmessage",
Time: time.Date(1970, 1, 1, 0, 0, 0, 2000000, time.UTC),
Duration: client.Duration(2 * time.Millisecond),
Level: "CRITICAL",
},
}},
}
te, err := cli.ListTopicEvents(l, nil)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(te, expTopicEvents) {
t.Errorf("unexpected topic events for aggregate topic:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents)
}
}
func TestServer_Alert_Publish(t *testing.T) {
// Setup test TCP server
ts, err := alerttest.NewTCPServer()
if err != nil {
t.Fatal(err)
}
defer ts.Close()
// Create default config
c := NewConfig()
s := OpenServer(c)
cli := Client(s)
defer s.Close()
publishTopic := "publish"
// Create task for alert
tick := `
stream
|from()
.measurement('alert')
|alert()
.id('id')
.message('message')
.details('details')
.crit(lambda: "value" > 1.0)
.topic('` + publishTopic + `')
`
if _, err := cli.CreateTask(client.CreateTaskOptions{
ID: "publish_task",
Type: client.StreamTask,
DBRPs: []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}},
TICKscript: tick,
Status: client.Enabled,
}); err != nil {
t.Fatal(err)
}
// Create tcp handler on tcp topic
tcpTopic := "tcp"
if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink(tcpTopic), client.TopicHandlerOptions{
ID: "tcp_handler",
Kind: "tcp",
Options: map[string]interface{}{
"address": ts.Addr,
},
}); err != nil {
t.Fatal(err)
}
// Create publish handler on publish topic
if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink(publishTopic), client.TopicHandlerOptions{
ID: "publish_handler",
Kind: "publish",
Options: map[string]interface{}{
// Publish to tcpTopic
"topics": []string{tcpTopic},
},
}); err != nil {
t.Fatal(err)
}
// Write points
point := `alert value=2 0000000000`
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", point, v)
s.Restart()
// Check TCP handler got event
alertData := alert.Data{
ID: "id",
Message: "message",
Details: "details",
Time: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
Level: alert.Critical,
Data: models.Result{
Series: models.Rows{
{
Name: "alert",
Columns: []string{"time", "value"},
Values: [][]interface{}{[]interface{}{
time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
2.0,
}},
},
},
},
}
ts.Close()
exp := []alert.Data{alertData}
got := ts.Data()
if !reflect.DeepEqual(exp, got) {
t.Errorf("unexpected tcp request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
// Check event on topic
l := cli.TopicEventsLink(tcpTopic)
expTopicEvents := client.TopicEvents{
Link: l,
Topic: tcpTopic,
Events: []client.TopicEvent{{
Link: client.Link{Relation: client.Self, Href: fmt.Sprintf("/kapacitor/v1preview/alerts/topics/%s/events/id", tcpTopic)},
ID: "id",
State: client.EventState{
Message: "message",
Details: "details",
Time: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
Duration: 0,
Level: "CRITICAL",
},
}},
}
te, err := cli.ListTopicEvents(l, nil)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(te, expTopicEvents) {
t.Errorf("unexpected topic events for publish topic:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents)
}
}
func TestServer_Alert_Match(t *testing.T) {
// Setup test TCP server
ts, err := alerttest.NewTCPServer()
if err != nil {
t.Fatal(err)
}
defer ts.Close()
// Create default config
c := NewConfig()
s := OpenServer(c)
cli := Client(s)
defer s.Close()
topic := "test"
// Create task for alert
tick := `
stream
|from()
.measurement('alert')
|alert()
.id('id')
.message('message')
.details('details')
.crit(lambda: "value" > 1.0)
.topic('` + topic + `')
`
if _, err := cli.CreateTask(client.CreateTaskOptions{
ID: "alert_task",
Type: client.StreamTask,
DBRPs: []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}},
TICKscript: tick,
Status: client.Enabled,
}); err != nil {
t.Fatal(err)
}
// Create tcp handler with match condition
if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink(topic), client.TopicHandlerOptions{
ID: "tcp_handler",
Kind: "tcp",
Options: map[string]interface{}{
"address": ts.Addr,
},
Match: `"host" == 'serverA' AND level() == CRITICAL`,
}); err != nil {
t.Fatal(err)
}
// Write points
point := `alert,host=serverA value=0 0000000000
alert,host=serverB value=2 0000000001
alert,host=serverB value=0 0000000002
alert,host=serverA value=2 0000000003
alert,host=serverB value=0 0000000004
`
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", point, v)
s.Restart()
alertData := alert.Data{
ID: "id",
Message: "message",
Details: "details",
Time: time.Date(1970, 1, 1, 0, 0, 3, 0, time.UTC),
Level: alert.Critical,
Data: models.Result{
Series: models.Rows{
{
Name: "alert",
Tags: map[string]string{"host": "serverA"},
Columns: []string{"time", "value"},
Values: [][]interface{}{[]interface{}{
time.Date(1970, 1, 1, 0, 0, 3, 0, time.UTC),
2.0,
}},
},
},
},
}
ts.Close()
exp := []alert.Data{alertData}
got := ts.Data()
if !reflect.DeepEqual(exp, got) {
t.Errorf("unexpected tcp request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
// Topic should have the most recent event
l := cli.TopicEventsLink(topic)
expTopicEvents := client.TopicEvents{
Link: l,
Topic: topic,
Events: []client.TopicEvent{{
Link: client.Link{Relation: client.Self, Href: fmt.Sprintf("/kapacitor/v1preview/alerts/topics/%s/events/id", topic)},
ID: "id",
State: client.EventState{
Message: "message",
Details: "details",
Time: time.Date(1970, 1, 1, 0, 0, 4, 0, time.UTC),
Duration: client.Duration(time.Second),
Level: "OK",
},
}},
}
te, err := cli.ListTopicEvents(l, nil)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(te, expTopicEvents) {
t.Errorf("unexpected topic events for publish topic:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents)
}
}
func TestServer_AlertAnonTopic(t *testing.T) {
// Setup test TCP server
ts, err := alerttest.NewTCPServer()
if err != nil {
t.Fatal(err)
}
defer ts.Close()
// Create default config
c := NewConfig()
s := OpenServer(c)
cli := Client(s)
defer s.Close()
tick := `
stream
|from()
.measurement('alert')
|alert()
.id('id')
.message('message')
.details('details')
.warn(lambda: "value" <= 1.0)
.crit(lambda: "value" > 1.0)
.tcp('` + ts.Addr + `')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: "testAlertHandlers",
Type: client.StreamTask,
DBRPs: []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}},
TICKscript: tick,
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
// Write warning point
point := "alert value=1 0000000000"
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", point, v)
// Restart the server
s.Restart()
topic := "main:testAlertHandlers:alert2"
l := cli.TopicEventsLink(topic)
expTopicEvents := client.TopicEvents{
Link: l,
Topic: topic,
Events: []client.TopicEvent{{
Link: client.Link{Relation: client.Self, Href: fmt.Sprintf("/kapacitor/v1preview/alerts/topics/%s/events/id", topic)},
ID: "id",
State: client.EventState{
Message: "message",
Details: "details",
Time: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
Duration: 0,
Level: "WARNING",
},
}},
}
te, err := cli.ListTopicEvents(l, nil)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(te, expTopicEvents) {
t.Errorf("unexpected topic events for anonymous topic:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents)
}
event, err := cli.TopicEvent(expTopicEvents.Events[0].Link)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(event, expTopicEvents.Events[0]) {
t.Errorf("unexpected topic event for anonymous topic:\ngot\n%+v\nexp\n%+v\n", event, expTopicEvents.Events[0])
}
// Disable task
task, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
if _, err := cli.ListTopicEvents(l, nil); err == nil {
t.Fatal("expected error listing anonymous topic for disabled task")
} else if got, exp := err.Error(), fmt.Sprintf("failed to get topic events: unknown topic %q", topic); got != exp {
t.Errorf("unexpected error message for nonexistent anonymous topic: got %q exp %q", got, exp)
}
// Enable task
task, err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{
Status: client.Enabled,
})
if err != nil {
t.Fatal(err)
}
te, err = cli.ListTopicEvents(l, nil)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(te, expTopicEvents) {
t.Errorf("unexpected topic events for anonymous topic after re-enable:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents)
}
// Restart the server again and ensure that the anonymous topic state is restored
s.Restart()
te, err = cli.ListTopicEvents(l, nil)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(te, expTopicEvents) {
t.Errorf("unexpected topic events for anonymous topic after re-enable and restart:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents)
}
// Delete task
if err := cli.DeleteTask(task.Link); err != nil {
t.Fatal(err)
}
if _, err := cli.ListTopicEvents(l, nil); err == nil {
t.Fatal("expected error listing anonymous topic for deleted task")
} else if got, exp := err.Error(), fmt.Sprintf("failed to get topic events: unknown topic %q", topic); got != exp {
t.Errorf("unexpected error message for nonexistent anonymous topic: got %q exp %q", got, exp)
}
}
func TestServer_AlertTopic_PersistedState(t *testing.T) {
// Setup test TCP server
ts, err := alerttest.NewTCPServer()
if err != nil {
t.Fatal(err)
}
defer ts.Close()
tmpDir := MustTempDir()
defer os.RemoveAll(tmpDir)
tmpPath := filepath.Join(tmpDir, "alert.log")
// Create default config
c := NewConfig()
s := OpenServer(c)
cli := Client(s)
defer s.Close()
if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink("test"), client.TopicHandlerOptions{
ID: "testAlertHandler",
Kind: "tcp",
Options: map[string]interface{}{"address": ts.Addr},
}); err != nil {
t.Fatal(err)
}
tick := `
stream
|from()
.measurement('alert')
|alert()
.topic('test')
.id('id')
.message('message')
.details('details')
.warn(lambda: TRUE)
.log('` + tmpPath + `')
`
if _, err := cli.CreateTask(client.CreateTaskOptions{
ID: "testAlertHandlers",
Type: client.StreamTask,
DBRPs: []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}},
TICKscript: tick,
Status: client.Enabled,
}); err != nil {
t.Fatal(err)
}
point := "alert value=1 0000000000"
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", point, v)
// Restart the server
s.Restart()
topics := []string{
"test",
"main:testAlertHandlers:alert2",
}
for _, topic := range topics {
l := cli.TopicEventsLink(topic)
expTopicEvents := client.TopicEvents{
Link: l,
Topic: topic,
Events: []client.TopicEvent{{
Link: client.Link{Relation: client.Self, Href: fmt.Sprintf("/kapacitor/v1preview/alerts/topics/%s/events/id", topic)},
ID: "id",
State: client.EventState{
Message: "message",
Details: "details",
Time: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
Duration: 0,
Level: "WARNING",
},
}},
}
te, err := cli.ListTopicEvents(l, nil)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(te, expTopicEvents) {
t.Errorf("unexpected topic events for topic %q:\ngot\n%+v\nexp\n%+v\n", topic, te, expTopicEvents)
}
event, err := cli.TopicEvent(expTopicEvents.Events[0].Link)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(event, expTopicEvents.Events[0]) {
t.Errorf("unexpected topic event for topic %q:\ngot\n%+v\nexp\n%+v\n", topic, event, expTopicEvents.Events[0])
}
te, err = cli.ListTopicEvents(l, &client.ListTopicEventsOptions{
MinLevel: "CRITICAL",
})
if err != nil {
t.Fatal(err)
}
expTopicEvents.Events = expTopicEvents.Events[0:0]
if !reflect.DeepEqual(te, expTopicEvents) {
t.Errorf("unexpected topic events with minLevel for topic %q:\ngot\n%+v\nexp\n%+v\n", topic, te, expTopicEvents)
}
l = cli.TopicLink(topic)
if err := cli.DeleteTopic(l); err != nil {
t.Fatal(err)
}
te, err = cli.ListTopicEvents(l, nil)
if err == nil {
t.Fatalf("expected error for deleted topic %q", topic)
}
}
}
func TestServer_AlertListHandlers(t *testing.T) {
// Setup test TCP server
ts, err := alerttest.NewTCPServer()
if err != nil {
t.Fatal(err)
}
defer ts.Close()
// Create default config
c := NewConfig()
s := OpenServer(c)
cli := Client(s)
defer s.Close()
thl := cli.TopicHandlersLink("test")
// Number of handlers to create
n := 3
for i := 0; i < n; i++ {
id := fmt.Sprintf("handler%d", i)
if _, err := cli.CreateTopicHandler(thl, client.TopicHandlerOptions{
ID: id,
Kind: "tcp",
Options: map[string]interface{}{"address": ts.Addr},
}); err != nil {
t.Fatal(err)
}
}
expHandlers := client.TopicHandlers{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/test/handlers?pattern="},
Topic: "test",
Handlers: []client.TopicHandler{
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/test/handlers/handler0"},
ID: "handler0",
Kind: "tcp",
Options: map[string]interface{}{"address": ts.Addr},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/test/handlers/handler1"},
ID: "handler1",
Kind: "tcp",
Options: map[string]interface{}{"address": ts.Addr},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/test/handlers/handler2"},
ID: "handler2",
Kind: "tcp",
Options: map[string]interface{}{"address": ts.Addr},
},
},
}
handlers, err := cli.ListTopicHandlers(thl, nil)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(handlers, expHandlers) {
t.Errorf("unexpected handlers:\ngot\n%+v\nexp\n%+v\n", handlers, expHandlers)
}
// Restart the server
s.Restart()
// Check again
handlers, err = cli.ListTopicHandlers(thl, nil)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(handlers, expHandlers) {
t.Errorf("unexpected handlers after restart:\ngot\n%+v\nexp\n%+v\n", handlers, expHandlers)
}
var exp client.TopicHandlers
// Pattern = *
handlers, err = cli.ListTopicHandlers(thl, &client.ListTopicHandlersOptions{
Pattern: "*",
})
if err != nil {
t.Fatal(err)
}
exp = expHandlers
exp.Link.Href = "/kapacitor/v1preview/alerts/topics/test/handlers?pattern=%2A"
if !reflect.DeepEqual(handlers, exp) {
t.Errorf("unexpected handlers with pattern \"*\":\ngot\n%+v\nexp\n%+v\n", handlers, exp)
}
// Pattern = handler*
handlers, err = cli.ListTopicHandlers(thl, &client.ListTopicHandlersOptions{
Pattern: "handler*",
})
if err != nil {
t.Fatal(err)
}
exp = expHandlers
exp.Link.Href = "/kapacitor/v1preview/alerts/topics/test/handlers?pattern=handler%2A"
if !reflect.DeepEqual(handlers, exp) {
t.Errorf("unexpected handlers with pattern \"handler*\":\ngot\n%+v\nexp\n%+v\n", handlers, exp)
}
// Pattern = handler0
handlers, err = cli.ListTopicHandlers(thl, &client.ListTopicHandlersOptions{
Pattern: "handler0",
})
if err != nil {
t.Fatal(err)
}
exp = expHandlers
exp.Link.Href = "/kapacitor/v1preview/alerts/topics/test/handlers?pattern=handler0"
exp.Handlers = expHandlers.Handlers[0:1]
if !reflect.DeepEqual(handlers, exp) {
t.Errorf("unexpected handlers with pattern \"handler0\":\ngot\n%+v\nexp\n%+v\n", handlers, exp)
}
}
func TestServer_AlertTopic(t *testing.T) {
// Create default config
c := NewConfig()
s := OpenServer(c)
cli := Client(s)
defer s.Close()
if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink("misc"), client.TopicHandlerOptions{
ID: "testAlertHandler",
Kind: "tcp",
Options: map[string]interface{}{"address": "localhost:4657"},
}); err != nil {
t.Fatal(err)
}
expTopic := client.Topic{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/misc"},
ID: "misc",
Level: "OK",
Collected: 0,
EventsLink: client.Link{Relation: "events", Href: "/kapacitor/v1preview/alerts/topics/misc/events"},
HandlersLink: client.Link{Relation: "handlers", Href: "/kapacitor/v1preview/alerts/topics/misc/handlers"},
}
topic, err := cli.Topic(cli.TopicLink("misc"))
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(topic, expTopic) {
t.Errorf("unexpected topic:\ngot\n%+v\nexp\n%+v\n", topic, expTopic)
}
}
func TestServer_AlertListTopics(t *testing.T) {
// Setup test TCP server
ts, err := alerttest.NewTCPServer()
if err != nil {
t.Fatal(err)
}
defer ts.Close()
// Create default config
c := NewConfig()
s := OpenServer(c)
cli := Client(s)
defer s.Close()
for _, topic := range []string{"system", "misc", "test"} {
if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink(topic), client.TopicHandlerOptions{
ID: "testAlertHandler",
Kind: "tcp",
Options: map[string]interface{}{"address": ts.Addr},
}); err != nil {
t.Fatal(err)
}
}
expTopics := client.Topics{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics?min-level=OK&pattern="},
Topics: []client.Topic{
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/misc"},
ID: "misc",
Level: "OK",
EventsLink: client.Link{Relation: "events", Href: "/kapacitor/v1preview/alerts/topics/misc/events"},
HandlersLink: client.Link{Relation: "handlers", Href: "/kapacitor/v1preview/alerts/topics/misc/handlers"},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system"},
ID: "system",
Level: "OK",
EventsLink: client.Link{Relation: "events", Href: "/kapacitor/v1preview/alerts/topics/system/events"},
HandlersLink: client.Link{Relation: "handlers", Href: "/kapacitor/v1preview/alerts/topics/system/handlers"},
},
{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/test"},
ID: "test",
Level: "OK",
EventsLink: client.Link{Relation: "events", Href: "/kapacitor/v1preview/alerts/topics/test/events"},
HandlersLink: client.Link{Relation: "handlers", Href: "/kapacitor/v1preview/alerts/topics/test/handlers"},
},
},
}
topics, err := cli.ListTopics(nil)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(topics, expTopics) {
t.Errorf("unexpected topics:\ngot\n%+v\nexp\n%+v\n", topics, expTopics)
}
tick := `
stream
|from()
.measurement('alert')
|alert()
.topic('test')
.id('id')
.message('message')
.details('details')
.crit(lambda: TRUE)
`
if _, err := cli.CreateTask(client.CreateTaskOptions{
ID: "testAlertHandlers",
Type: client.StreamTask,
DBRPs: []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}},
TICKscript: tick,
Status: client.Enabled,
}); err != nil {
t.Fatal(err)
}
point := "alert value=1 0000000000"
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", point, v)
// Restart the server
s.Restart()
// Update expected topics since we triggered an event.
expTopics.Topics[2].Level = "CRITICAL"
// Check again
topics, err = cli.ListTopics(nil)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(topics, expTopics) {
t.Errorf("unexpected topics after restart:\ngot\n%+v\nexp\n%+v\n", topics, expTopics)
}
var exp client.Topics
// Pattern = *
topics, err = cli.ListTopics(&client.ListTopicsOptions{
Pattern: "*",
})
if err != nil {
t.Fatal(err)
}
exp = expTopics
exp.Link.Href = "/kapacitor/v1preview/alerts/topics?min-level=OK&pattern=%2A"
if !reflect.DeepEqual(topics, exp) {
t.Errorf("unexpected topics with pattern \"*\":\ngot\n%+v\nexp\n%+v\n", topics, exp)
}
// Pattern = test
topics, err = cli.ListTopics(&client.ListTopicsOptions{
Pattern: "test",
})
if err != nil {
t.Fatal(err)
}
exp = expTopics
exp.Link.Href = "/kapacitor/v1preview/alerts/topics?min-level=OK&pattern=test"
exp.Topics = expTopics.Topics[2:]
if !reflect.DeepEqual(topics, exp) {
t.Errorf("unexpected topics with pattern \"test\":\ngot\n%+v\nexp\n%+v\n", topics, exp)
}
// MinLevel = INFO
topics, err = cli.ListTopics(&client.ListTopicsOptions{
MinLevel: "INFO",
})
if err != nil {
t.Fatal(err)
}
exp = expTopics
exp.Link.Href = "/kapacitor/v1preview/alerts/topics?min-level=INFO&pattern="
exp.Topics = expTopics.Topics[2:]
if !reflect.DeepEqual(topics, exp) {
t.Errorf("unexpected topics min level \"info\":\ngot\n%+v\nexp\n%+v\n", topics, exp)
}
}
func TestServer_AlertHandler_MultipleHandlers(t *testing.T) {
resultJSON := `{"series":[{"name":"alert","columns":["time","value"],"values":[["1970-01-01T00:00:00Z",1]]}]}`
// Create default config
c := NewConfig()
// Configure slack
slack := slacktest.NewServer()
c.Slack.Enabled = true
c.Slack.URL = slack.URL + "/test/slack/url"
// Configure victorops
vo := victoropstest.NewServer()
c.VictorOps.Enabled = true
c.VictorOps.URL = vo.URL
c.VictorOps.APIKey = "api_key"
s := OpenServer(c)
cli := Client(s)
closed := false
defer func() {
if !closed {
s.Close()
}
}()
if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink("test"), client.TopicHandlerOptions{
ID: "testAlertHandlers-VO",
Kind: "victorops",
Options: map[string]interface{}{
"routing-key": "key",
},
}); err != nil {
t.Fatal(err)
}
if _, err := cli.CreateTopicHandler(cli.TopicHandlersLink("test"), client.TopicHandlerOptions{
ID: "testAlertHandlers-Slack",
Kind: "slack",
Options: map[string]interface{}{
"channel": "#test",
},
}); err != nil {
t.Fatal(err)
}
tick := `
stream
|from()
.measurement('alert')
|alert()
.topic('test')
.id('id')
.message('message')
.details('details')
.crit(lambda: TRUE)
`
if _, err := cli.CreateTask(client.CreateTaskOptions{
ID: "testAlertHandlers",
Type: client.StreamTask,
DBRPs: []client.DBRP{{
Database: "mydb",
RetentionPolicy: "myrp",
}},
TICKscript: tick,
Status: client.Enabled,
}); err != nil {
t.Fatal(err)
}
point := "alert value=1 0000000000"
v := url.Values{}
v.Add("precision", "s")
s.MustWrite("mydb", "myrp", point, v)
// Close the entire server to ensure all data is processed
s.Close()
closed = true
// Validate slack
{
slack.Close()
got := slack.Requests()
exp := []slacktest.Request{{
URL: "/test/slack/url",
PostData: slacktest.PostData{
Channel: "#test",
Username: "kapacitor",
Text: "",
Attachments: []slacktest.Attachment{
{
Fallback: "message",
Color: "danger",
Text: "message",
Mrkdwn_in: []string{"text"},
},
},
},
}}
if !reflect.DeepEqual(exp, got) {
t.Errorf("unexpected slack request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
}
// Validate victorops
{
vo.Close()
got := vo.Requests()
exp := []victoropstest.Request{{
URL: "/api_key/key",
PostData: victoropstest.PostData{
MessageType: "CRITICAL",
EntityID: "id",
StateMessage: "message",
Timestamp: 0,
MonitoringTool: "kapacitor",
Data: resultJSON,
},
}}
if !reflect.DeepEqual(exp, got) {
t.Errorf("unexpected victorops request:\nexp\n%+v\ngot\n%+v\n", exp, got)
}
}
}
func TestStorage_Rebuild(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
storages, err := cli.ListStorage()
if err != nil {
t.Fatal(err)
}
for _, storage := range storages.Storage {
t.Log(storage.Link)
err := cli.DoStorageAction(storage.Link, client.StorageActionOptions{
Action: client.StorageRebuild,
})
if err != nil {
t.Errorf("error rebuilding storage %q: %v", storage.Name, err)
}
}
}
func TestStorage_Backup(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
// Create a task
id := "testTaskID"
ttype := client.StreamTask
dbrps := []client.DBRP{
{
Database: "mydb",
RetentionPolicy: "myrp",
},
{
Database: "otherdb",
RetentionPolicy: "default",
},
}
tick := `stream
|from()
.measurement('test')
`
task, err := cli.CreateTask(client.CreateTaskOptions{
ID: id,
Type: ttype,
DBRPs: dbrps,
TICKscript: tick,
Status: client.Disabled,
})
if err != nil {
t.Fatal(err)
}
// Perform backup
size, r, err := cli.Backup()
if err != nil {
t.Fatal(err)
}
defer r.Close()
backup, err := ioutil.ReadAll(r)
if err != nil {
t.Fatal(err)
}
if got, exp := int64(len(backup)), size; got != exp {
t.Fatalf("unexpected backup size got %d exp %d", got, exp)
}
// Stop the server
s.Stop()
// Restore from backup
if err := ioutil.WriteFile(s.Config.Storage.BoltDBPath, backup, 0644); err != nil {
t.Fatal(err)
}
// Start the server again
s.Start()
// Check that the task was restored
ti, err := cli.Task(task.Link, nil)
if err != nil {
t.Fatal(err)
}
if ti.Error != "" {
t.Fatal(ti.Error)
}
if ti.ID != id {
t.Fatalf("unexpected id got %s exp %s", ti.ID, id)
}
if ti.Type != client.StreamTask {
t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask)
}
if ti.Status != client.Disabled {
t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled)
}
if !reflect.DeepEqual(ti.DBRPs, dbrps) {
t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps)
}
if ti.TICKscript != tick {
t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick)
}
dot := "digraph testTaskID {\nstream0 -> from1;\n}"
if ti.Dot != dot {
t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot)
}
}
func TestLoadService(t *testing.T) {
s, c, cli := OpenLoadServer()
// If the list of test fixtures changes, update this list
tasks := []string{"base", "cpu_alert", "implicit", "join", "other"}
ts, err := cli.ListTasks(nil)
if err != nil {
t.Fatalf("enountered error listing tasks: %v", err)
}
for i, task := range ts {
if exp, got := tasks[i], task.ID; exp != got {
t.Fatalf("expected task ID to be %v, got %v\n", exp, got)
}
}
// If the list of test fixtures changes, update this list
templates := []string{"base_template", "implicit_template"}
tmps, err := cli.ListTemplates(nil)
if err != nil {
t.Fatalf("enountered error listing tasks: %v", err)
}
for i, template := range tmps {
if exp, got := templates[i], template.ID; exp != got {
t.Fatalf("expected template ID to be %v, got %v\n", exp, got)
}
}
// If the list of test fixtures changes, update this list
topicHandlers := []string{"example", "other"}
link := cli.TopicHandlersLink("cpu")
ths, err := cli.ListTopicHandlers(link, nil)
if err != nil {
t.Fatalf("enountered error listing tasks: %v", err)
}
for i, th := range ths.Handlers {
if exp, got := topicHandlers[i], th.ID; exp != got {
t.Fatalf("expected topic-handler ID to be %v, got %v\n", exp, got)
}
}
// rename task file
err = os.Rename(
path.Join(c.Load.Dir, "tasks", "join.tick"),
path.Join(c.Load.Dir, "tasks", "z.tick"),
)
if err != nil {
t.Fatalf("failed to rename tickscript: %v", err)
}
// reload
s.Reload()
// If the list of test fixtures changes, update this list
tasks = []string{"base", "cpu_alert", "implicit", "other", "z"}
ts, err = cli.ListTasks(nil)
if err != nil {
t.Fatalf("enountered error listing tasks: %v", err)
}
for i, task := range ts {
if exp, got := tasks[i], task.ID; exp != got {
t.Fatalf("expected task ID to be %v, got %v\n", exp, got)
}
}
// rename template file
err = os.Rename(
path.Join(c.Load.Dir, "templates", "base_template.tick"),
path.Join(c.Load.Dir, "templates", "new.tick"),
)
if err != nil {
t.Fatalf("failed to rename tickscript: %v", err)
}
// reload
s.Reload()
// If the list of test fixtures changes, update this list
templates = []string{"implicit_template", "new"}
tmps, err = cli.ListTemplates(nil)
if err != nil {
t.Fatalf("enountered error listing templates: %v", err)
}
for i, template := range tmps {
if exp, got := templates[i], template.ID; exp != got {
t.Fatalf("expected template ID to be %v, got %v\n", exp, got)
}
}
// move template file back
err = os.Rename(
path.Join(c.Load.Dir, "templates", "new.tick"),
path.Join(c.Load.Dir, "templates", "base_template.tick"),
)
if err != nil {
t.Fatalf("failed to rename tickscript: %v", err)
}
// add a new handler
f, err := os.Create(path.Join(c.Load.Dir, "handlers", "new.tick"))
if err != nil {
t.Fatalf("failed to create new handler file: %v", err)
}
script := `topic: cpu
id: new
kind: slack
match: changed() == TRUE
options:
channel: '#alerts'
`
if _, err := f.Write([]byte(script)); err != nil {
t.Fatalf("failed to write handler: %v", err)
}
f.Close()
// remove a handler file
if err := os.Remove(path.Join(c.Load.Dir, "handlers", "other.yaml")); err != nil {
t.Fatalf("failed to remove handler file: %v", err)
}
// reload
s.Reload()
// If the list of test fixtures changes, update this list
topicHandlers = []string{"example", "new"}
link = cli.TopicHandlersLink("cpu")
ths, err = cli.ListTopicHandlers(link, nil)
if err != nil {
t.Fatalf("enountered error listing topic-handlers: %v", err)
}
for i, th := range ths.Handlers {
if exp, got := topicHandlers[i], th.ID; exp != got {
t.Fatalf("expected topic-handler ID to be %v, got %v\n", exp, got)
}
}
}
func TestLogSessions_HeaderJSON(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
u := cli.BaseURL()
u.Path = "/logs"
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
t.Fatal(err)
return
}
req.Header.Add("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
return
}
defer resp.Body.Close()
if exp, got := "application/json; charset=utf-8", resp.Header.Get("Content-Type"); exp != got {
t.Fatalf("expected: %v, got: %v\n", exp, got)
return
}
}
func TestLogSessions_HeaderGzip(t *testing.T) {
s, cli := OpenDefaultServer()
defer s.Close()
u := cli.BaseURL()
u.Path = "/logs"
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
t.Fatal(err)
return
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
return
}
defer resp.Body.Close()
if exp, got := "", resp.Header.Get("Content-Encoding"); exp != got {
t.Fatalf("expected: %v, got: %v\n", exp, got)
return
}
}
| [
"\"PYTHONPATH\"",
"\"PYTHONPATH\"",
"\"PYTHONPATH\""
] | [] | [
"PYTHONPATH"
] | [] | ["PYTHONPATH"] | go | 1 | 0 | |
pkg/karmadactl/logs.go | package karmadactl
import (
"context"
"errors"
"fmt"
"io"
"os"
"regexp"
"sync"
"time"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/rest"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/polymorphichelpers"
karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
"github.com/karmada-io/karmada/pkg/karmadactl/options"
"github.com/karmada-io/karmada/pkg/util/gclient"
"github.com/karmada-io/karmada/pkg/util/lifted"
)
const (
defaultPodLogsTimeout = 20 * time.Second
logsUsageStr = "logs [-f] [-p] (POD | TYPE/NAME) [-c CONTAINER] (-C CLUSTER)"
)
var (
selectorTail int64 = 10
logsUsageErrStr = fmt.Sprintf("expected '%s'.\nPOD or TYPE/NAME is a required argument for the logs command", logsUsageStr)
)
// NewCmdLogs creates a new logs command.
func NewCmdLogs(out io.Writer, karmadaConfig KarmadaConfig, parentCommand string) *cobra.Command {
ioStreams := genericclioptions.IOStreams{In: getIn, Out: getOut, ErrOut: getErr}
o := NewCommandLogsOptions(ioStreams, false)
cmd := &cobra.Command{
Use: logsUsageStr,
Short: "Print the logs for a container in a pod in a cluster",
SilenceUsage: true,
Example: getLogsExample(parentCommand),
RunE: func(cmd *cobra.Command, args []string) error {
if err := o.Complete(karmadaConfig, cmd, args); err != nil {
return err
}
if err := o.Validate(); err != nil {
return err
}
if err := o.Run(); err != nil {
return err
}
return nil
},
}
o.GlobalCommandOptions.AddFlags(cmd.Flags())
o.AddFlags(cmd)
return cmd
}
// getLogsExample returns logs command examples for the given parent command
func getLogsExample(parentCommand string) string {
example := `
# Return snapshot logs from pod nginx with only one container in cluster(member1)` + "\n" +
fmt.Sprintf("%s logs nginx -C=member1", parentCommand) + `
# Return snapshot logs from pod nginx with multi containers in cluster(member1)` + "\n" +
fmt.Sprintf("%s get logs nginx --all-containers=true -C=member1", parentCommand) + `
# Return snapshot logs from all containers in pods defined by label app=nginx in cluster(member1)` + "\n" +
fmt.Sprintf("%s get logs -l app=nginx --all-containers=true -C=member1", parentCommand) + `
# Return snapshot of previous terminated ruby container logs from pod web-1 in cluster(member1)` + "\n" +
fmt.Sprintf("%s get logs -p -c ruby web-1 -C=member1", parentCommand) + `
# Begin streaming the logs of the ruby container in pod web-1 in cluster(member1)` + "\n" +
fmt.Sprintf("%s logs -f -c ruby web-1 -C=member1", parentCommand) + `
# Begin streaming the logs from all containers in pods defined by label app=nginx in cluster(member1) ` + "\n" +
fmt.Sprintf("%s logs -f -l app=nginx --all-containers=true -C=member1", parentCommand) + `
# Display only the most recent 20 lines of output in pod nginx in cluster(member1) ` + "\n" +
fmt.Sprintf("%s logs --tail=20 nginx -C=member1", parentCommand) + `
# Show all logs from pod nginx written in the last hour in cluster(member1) ` + "\n" +
fmt.Sprintf("%s --since=1h nginx -C=member1", parentCommand)
return example
}
// CommandLogsOptions contains the input to the logs command.
type CommandLogsOptions struct {
// global flags
options.GlobalCommandOptions
Cluster string
Namespace string
ResourceArg string
AllContainers bool
Options runtime.Object
Resources []string
ConsumeRequestFn func(rest.ResponseWrapper, io.Writer) error
// PodLogOptions
SinceTime string
Since time.Duration
Follow bool
Previous bool
Timestamps bool
IgnoreLogErrors bool
LimitBytes int64
Tail int64
Container string
InsecureSkipTLSVerifyBackend bool
// whether or not a container name was given via --container
ContainerNameSpecified bool
Selector string
MaxFollowConcurrency int
Prefix bool
Object runtime.Object
GetPodTimeout time.Duration
RESTClientGetter genericclioptions.RESTClientGetter
LogsForObject polymorphichelpers.LogsForObjectFunc
genericclioptions.IOStreams
TailSpecified bool
containerNameFromRefSpecRegexp *regexp.Regexp
f cmdutil.Factory
}
// NewCommandLogsOptions returns a LogsOptions.
func NewCommandLogsOptions(streams genericclioptions.IOStreams, allContainers bool) *CommandLogsOptions {
return &CommandLogsOptions{
IOStreams: streams,
AllContainers: allContainers,
Tail: -1,
MaxFollowConcurrency: 5,
containerNameFromRefSpecRegexp: regexp.MustCompile(`spec\.(?:initContainers|containers|ephemeralContainers){(.+)}`),
}
}
// AddFlags adds flags to the specified FlagSet.
func (o *CommandLogsOptions) AddFlags(cmd *cobra.Command) {
cmd.Flags().BoolVar(&o.AllContainers, "all-containers", o.AllContainers, "Get all containers' logs in the pod(s).")
cmd.Flags().BoolVarP(&o.Follow, "follow", "f", o.Follow, "Specify if the logs should be streamed.")
cmd.Flags().BoolVar(&o.Timestamps, "timestamps", o.Timestamps, "Include timestamps on each line in the log output")
cmd.Flags().Int64Var(&o.LimitBytes, "limit-bytes", o.LimitBytes, "Maximum bytes of logs to return. Defaults to no limit.")
cmd.Flags().BoolVarP(&o.Previous, "previous", "p", o.Previous, "If true, print the logs for the previous instance of the container in a pod if it exists.")
cmd.Flags().Int64Var(&o.Tail, "tail", o.Tail, "Lines of recent log file to display. Defaults to -1 with no selector, showing all log lines otherwise 10, if a selector is provided.")
cmd.Flags().BoolVar(&o.IgnoreLogErrors, "ignore-errors", o.IgnoreLogErrors, "If watching / following pod logs, allow for any errors that occur to be non-fatal")
cmd.Flags().StringVar(&o.SinceTime, "since-time", o.SinceTime, "Only return logs after a specific date (RFC3339). Defaults to all logs. Only one of since-time / since may be used.")
cmd.Flags().DurationVar(&o.Since, "since", o.Since, "Only return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs. Only one of since-time / since may be used.")
cmd.Flags().StringVarP(&o.Container, "container", "c", o.Container, "Print the logs of this container")
cmd.Flags().BoolVar(&o.InsecureSkipTLSVerifyBackend, "insecure-skip-tls-verify-backend", o.InsecureSkipTLSVerifyBackend,
"Skip verifying the identity of the kubelet that logs are requested from. In theory, an attacker could provide invalid log content back. You might want to use this if your kubelet serving certificates have expired.")
cmdutil.AddPodRunningTimeoutFlag(cmd, defaultPodLogsTimeout)
cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on.")
cmd.Flags().IntVar(&o.MaxFollowConcurrency, "max-log-requests", o.MaxFollowConcurrency, "Specify maximum number of concurrent logs to follow when using by a selector. Defaults to 5.")
cmd.Flags().BoolVar(&o.Prefix, "prefix", o.Prefix, "Prefix each log line with the log source (pod name and container name)")
cmd.Flags().StringVarP(&o.Namespace, "namespace", "n", "default", "-n=namespace or -n namespace")
cmd.Flags().StringVarP(&o.Cluster, "cluster", "C", "", "-C=member1")
}
// Complete ensures that options are valid and marshals them if necessary
func (o *CommandLogsOptions) Complete(karmadaConfig KarmadaConfig, cmd *cobra.Command, args []string) error {
o.ContainerNameSpecified = cmd.Flag("container").Changed
o.TailSpecified = cmd.Flag("tail").Changed
o.Resources = args
switch len(args) {
case 0:
if len(o.Selector) == 0 {
return cmdutil.UsageErrorf(cmd, "%s", logsUsageErrStr)
}
case 1:
o.ResourceArg = args[0]
if len(o.Selector) != 0 {
return cmdutil.UsageErrorf(cmd, "only a selector (-l) or a POD name is allowed")
}
case 2:
o.ResourceArg = args[0]
o.Container = args[1]
default:
return cmdutil.UsageErrorf(cmd, "%s", logsUsageErrStr)
}
var err error
o.ConsumeRequestFn = lifted.DefaultConsumeRequest
o.GetPodTimeout, err = cmdutil.GetPodRunningTimeoutFlag(cmd)
if err != nil {
return err
}
o.Options, err = o.toLogOptions()
if err != nil {
return err
}
if len(o.Cluster) == 0 {
return fmt.Errorf("must specify a cluster")
}
karmadaRestConfig, err := karmadaConfig.GetRestConfig(o.KarmadaContext, o.KubeConfig)
if err != nil {
return fmt.Errorf("failed to get control plane rest config. context: %s, kube-config: %s, error: %v",
o.KarmadaContext, o.KubeConfig, err)
}
clusterInfo, err := o.getClusterInfo(karmadaRestConfig, o.Cluster)
if err != nil {
return err
}
o.f = getFactory(o.Cluster, clusterInfo)
o.RESTClientGetter = o.f
o.LogsForObject = polymorphichelpers.LogsForObjectFn
if err := o.completeObj(); err != nil {
return err
}
return nil
}
// Validate checks the LogsOptions to see if there is sufficient information to run the command
func (o *CommandLogsOptions) Validate() error {
if len(o.SinceTime) > 0 && o.Since != 0 {
return fmt.Errorf("at most one of `sinceTime` or `sinceSeconds` may be specified")
}
logsOptions, ok := o.Options.(*corev1.PodLogOptions)
if !ok {
return errors.New("unexpected logs options object")
}
if o.AllContainers && len(logsOptions.Container) > 0 {
return fmt.Errorf("--all-containers=true should not be specified with container name %s", logsOptions.Container)
}
if o.ContainerNameSpecified && len(o.Resources) == 2 {
return fmt.Errorf("only one of -c or an inline [CONTAINER] arg is allowed")
}
if o.LimitBytes < 0 {
return fmt.Errorf("--limit-bytes must be greater than 0")
}
if logsOptions.SinceSeconds != nil && *logsOptions.SinceSeconds < int64(0) {
return fmt.Errorf("--since must be greater than 0")
}
if logsOptions.TailLines != nil && *logsOptions.TailLines < -1 {
return fmt.Errorf("--tail must be greater than or equal to -1")
}
return nil
}
// Run retrieves a pod log
func (o *CommandLogsOptions) Run() error {
requests, err := o.LogsForObject(o.RESTClientGetter, o.Object, o.Options, o.GetPodTimeout, o.AllContainers)
if err != nil {
return err
}
if o.Follow && len(requests) > 1 {
if len(requests) > o.MaxFollowConcurrency {
return fmt.Errorf(
"you are attempting to follow %d log streams, but maximum allowed concurrency is %d, use --max-log-requests to increase the limit",
len(requests), o.MaxFollowConcurrency,
)
}
return o.parallelConsumeRequest(requests)
}
return o.sequentialConsumeRequest(requests)
}
func (o *CommandLogsOptions) parallelConsumeRequest(requests map[corev1.ObjectReference]rest.ResponseWrapper) error {
reader, writer := io.Pipe()
wg := &sync.WaitGroup{}
wg.Add(len(requests))
for objRef, request := range requests {
go func(objRef corev1.ObjectReference, request rest.ResponseWrapper) {
defer wg.Done()
out := o.addPrefixIfNeeded(objRef, writer)
if err := o.ConsumeRequestFn(request, out); err != nil {
if !o.IgnoreLogErrors {
_ = writer.CloseWithError(err)
// It's important to return here to propagate the error via the pipe
return
}
fmt.Fprintf(writer, "error: %v\n", err)
}
}(objRef, request)
}
go func() {
wg.Wait()
writer.Close()
}()
_, err := io.Copy(o.Out, reader)
return err
}
func (o *CommandLogsOptions) completeObj() error {
if o.Object == nil {
builder := o.f.NewBuilder().
WithScheme(gclient.NewSchema(), gclient.NewSchema().PrioritizedVersionsAllGroups()...).
NamespaceParam(o.Namespace).DefaultNamespace().
SingleResourceType()
if o.ResourceArg != "" {
builder.ResourceNames("pods", o.ResourceArg)
}
if o.Selector != "" {
builder.ResourceTypes("pods").LabelSelectorParam(o.Selector)
}
infos, err := builder.Do().Infos()
if err != nil {
return err
}
if o.Selector == "" && len(infos) != 1 {
return errors.New("expected a resource")
}
o.Object = infos[0].Object
if o.Selector != "" && len(o.Object.(*corev1.PodList).Items) == 0 {
fmt.Fprintf(o.ErrOut, "No resources found in %s namespace.\n", o.Namespace)
}
}
return nil
}
func (o *CommandLogsOptions) toLogOptions() (*corev1.PodLogOptions, error) {
logOptions := &corev1.PodLogOptions{
Container: o.Container,
Follow: o.Follow,
Previous: o.Previous,
Timestamps: o.Timestamps,
InsecureSkipTLSVerifyBackend: o.InsecureSkipTLSVerifyBackend,
}
if len(o.SinceTime) > 0 {
t, err := lifted.ParseRFC3339(o.SinceTime, metav1.Now)
if err != nil {
return nil, err
}
logOptions.SinceTime = &t
}
if o.LimitBytes != 0 {
logOptions.LimitBytes = &o.LimitBytes
}
if o.Since != 0 {
// round up to the nearest second
sec := int64(o.Since.Round(time.Second))
logOptions.SinceSeconds = &sec
}
if len(o.Selector) > 0 && o.Tail == -1 && !o.TailSpecified {
logOptions.TailLines = &selectorTail
} else if o.Tail != -1 {
logOptions.TailLines = &o.Tail
}
return logOptions, nil
}
func (o *CommandLogsOptions) sequentialConsumeRequest(requests map[corev1.ObjectReference]rest.ResponseWrapper) error {
for objRef, request := range requests {
out := o.addPrefixIfNeeded(objRef, o.Out)
if err := o.ConsumeRequestFn(request, out); err != nil {
if !o.IgnoreLogErrors {
return err
}
fmt.Fprintf(o.Out, "error: %v\n", err)
}
}
return nil
}
func (o *CommandLogsOptions) addPrefixIfNeeded(ref corev1.ObjectReference, writer io.Writer) io.Writer {
if !o.Prefix || ref.FieldPath == "" || ref.Name == "" {
return writer
}
// We rely on ref.FieldPath to contain a reference to a container
// including a container name (not an index) so we can get a container name
// without making an extra API request.
var containerName string
containerNameMatches := o.containerNameFromRefSpecRegexp.FindStringSubmatch(ref.FieldPath)
if len(containerNameMatches) == 2 {
containerName = containerNameMatches[1]
}
prefix := fmt.Sprintf("[pod/%s/%s] ", ref.Name, containerName)
return &prefixingWriter{
prefix: []byte(prefix),
writer: writer,
}
}
// getClusterInfo gets information about the given member cluster
func (o *CommandLogsOptions) getClusterInfo(karmadaRestConfig *rest.Config, name string) (map[string]*ClusterInfo, error) {
clusterClient := karmadaclientset.NewForConfigOrDie(karmadaRestConfig).ClusterV1alpha1().Clusters()
// check if the cluster exist in karmada control plane
_, err := clusterClient.Get(context.TODO(), o.Cluster, metav1.GetOptions{})
if err != nil {
return nil, err
}
clusterInfos := make(map[string]*ClusterInfo)
clusterInfos[name] = &ClusterInfo{}
clusterInfos[name].APIEndpoint = karmadaRestConfig.Host + fmt.Sprintf(proxyURL, name)
clusterInfos[name].KubeConfig = o.KubeConfig
clusterInfos[name].Context = o.KarmadaContext
if clusterInfos[name].KubeConfig == "" {
env := os.Getenv("KUBECONFIG")
if env != "" {
clusterInfos[name].KubeConfig = env
} else {
clusterInfos[name].KubeConfig = defaultKubeConfig
}
}
return clusterInfos, nil
}
type prefixingWriter struct {
prefix []byte
writer io.Writer
}
func (pw *prefixingWriter) Write(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}
// Perform an "atomic" write of a prefix and p to make sure that it doesn't interleave
// sub-line when used concurrently with io.PipeWrite.
n, err := pw.writer.Write(append(pw.prefix, p...))
if n > len(p) {
// To comply with the io.Writer interface requirements we must
// return a number of bytes written from p (0 <= n <= len(p)),
// so we are ignoring the length of the prefix here.
return len(p), err
}
return n, err
}
| [
"\"KUBECONFIG\""
] | [] | [
"KUBECONFIG"
] | [] | ["KUBECONFIG"] | go | 1 | 0 | |
java11/movies/functions/update-movie-rating/src/main/java/de/mbe/tutorials/aws/serverless/movies/functions/updatemovierating/FnUpdateMovieRating.java | package de.mbe.tutorials.aws.serverless.movies.functions.updatemovierating;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.amazonaws.services.dynamodbv2.model.AmazonDynamoDBException;
import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.RequestHandler;
import com.amazonaws.services.lambda.runtime.events.APIGatewayV2ProxyRequestEvent;
import com.amazonaws.services.lambda.runtime.events.APIGatewayV2ProxyResponseEvent;
import com.amazonaws.xray.AWSXRay;
import com.amazonaws.xray.handlers.TracingHandler;
import com.fasterxml.jackson.databind.ObjectMapper;
import de.mbe.tutorials.aws.serverless.movies.functions.updatemovierating.repository.MoviesDynamoDbRepository;
import de.mbe.tutorials.aws.serverless.movies.functions.updatemovierating.utils.APIGatewayV2ProxyResponseUtils;
import de.mbe.tutorials.aws.serverless.movies.models.MovieRating;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import static com.amazonaws.util.StringUtils.isNullOrEmpty;
public final class FnUpdateMovieRating implements RequestHandler<APIGatewayV2ProxyRequestEvent, APIGatewayV2ProxyResponseEvent>, APIGatewayV2ProxyResponseUtils {
private static final Logger LOGGER = LogManager.getLogger(FnUpdateMovieRating.class);
private final ObjectMapper MAPPER = new ObjectMapper();
private final MoviesDynamoDbRepository repository;
public FnUpdateMovieRating() {
final var amazonDynamoDB = AmazonDynamoDBClientBuilder
.standard()
.withRequestHandlers(new TracingHandler(AWSXRay.getGlobalRecorder()))
.build();
final var movieRatingsTable = System.getenv("MOVIE_RATINGS_TABLE");
this.repository = new MoviesDynamoDbRepository(amazonDynamoDB, movieRatingsTable);
}
@Override
public APIGatewayV2ProxyResponseEvent handleRequest(APIGatewayV2ProxyRequestEvent request, Context context) {
LOGGER.info("FnAddMovieRating.getRemainingTimeInMillis {} ", context.getRemainingTimeInMillis());
if (!request.getHttpMethod().equalsIgnoreCase("patch")) {
return methodNotAllowed(LOGGER, "Method " + request.getHttpMethod() + " not allowed");
}
if (!request.getPathParameters().containsKey("movieId") || isNullOrEmpty(request.getPathParameters().get("movieId"))) {
return badRequest(LOGGER, "Missing {movieId} path parameter");
}
final var movieId = request.getPathParameters().get("movieId");
LOGGER.info("Patching movie {}", movieId);
try {
final var movieRating = MAPPER.readValue(request.getBody(), MovieRating.class);
this.repository.updateMovieRating(movieRating);
return ok(LOGGER, "SUCCESS");
} catch (AmazonDynamoDBException error) {
return amazonDynamoDBException(LOGGER, error);
} catch (Exception error) {
return internalServerError(LOGGER, error);
}
}
}
| [
"\"MOVIE_RATINGS_TABLE\""
] | [] | [
"MOVIE_RATINGS_TABLE"
] | [] | ["MOVIE_RATINGS_TABLE"] | java | 1 | 0 | |
secrets/awskms/kms_test.go | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package awskms
import (
"context"
"errors"
"fmt"
"os"
"testing"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/kms"
"gocloud.dev/internal/testing/setup"
"gocloud.dev/secrets"
"gocloud.dev/secrets/driver"
"gocloud.dev/secrets/drivertest"
)
const (
keyID1 = "alias/test-secrets"
keyID2 = "alias/test-secrets2"
region = "us-east-2"
)
type harness struct {
client *kms.KMS
close func()
}
func (h *harness) MakeDriver(ctx context.Context) (driver.Keeper, driver.Keeper, error) {
return &keeper{keyID: keyID1, client: h.client}, &keeper{keyID: keyID2, client: h.client}, nil
}
func (h *harness) Close() {
h.close()
}
func newHarness(ctx context.Context, t *testing.T) (drivertest.Harness, error) {
sess, _, done := setup.NewAWSSession(t, region)
return &harness{
client: kms.New(sess),
close: done,
}, nil
}
func TestConformance(t *testing.T) {
drivertest.RunConformanceTests(t, newHarness, []drivertest.AsTest{verifyAs{}})
}
type verifyAs struct{}
func (v verifyAs) Name() string {
return "verify As function"
}
func (v verifyAs) ErrorCheck(k *secrets.Keeper, err error) error {
var e awserr.Error
if !k.ErrorAs(err, &e) {
return errors.New("Keeper.ErrorAs failed")
}
if e.Code() != kms.ErrCodeInvalidCiphertextException {
return fmt.Errorf("got %q, want %q", e.Code(), kms.ErrCodeInvalidCiphertextException)
}
return nil
}
// KMS-specific tests.
func TestNoSessionProvidedError(t *testing.T) {
if _, err := Dial(nil); err == nil {
t.Error("got nil, want no AWS session provided")
}
}
func TestNoConnectionError(t *testing.T) {
prevAccessKey := os.Getenv("AWS_ACCESS_KEY")
prevSecretKey := os.Getenv("AWS_SECRET_KEY")
prevRegion := os.Getenv("AWS_REGION")
os.Setenv("AWS_ACCESS_KEY", "myaccesskey")
os.Setenv("AWS_SECRET_KEY", "mysecretkey")
os.Setenv("AWS_REGION", "us-east-1")
defer func() {
os.Setenv("AWS_ACCESS_KEY", prevAccessKey)
os.Setenv("AWS_SECRET_KEY", prevSecretKey)
os.Setenv("AWS_REGION", prevRegion)
}()
sess, err := session.NewSession()
if err != nil {
t.Fatal(err)
}
client, err := Dial(sess)
if err != nil {
t.Fatal(err)
}
keeper := NewKeeper(client, keyID1, nil)
if _, err := keeper.Encrypt(context.Background(), []byte("test")); err == nil {
t.Error("got nil, want UnrecognizedClientException")
}
}
func TestOpenKeeper(t *testing.T) {
tests := []struct {
URL string
WantErr bool
}{
// OK.
{"awskms://alias/my-key", false},
// OK, overriding region.
{"awskms://alias/my-key?region=us-west1", false},
// Unknown parameter.
{"awskms://alias/my-key?param=value", true},
}
ctx := context.Background()
for _, test := range tests {
_, err := secrets.OpenKeeper(ctx, test.URL)
if (err != nil) != test.WantErr {
t.Errorf("%s: got error %v, want error %v", test.URL, err, test.WantErr)
}
}
}
| [
"\"AWS_ACCESS_KEY\"",
"\"AWS_SECRET_KEY\"",
"\"AWS_REGION\""
] | [] | [
"AWS_SECRET_KEY",
"AWS_ACCESS_KEY",
"AWS_REGION"
] | [] | ["AWS_SECRET_KEY", "AWS_ACCESS_KEY", "AWS_REGION"] | go | 3 | 0 | |
utils/bazel/terminfo.bzl | # This file is licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Repository rules to configure the terminfo used by LLVM.
Most users should pick one of the explicit rules to configure their use of terminfo
with LLVM:
- `llvm_terminfo_system` will detect and link against a terminfo-implementing
system library (non-hermetically).
- `llvm_terminfo_disable` will disable terminfo completely.
If you would like to make your build configurable, you can use
`llvm_terminfo_from_env`. By default, this will disable terminfo, but will
inspect the environment variable (most easily set with a `--repo_env` flag to
the Bazel invocation) `BAZEL_LLVM_TERMINFO_STRATEGY`. If it is set to
`system` then it will behave the same as `llvm_terminfo_system`. Any other
setting will disable terminfo the same as not setting it at all.
"""
def _llvm_terminfo_disable_impl(repository_ctx):
repository_ctx.template(
"BUILD",
repository_ctx.attr._disable_build_template,
executable = False,
)
_terminfo_disable_attrs = {
"_disable_build_template": attr.label(
default = Label("//deps_impl:terminfo_disable.BUILD"),
allow_single_file = True,
),
}
llvm_terminfo_disable = repository_rule(
implementation = _llvm_terminfo_disable_impl,
attrs = _terminfo_disable_attrs,
)
def _find_c_compiler(repository_ctx):
"""Returns the path to a plausible C compiler.
This routine will only reliably work on roughly POSIX-y systems as it
ultimately falls back on the `cc` binary. Fortunately, the thing we are
trying to use it for (detecting if a trivial source file can compile and
link against a particular library) requires very little.
"""
cc_env = repository_ctx.os.environ.get("CC")
cc = None
if cc_env:
if "/" in cc_env:
return repository_ctx.path(cc_env)
else:
return repository_ctx.which(cc_env)
# Look for Clang, GCC, and the POSIX / UNIX specified C compiler
# binaries.
for compiler in ["clang", "gcc", "c99", "c89", "cc"]:
cc = repository_ctx.which(compiler)
if cc:
return cc
return None
def _try_link(repository_ctx, cc, source, linker_flags):
"""Returns `True` if able to link the source with the linker flag.
Given a source file that contains references to library routines, this
will check that when linked with the provided linker flag, those
references are successfully resolved. This routine assumes a generally
POSIX-y and GCC-ish compiler and environment and shouldn't be expected to
work outside of that.
"""
cmd = [
cc,
# Force discard the linked executable.
"-o",
"/dev/null",
# Leave language detection to the compiler.
source,
]
# The linker flag must be valid for a compiler invocation of the link step,
# so just append them to the command.
cmd += linker_flags
exec_result = repository_ctx.execute(cmd, timeout = 20)
return exec_result.return_code == 0
def _llvm_terminfo_system_impl(repository_ctx):
# LLVM doesn't need terminfo support on Windows, so just disable it.
if repository_ctx.os.name.lower().find("windows") != -1:
_llvm_terminfo_disable_impl(repository_ctx)
return
if len(repository_ctx.attr.system_linkopts) > 0:
linkopts = repository_ctx.attr.system_linkopts
else:
required = repository_ctx.attr.system_required
# Find a C compiler we can use to detect viable linkopts on this system.
cc = _find_c_compiler(repository_ctx)
if not cc:
if required:
fail("Failed to find a C compiler executable")
else:
_llvm_terminfo_disable_impl(repository_ctx)
return
# Get the source file we use to detect successful linking of terminfo.
source = repository_ctx.path(repository_ctx.attr._terminfo_test_source)
# Collect the candidate linkopts and wrap them into a list. Ideally,
# these would be provided as lists, but Bazel doesn't currently
# support that. See: https://github.com/bazelbuild/bazel/issues/12178
linkopts_candidates = [[x] for x in repository_ctx.attr.candidate_system_linkopts]
# For each candidate, try to use it to link our test source file.
# Initialize linkopts so the "not found" check below is well-defined even
# when no candidate links successfully.
linkopts = None
for linkopts_candidate in linkopts_candidates:
if _try_link(repository_ctx, cc, source, linkopts_candidate):
linkopts = linkopts_candidate
break
# If we never found a viable linkopts candidate, either error or disable
# terminfo for LLVM.
if not linkopts:
if required:
fail("Failed to detect which linkopt would successfully provide the " +
"necessary terminfo functionality")
else:
_llvm_terminfo_disable_impl(repository_ctx)
return
repository_ctx.template(
"BUILD",
repository_ctx.attr._system_build_template,
substitutions = {
"{TERMINFO_LINKOPTS}": str(linkopts),
},
executable = False,
)
def _merge_attrs(attrs_list):
attrs = {}
for input_attrs in attrs_list:
attrs.update(input_attrs)
return attrs
_terminfo_system_attrs = _merge_attrs([_terminfo_disable_attrs, {
"_system_build_template": attr.label(
default = Label("//deps_impl:terminfo_system.BUILD"),
allow_single_file = True,
),
"_terminfo_test_source": attr.label(
default = Label("//deps_impl:terminfo_test.c"),
allow_single_file = True,
),
"candidate_system_linkopts": attr.string_list(
default = [
"-lterminfo",
"-ltinfo",
"-lcurses",
"-lncurses",
"-lncursesw",
],
doc = "Candidate linkopts to test and see if they can link " +
"successfully.",
),
"system_required": attr.bool(
default = False,
doc = "Require that one of the candidates is detected successfully on POSIX platforms where it is needed.",
),
"system_linkopts": attr.string_list(
default = [],
doc = "If non-empty, a specific array of linkopts to use to " +
"successfully link against the terminfo library. No " +
"detection is performed if this option is provided, it " +
"directly forces the use of these link options. No test is " +
"run to determine if they are valid or work correctly either.",
),
}])
llvm_terminfo_system = repository_rule(
implementation = _llvm_terminfo_system_impl,
configure = True,
local = True,
attrs = _terminfo_system_attrs,
)
def _llvm_terminfo_from_env_impl(repository_ctx):
terminfo_strategy = repository_ctx.os.environ.get("BAZEL_LLVM_TERMINFO_STRATEGY")
if terminfo_strategy == "system":
_llvm_terminfo_system_impl(repository_ctx)
else:
_llvm_terminfo_disable_impl(repository_ctx)
llvm_terminfo_from_env = repository_rule(
implementation = _llvm_terminfo_from_env_impl,
configure = True,
local = True,
attrs = _merge_attrs([_terminfo_disable_attrs, _terminfo_system_attrs]),
environ = ["BAZEL_LLVM_TERMINFO_STRATEGY", "CC"],
)
| [] | [] | [
"CC",
"BAZEL_LLVM_TERMINFO_STRATEGY"
] | [] | ["CC", "BAZEL_LLVM_TERMINFO_STRATEGY"] | python | 2 | 0 | |
venv/lib/python3.6/site-packages/ansible_collections/community/grafana/plugins/lookup/grafana_dashboard.py | # (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: grafana_dashboard
author: Thierry Salle (@seuf)
short_description: list or search grafana dashboards
description:
- This lookup returns a list of grafana dashboards, with the possibility of filtering them by query.
options:
grafana_url:
description: url of grafana.
env:
- name: GRAFANA_URL
default: http://127.0.0.1:3000
grafana_api_key:
description:
- Grafana API key.
- When C(grafana_api_key) is set, the options C(grafana_user), C(grafana_password) and C(grafana_org_id) are ignored.
env:
- name: GRAFANA_API_KEY
grafana_user:
description: grafana authentication user.
env:
- name: GRAFANA_USER
default: admin
grafana_password:
description: grafana authentication password.
env:
- name: GRAFANA_PASSWORD
default: admin
grafana_org_id:
description: grafana organisation id.
env:
- name: GRAFANA_ORG_ID
default: 1
search:
description: optional filter for dashboard search.
env:
- name: GRAFANA_DASHBOARD_SEARCH
'''
EXAMPLES = """
- name: get project foo grafana dashboards
set_fact:
grafana_dashboards: "{{ lookup('grafana_dashboard', 'grafana_url=http://grafana.company.com grafana_user=admin grafana_password=admin search=foo') }}"
- name: get all grafana dashboards
set_fact:
grafana_dashboards: "{{ lookup('grafana_dashboard', 'grafana_url=http://grafana.company.com grafana_api_key=' ~ grafana_api_key) }}"
"""
import json
import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import basic_auth_header, open_url
from ansible.module_utils._text import to_native
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.utils.display import Display
display = Display()
ANSIBLE_GRAFANA_URL = 'http://127.0.0.1:3000'
ANSIBLE_GRAFANA_API_KEY = None
ANSIBLE_GRAFANA_USER = 'admin'
ANSIBLE_GRAFANA_PASSWORD = 'admin'
ANSIBLE_GRAFANA_ORG_ID = 1
ANSIBLE_GRAFANA_DASHBOARD_SEARCH = None
if os.getenv('GRAFANA_URL') is not None:
ANSIBLE_GRAFANA_URL = os.environ['GRAFANA_URL']
if os.getenv('GRAFANA_API_KEY') is not None:
ANSIBLE_GRAFANA_API_KEY = os.environ['GRAFANA_API_KEY']
if os.getenv('GRAFANA_USER') is not None:
ANSIBLE_GRAFANA_USER = os.environ['GRAFANA_USER']
if os.getenv('GRAFANA_PASSWORD') is not None:
ANSIBLE_GRAFANA_PASSWORD = os.environ['GRAFANA_PASSWORD']
if os.getenv('GRAFANA_ORG_ID') is not None:
ANSIBLE_GRAFANA_ORG_ID = os.environ['GRAFANA_ORG_ID']
if os.getenv('GRAFANA_DASHBOARD_SEARCH') is not None:
ANSIBLE_GRAFANA_DASHBOARD_SEARCH = os.environ['GRAFANA_DASHBOARD_SEARCH']
class GrafanaAPIException(Exception):
pass
class GrafanaAPI:
def __init__(self, **kwargs):
self.grafana_url = kwargs.get('grafana_url', ANSIBLE_GRAFANA_URL)
self.grafana_api_key = kwargs.get('grafana_api_key', ANSIBLE_GRAFANA_API_KEY)
self.grafana_user = kwargs.get('grafana_user', ANSIBLE_GRAFANA_USER)
self.grafana_password = kwargs.get('grafana_password', ANSIBLE_GRAFANA_PASSWORD)
self.grafana_org_id = kwargs.get('grafana_org_id', ANSIBLE_GRAFANA_ORG_ID)
self.search = kwargs.get('search', ANSIBLE_GRAFANA_DASHBOARD_SEARCH)
def grafana_switch_organisation(self, headers):
try:
r = open_url('%s/api/user/using/%s' % (self.grafana_url, self.grafana_org_id), headers=headers, method='POST')
except HTTPError as e:
raise GrafanaAPIException('Unable to switch to organization %s : %s' % (self.grafana_org_id, to_native(e)))
if r.getcode() != 200:
raise GrafanaAPIException('Unable to switch to organization %s : %s' % (self.grafana_org_id, str(r.getcode())))
def grafana_headers(self):
headers = {'content-type': 'application/json; charset=utf8'}
if self.grafana_api_key:
api_key = self.grafana_api_key
if len(api_key) % 4 == 2:
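# a length of 2 mod 4 means the base64 padding was stripped from the
# key; restore the '==' so the bearer token decodes correctly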
display.deprecated(
"Passing a mangled version of the API key to the grafana_dashboard lookup is no longer necessary and should not be done.",
"2.0.0",
collection_name='community.grafana',
)
api_key += '=='
headers['Authorization'] = "Bearer %s" % api_key
else:
headers['Authorization'] = basic_auth_header(self.grafana_user, self.grafana_password)
self.grafana_switch_organisation(headers)
return headers
def grafana_list_dashboards(self):
# define http headers
headers = self.grafana_headers()
dashboard_list = []
try:
if self.search:
r = open_url('%s/api/search?query=%s' % (self.grafana_url, self.search), headers=headers, method='GET')
else:
r = open_url('%s/api/search/' % self.grafana_url, headers=headers, method='GET')
except HTTPError as e:
raise GrafanaAPIException('Unable to search dashboards : %s' % to_native(e))
if r.getcode() == 200:
try:
dashboard_list = json.loads(r.read())
except Exception as e:
raise GrafanaAPIException('Unable to parse json list %s' % to_native(e))
else:
raise GrafanaAPIException('Unable to list grafana dashboards : %s' % str(r.getcode()))
return dashboard_list
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
grafana_args = terms[0].split(' ')
grafana_dict = {}
ret = []
for param in grafana_args:
try:
key, value = param.split('=', 1)
except ValueError:
raise AnsibleError("grafana_dashboard lookup plugin needs key=value pairs, but received %s" % terms)
grafana_dict[key] = value
grafana = GrafanaAPI(**grafana_dict)
ret = grafana.grafana_list_dashboards()
return ret
| [] | [] | [
"GRAFANA_USER",
"GRAFANA_URL",
"GRAFANA_PASSWORD",
"GRAFANA_ORG_ID",
"GRAFANA_API_KEY",
"GRAFANA_DASHBOARD_SEARCH"
] | [] | ["GRAFANA_USER", "GRAFANA_URL", "GRAFANA_PASSWORD", "GRAFANA_ORG_ID", "GRAFANA_API_KEY", "GRAFANA_DASHBOARD_SEARCH"] | python | 6 | 0 | |
trainTripleClassification.py | import argparse
import os
from preprocess.TripleClassificationData import TripleClassificationData
from train.TrainTripleClassification import TrainTripleClassifcation
from utils.readmodel import *
os.environ["CUDA_VISIBLE_DEVICES"] = '3'
FLAGS = None
def main(FLAGS):
data = TripleClassificationData(
os.path.join(FLAGS.datapath,FLAGS.dataset),
FLAGS.trainfilename,
FLAGS.validfilename,
FLAGS.testfilename,
FLAGS.withreverse
)
embedding, generator, discriminator = read_gan_model(FLAGS, data.entity_numbers,data.relation_numbers)
if FLAGS.cuda:
embedding.cuda()
generator.cuda()
discriminator.cuda()
trainGan = TrainTripleClassifcation()
trainGan.set_data(data)
trainGan.set_model(embedding,generator,discriminator)
trainGan.train(
FLAGS.usepretrained,
FLAGS.pretrainedpath,
FLAGS.learningrate,
FLAGS.weightdecay,
FLAGS.margin,
FLAGS.epochs,
FLAGS.batchsize,
FLAGS.evaluationtimes,
FLAGS.savetimes,
FLAGS.savepath,
FLAGS.logpath,
FLAGS.dtuneembedding,
FLAGS.gtuneembedding,
FLAGS.dmargintype,
FLAGS.gusenegative,
FLAGS.meanorsum,
print_file = FLAGS.logpath+'.txt'
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=True, type=bool)
# parameters for model name
parser.add_argument("--embeddingname", default='TransE',type=str)
parser.add_argument("--gname", default='ConvTransE', type=str)
parser.add_argument("--dname", default='Translation', type=str)
# parameters for dataset
parser.add_argument("--datapath",default='data',type=str)
parser.add_argument("--dataset",default="Wordnet11",type=str)
parser.add_argument("--trainfilename", default="train.txt", type=str)
parser.add_argument("--validfilename", default="dev.txt", type=str)
parser.add_argument("--testfilename", default="test.txt", type=str)
parser.add_argument("--withreverse", default=False, type=bool)
# parameters for super parameters
parser.add_argument("--embeddingdim", default=100, type=int)
parser.add_argument("--usepretrained", default=False, type=bool)
parser.add_argument("--pretrainedpath", default='saved_model/TransE/baseline/WN18RR/embedding-model-2000.pkl', type=str)
parser.add_argument("--learningrate", default=0.001, type=float)
parser.add_argument("--epochs", default=1000, type=int)
parser.add_argument("--batchsize", default=1000, type=int)
parser.add_argument("--margin", default=2.0, type=float)
parser.add_argument("--weightdecay", default=1e-6, type=float)
# parameters for save and log times and path
parser.add_argument("--evaluationtimes", default=100, type=int)
parser.add_argument("--savetimes", default=500, type=int)
parser.add_argument("--logtimes", default=1, type=int)
parser.add_argument("--savepath", default='saved_model/FC_TransE/WN11', type=str)
parser.add_argument("--logpath", default='log/FC_TransE/WN11', type=str)
# parameters for fully connected layer
parser.add_argument("--hiddenlayers",default=[200,100],type=list)
# parameters for convolutional layer
parser.add_argument("--numfilter", default=32, type=int)
parser.add_argument("--inputdropout", default=0.2, type=float)
parser.add_argument("--featuredropout", default=0.3, type=float)
parser.add_argument("--kernelsize", default=3, type=int)
# parameters for different selection strategies for GN and DN
parser.add_argument("--dtuneembedding", default=True, type=bool)
parser.add_argument("--gtuneembedding", default=False, type=bool)
parser.add_argument("--dmargintype", default=True, type=bool)
parser.add_argument("--gusenegative", default=False, type=bool)
parser.add_argument("--meanorsum", default='mean', type=str)
FLAGS, unparsed = parser.parse_known_args()
main(FLAGS)
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
internal/distro/distro.go | // Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package distro
import (
"fmt"
"os"
)
// Distro-specific settings that can be overridden at link time with e.g.
// -X github.com/coreos/ignition/v2/internal/distro.mdadmCmd=/opt/bin/mdadm
var (
// Device node directories and paths
diskByIDDir = "/dev/disk/by-id"
diskByLabelDir = "/dev/disk/by-label"
diskByPartUUIDDir = "/dev/disk/by-partuuid"
// File paths
kernelCmdlinePath = "/proc/cmdline"
// initramfs directory containing distro-provided base config
systemConfigDir = "/usr/lib/ignition"
// Helper programs
groupaddCmd = "groupadd"
mdadmCmd = "mdadm"
mountCmd = "mount"
sgdiskCmd = "sgdisk"
modprobeCmd = "modprobe"
udevadmCmd = "udevadm"
usermodCmd = "usermod"
useraddCmd = "useradd"
setfilesCmd = "setfiles"
wipefsCmd = "wipefs"
// Filesystem tools
btrfsMkfsCmd = "mkfs.btrfs"
ext4MkfsCmd = "mkfs.ext4"
swapMkfsCmd = "mkswap"
vfatMkfsCmd = "mkfs.vfat"
xfsMkfsCmd = "mkfs.xfs"
// zVM programs
vmurCmd = "vmur"
chccwdevCmd = "chccwdev"
cioIgnoreCmd = "cio_ignore"
// LUKS programs
clevisCmd = "clevis"
cryptsetupCmd = "cryptsetup"
// Flags
selinuxRelabel = "true"
blackboxTesting = "false"
// writeAuthorizedKeysFragment indicates whether to write SSH keys
// specified in the Ignition config as a fragment to
// ".ssh/authorized_keys.d/ignition" ("true"), or to
// ".ssh/authorized_keys" ("false").
writeAuthorizedKeysFragment = "true"
luksInitramfsKeyFilePath = "/run/ignition/luks-keyfiles/"
luksRealRootKeyFilePath = "/etc/luks/"
)
func DiskByIDDir() string { return diskByIDDir }
func DiskByLabelDir() string { return diskByLabelDir }
func DiskByPartUUIDDir() string { return diskByPartUUIDDir }
func KernelCmdlinePath() string { return kernelCmdlinePath }
func SystemConfigDir() string { return fromEnv("SYSTEM_CONFIG_DIR", systemConfigDir) }
func GroupaddCmd() string { return groupaddCmd }
func MdadmCmd() string { return mdadmCmd }
func MountCmd() string { return mountCmd }
func SgdiskCmd() string { return sgdiskCmd }
func ModprobeCmd() string { return modprobeCmd }
func UdevadmCmd() string { return udevadmCmd }
func UsermodCmd() string { return usermodCmd }
func UseraddCmd() string { return useraddCmd }
func SetfilesCmd() string { return setfilesCmd }
func WipefsCmd() string { return wipefsCmd }
func BtrfsMkfsCmd() string { return btrfsMkfsCmd }
func Ext4MkfsCmd() string { return ext4MkfsCmd }
func SwapMkfsCmd() string { return swapMkfsCmd }
func VfatMkfsCmd() string { return vfatMkfsCmd }
func XfsMkfsCmd() string { return xfsMkfsCmd }
func VmurCmd() string { return vmurCmd }
func ChccwdevCmd() string { return chccwdevCmd }
func CioIgnoreCmd() string { return cioIgnoreCmd }
func ClevisCmd() string { return clevisCmd }
func CryptsetupCmd() string { return cryptsetupCmd }
func LuksInitramfsKeyFilePath() string { return luksInitramfsKeyFilePath }
func LuksRealRootKeyFilePath() string { return luksRealRootKeyFilePath }
func SelinuxRelabel() bool { return bakedStringToBool(selinuxRelabel) && !BlackboxTesting() }
func BlackboxTesting() bool { return bakedStringToBool(blackboxTesting) }
func WriteAuthorizedKeysFragment() bool {
return bakedStringToBool(fromEnv("WRITE_AUTHORIZED_KEYS_FRAGMENT", writeAuthorizedKeysFragment))
}
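// fromEnv returns the value of the environment variable "IGNITION_"+nameSuffix,
// falling back to defaultValue when it is unset or empty. For example, setting
// IGNITION_SYSTEM_CONFIG_DIR overrides the directory returned by
// SystemConfigDir().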
func fromEnv(nameSuffix, defaultValue string) string {
value := os.Getenv("IGNITION_" + nameSuffix)
if value != "" {
return value
}
return defaultValue
}
func bakedStringToBool(s string) bool {
// the linker only supports string args, so do some basic bool sensing
if s == "true" || s == "1" {
return true
} else if s == "false" || s == "0" {
return false
} else {
// if we got a bad compile flag, just crash and burn rather than assume
panic(fmt.Sprintf("value '%s' cannot be interpreted as a boolean", s))
}
}
| [
"\"IGNITION_\" + nameSuffix"
] | [] | [
"IGNITION_\" + nameSuffi"
] | [] | ["IGNITION_\" + nameSuffi"] | go | 1 | 0 | |
msc_thesis/stages/etc/_4_probing_models_and_filters.py | from msc_thesis.util import call, config_update, context, config_get
from pcigale.data import Database
import os
import multiprocessing as mp
import click
@click.command()
@click.option('--target', default=None)
def cli(target):
if not target:
target = os.environ['THESIS_TARGET']
main(target)
def main(target):
with context(f'./data/{target}/fitting', target) as (env, _):
vars = env.determining_filters
# recover the filter families created for this thesis. The hack here is that the description
# is the name of the family, where the name is the name of the filter itself
with Database() as db:
filter_families = [row[0] for row in db.session.execute(
"select distinct description from filters where name like 'thesis-filter%'"
)]
for filter_family in filter_families:
with context(f'./{filter_family}', target):
call('pcigale init')
config_update(vars['pcigale_init'], {
'analysis_method': 'pdf_analysis',
'cores': mp.cpu_count() - 1
})
call('pcigale genconf')
#config_update(vars['pcigale_genconf'], {
#
# })
# call('pcigale run')
| [] | [] | [
"THESIS_TARGET"
] | [] | ["THESIS_TARGET"] | python | 1 | 0 | |
app.py | import flask
from PIL.Image import core as _imaging
from flask import Flask, request, jsonify
from PIL import Image
import numpy as np
import base64
import io
import os
from backend.tf_inference import load_model, inference
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
sess, detection_graph = load_model()
app = Flask(__name__)
@app.route('/api/', methods=["POST"])
def main_interface():
response = request.get_json()
data_str = response['image']
point = data_str.find(',')
base64_str = data_str[point:] # remove unused part like this: "data:image/jpeg;base64,"
image = base64.b64decode(base64_str)
img = Image.open(io.BytesIO(image))
if img.mode != 'RGB':
img = img.convert("RGB")
# convert to numpy array.
img_arr = np.array(img)
# do object detection in inference function.
results = inference(sess, detection_graph, img_arr, conf_thresh=0.5)
print(results)
return jsonify(results)
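# Example request (hypothetical payload; any base64-encoded image works, and
# Flask's default port 5000 is assumed):
#   curl -X POST http://localhost:5000/api/ -H 'Content-Type: application/json' \
#        -d '{"image": "data:image/jpeg;base64,<BASE64 DATA>"}'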
@app.after_request
def add_headers(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
return response
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
goqless.go | // reference: https://github.com/seomoz/qless-py
package goqless
import (
"bytes"
"crypto/md5"
"crypto/rand"
"encoding/json"
"fmt"
"github.com/garyburd/redigo/redis"
mrand "math/rand"
"os"
"bitbucket.org/kardianos/osext"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
)
// type Opts map[string]interface{}
// func (o Opts) Get(name string, dfault interface{}) interface{} {
// if v, ok := o[name]; ok {
// return v
// }
// return dfault
// }
// represents a string slice with special json unmarshalling
type StringSlice []string
var workerNameStr string
func init() {
hn, err := os.Hostname()
if err != nil {
hn = os.Getenv("HOSTNAME")
}
if hn == "" {
hn = "localhost"
}
workerNameStr = fmt.Sprintf("%s-%d", hn, os.Getpid())
}
func GetCurrentDir() (string, error) {
dir, err := osext.Executable()
if err != nil {
return "", err
}
dir = string(dir[:len(dir)-1])
pos := strings.LastIndex(dir, "/")
dir = string(dir[:pos])
return dir, nil
}
func (s *StringSlice) UnmarshalJSON(data []byte) error {
// because tables and arrays are equal in LUA,
// an empty array would be presented as "{}".
if bytes.Equal(data, []byte("{}")) {
*s = []string{}
return nil
}
var str []string
err := json.Unmarshal(data, &str)
if err != nil {
return err
}
*s = str
return nil
}
// Generates a jid: an MD5 hex digest of the worker name, 16 random bytes
// (crypto/rand, falling back to math/rand on failure), and the current time.
func generateJID() string {
hasher := md5.New()
uuid := make([]byte, 16)
n, err := rand.Read(uuid)
if err != nil || n != len(uuid) {
src := mrand.NewSource(time.Now().UnixNano())
r := mrand.New(src)
for n := range uuid {
uuid[n] = byte(r.Int())
}
}
hasher.Write([]byte(workerNameStr))
hasher.Write(uuid)
hasher.Write([]byte(time.Now().String()))
return fmt.Sprintf("%x", hasher.Sum(nil))
}
// returns a timestamp used in LUA calls
func timestamp() int64 {
return time.Now().UTC().Unix()
}
// returns a worker name for this machine/process
func workerName() string {
return workerNameStr
}
// makes the first character of a string upper case
func ucfirst(s string) string {
if s == "" {
return ""
}
r, n := utf8.DecodeRuneInString(s)
return string(unicode.ToUpper(r)) + s[n:]
}
// marshals a value. if the value happens to be
// a string or []byte, just return it.
func marshal(i interface{}) []byte {
//switch v := i.(type) {
//case []byte:
// return v
//case string:
// return []byte(v)
//}
byts, err := json.Marshal(i)
if err != nil {
return nil
}
return byts
}
// Bool is a helper that converts a command reply to a boolean. If err is not
// equal to nil, then Bool returns false, err. Otherwise Bool converts the
// reply to boolean as follows:
//
// Reply type Result
// integer value != 0, nil
// bulk strconv.ParseBool(reply) or r != "False", nil
// nil false, ErrNil
// other false, error
func Bool(reply interface{}, err error) (bool, error) {
if err != nil {
return false, err
}
switch reply := reply.(type) {
case int64:
return reply != 0, nil
case []byte:
r := string(reply)
b, err := strconv.ParseBool(r)
if err != nil {
return r != "False", nil
}
return b, err
case nil:
return false, redis.ErrNil
case redis.Error:
return false, reply
}
return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply)
}
| [
"\"HOSTNAME\""
] | [] | [
"HOSTNAME"
] | [] | ["HOSTNAME"] | go | 1 | 0 | |
pkg/client/client.go | package client
import (
"os"
"path/filepath"
"github.com/docker/docker/client"
)
const (
DefaultAPIVersion = "1.39"
)
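// GetDockerClient builds a Docker API client from the DOCKER_HOST,
// DOCKER_API_VERSION, and DOCKER_CERT_PATH environment variables, falling
// back to DefaultAPIVersion when no explicit API version is set.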
func GetDockerClient() (*client.Client, error) {
host := os.Getenv("DOCKER_HOST")
version := os.Getenv("DOCKER_API_VERSION")
certPath := os.Getenv("DOCKER_CERT_PATH")
mutators := []client.Opt{}
if len(version) > 0 {
mutators = append(mutators, client.WithVersion(version))
} else {
mutators = append(mutators, client.WithVersion(DefaultAPIVersion))
}
if len(host) > 0 {
mutators = append(mutators, client.WithHost(host))
}
if len(certPath) > 0 {
mutators = append(mutators, client.WithTLSClientConfig(
filepath.Join(certPath, "ca.pem"),
filepath.Join(certPath, "cert.pem"),
filepath.Join(certPath, "key.pem"),
))
}
return client.NewClientWithOpts(mutators...)
}
| [
"\"DOCKER_HOST\"",
"\"DOCKER_API_VERSION\"",
"\"DOCKER_CERT_PATH\""
] | [] | [
"DOCKER_HOST",
"DOCKER_API_VERSION",
"DOCKER_CERT_PATH"
] | [] | ["DOCKER_HOST", "DOCKER_API_VERSION", "DOCKER_CERT_PATH"] | go | 3 | 0 | |
store.go | // Copyright 2017 Matt Ho
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package dynastore
import (
"context"
"encoding/base32"
"errors"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/gorilla/securecookie"
"github.com/gorilla/sessions"
)
const (
// DefaultTableName is the default table name used by the dynamodb store
DefaultTableName = "dynastore"
// DefaultTTLField contains the default name of the ttl field
DefaultTTLField = "ttl"
)
const (
idField = "id"
valuesField = "values"
optionsField = "options"
)
var (
errNotFound = errors.New("session not found")
errMalformedSession = errors.New("malformed session data")
errEncodeFailed = errors.New("failed to encode data")
errDecodeFailed = errors.New("failed to decode data")
)
// Store provides an implementation of the gorilla sessions.Store interface backed by DynamoDB
type Store struct {
tableName string
ttlField string
codecs []securecookie.Codec
config *aws.Config
ddb *dynamodb.DynamoDB
serializer serializer
options sessions.Options
printf func(format string, args ...interface{})
}
// Get should return a cached session.
func (store *Store) Get(req *http.Request, name string) (*sessions.Session, error) {
return sessions.GetRegistry(req).Get(store, name)
}
// New should create and return a new session.
//
// Note that New should never return a nil session, even in the case of
// an error if using the Registry infrastructure to cache the session.
func (store *Store) New(req *http.Request, name string) (*sessions.Session, error) {
if cookie, errCookie := req.Cookie(name); errCookie == nil {
s := sessions.NewSession(store, name)
err := store.load(req.Context(), name, cookie.Value, s)
if err == nil {
return s, nil
}
}
s := sessions.NewSession(store, name)
s.ID = strings.TrimRight(base32.StdEncoding.EncodeToString(securecookie.GenerateRandomKey(32)), "=")
s.IsNew = true
s.Options = &sessions.Options{
Path: store.options.Path,
Domain: store.options.Domain,
MaxAge: store.options.MaxAge,
Secure: store.options.Secure,
HttpOnly: store.options.HttpOnly,
}
return s, nil
}
// Save should persist session to the underlying store implementation.
func (store *Store) Save(req *http.Request, w http.ResponseWriter, session *sessions.Session) error {
err := store.save(req.Context(), session.Name(), session)
if err != nil {
return err
}
if session.Options != nil && session.Options.MaxAge < 0 {
cookie := newCookie(session, session.Name(), "")
http.SetCookie(w, cookie)
return store.delete(req.Context(), session.ID)
}
if !session.IsNew {
// no need to set cookies if they already exist
return nil
}
cookie := newCookie(session, session.Name(), session.ID)
http.SetCookie(w, cookie)
return nil
}
func newCookie(session *sessions.Session, name, value string) *http.Cookie {
cookie := &http.Cookie{
Name: name,
Value: value,
}
if opts := session.Options; opts != nil {
cookie.Path = opts.Path
cookie.Domain = opts.Domain
cookie.MaxAge = opts.MaxAge
cookie.HttpOnly = opts.HttpOnly
cookie.Secure = opts.Secure
}
return cookie
}
// New instantiates a new Store that implements gorilla's sessions.Store interface
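//
// A minimal usage sketch (assumes AWS credentials and a region are available
// in the environment; Option constructors are defined elsewhere in this
// package):
//
//	store, err := dynastore.New()
//	if err != nil {
//		// handle error
//	}
//	session, err := store.Get(req, "session-name")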
func New(opts ...Option) (*Store, error) {
store := &Store{
tableName: DefaultTableName,
ttlField: DefaultTTLField,
printf: func(format string, args ...interface{}) {},
}
for _, opt := range opts {
opt(store)
}
if store.ddb == nil {
if store.config == nil {
region := os.Getenv("AWS_DEFAULT_REGION")
if region == "" {
region = os.Getenv("AWS_REGION")
}
store.config = &aws.Config{Region: aws.String(region)}
}
s, err := session.NewSession(store.config)
if err != nil {
return nil, err
}
store.ddb = dynamodb.New(s)
}
if len(store.codecs) > 0 {
store.serializer = &codecSerializer{codecs: store.codecs}
} else {
store.serializer = &gobSerializer{}
}
return store, nil
}
func (store *Store) save(ctx context.Context, name string, session *sessions.Session) error {
av, err := store.serializer.marshal(name, session)
if err != nil {
store.printf("dynastore: failed to marshal session - %v\n", err)
return err
}
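// When the session has a positive MaxAge, stamp the item with an absolute
// expiry (unix seconds) so a DynamoDB TTL on store.ttlField can reap it.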
if store.ttlField != "" && session.Options != nil && session.Options.MaxAge > 0 {
expiresAt := time.Now().Add(time.Duration(session.Options.MaxAge) * time.Second)
ttl := strconv.FormatInt(expiresAt.Unix(), 10)
av[store.ttlField] = &dynamodb.AttributeValue{N: aws.String(ttl)}
}
_, err = store.ddb.PutItemWithContext(ctx, &dynamodb.PutItemInput{
TableName: aws.String(store.tableName),
Item: av,
})
if err != nil {
store.printf("dynastore: PutItem failed - %v\n", err)
return err
}
return nil
}
func (store *Store) delete(ctx context.Context, id string) error {
_, err := store.ddb.DeleteItemWithContext(ctx, &dynamodb.DeleteItemInput{
TableName: aws.String(store.tableName),
Key: map[string]*dynamodb.AttributeValue{
"id": {S: aws.String(id)},
},
})
if err != nil {
store.printf("dynastore: delete failed - %v\n", err)
return err
}
return nil
}
// load loads session data from the database into session.
// It returns errNotFound when no unexpired session data exists.
func (store *Store) load(ctx context.Context, name, value string, session *sessions.Session) error {
out, err := store.ddb.GetItemWithContext(ctx, &dynamodb.GetItemInput{
TableName: aws.String(store.tableName),
ConsistentRead: aws.Bool(true),
Key: map[string]*dynamodb.AttributeValue{
"id": {S: aws.String(value)},
},
})
if err != nil {
store.printf("dynastore: GetItem failed\n")
return err
}
if len(out.Item) == 0 {
store.printf("dynastore: session not found\n")
return errNotFound
}
ttl := int64(0)
if av, ok := out.Item[store.ttlField]; ok {
if av.N == nil {
store.printf("dynastore: no ttl associated with session\n")
return errMalformedSession
}
v, err := strconv.ParseInt(*av.N, 10, 64)
if err != nil {
store.printf("dynastore: malformed session - %v\n", err)
return errMalformedSession
}
ttl = v
}
if ttl > 0 && ttl < time.Now().Unix() {
store.printf("dynastore: session expired\n")
return errNotFound
}
err = store.serializer.unmarshal(name, out.Item, session)
if err != nil {
store.printf("dynastore: unable to unmarshal session - %v\n", err)
return err
}
return nil
}
type serializer interface {
marshal(name string, session *sessions.Session) (map[string]*dynamodb.AttributeValue, error)
unmarshal(name string, in map[string]*dynamodb.AttributeValue, session *sessions.Session) error
}
| [
"\"AWS_DEFAULT_REGION\"",
"\"AWS_REGION\""
] | [] | [
"AWS_DEFAULT_REGION",
"AWS_REGION"
] | [] | ["AWS_DEFAULT_REGION", "AWS_REGION"] | go | 2 | 0 | |
example/codegen/main.go | package main
import (
"context"
"log"
"net/http"
"os"
"github.com/99designs/gqlgen/graphql/handler"
"github.com/99designs/gqlgen/graphql/playground"
"github.com/danielvladco/go-proto-gql/example/codegen/gql/constructs"
"github.com/danielvladco/go-proto-gql/example/codegen/gql/options"
"github.com/danielvladco/go-proto-gql/example/codegen/pb"
)
const defaultPort = "8088"
func main() {
port := os.Getenv("PORT")
if port == "" {
port = defaultPort
}
constructsHandler := handler.NewDefaultServer(constructs.NewExecutableSchema(constructs.Config{
Resolvers: constructsRoot{},
}))
optionsHandler := handler.NewDefaultServer(options.NewExecutableSchema(options.Config{
Resolvers: optionsRoot{},
}))
http.Handle("/", playground.Handler("GraphQL playground", "/constructs-query"))
http.Handle("/constructs-query", constructsHandler)
http.Handle("/options-query", optionsHandler)
log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
log.Fatal(http.ListenAndServe(":"+port, nil))
}
// constructsRoot implements constructs.ResolverRoot showcasing that all resolvers were generated successfully
// we need a bit of binding like in example bellow and we are ready to go
type constructsRoot struct{}
func (r constructsRoot) Maps() constructs.MapsResolver { return pb.MapsResolvers{} }
func (r constructsRoot) MapsInput() constructs.MapsInputResolver { return pb.MapsInputResolvers{} }
func (r constructsRoot) OneofInput() constructs.OneofInputResolver { return pb.OneofInputResolvers{} }
func (r constructsRoot) Mutation() constructs.MutationResolver {
return &pb.ConstructsResolvers{Service: pb.ConstructsServer(nil)}
}
func (r constructsRoot) Oneof() constructs.OneofResolver {
return pb.OneofResolvers{}
}
func (r constructsRoot) Query() constructs.QueryResolver {
return dummy{}
}
// dummy is generated when graphql schema doesn't have any query resolvers.
// In this case the dummy resolver will not be generated by the library and
// you should do it yourself like in the example below
type dummy struct{}
func (d dummy) Dummy(ctx context.Context) (*bool, error) { panic("implement me") }
// optionsRoot implements options.ResolverRoot to showcase the generated resolvers
// as well as the missing ones. Some resolvers are missing because they use grpc streams
// which are too complex for graphql to deal with by default.
//
// I might consider implementing it on the future. If you need this feature let me know by
// submitting an issue or if the issue already exists, show activity on it so I know there is real interest.
type optionsRoot struct{}
func (r optionsRoot) Data() options.DataResolver { return pb.DataResolvers{} }
func (r optionsRoot) DataInput() options.DataInputResolver { return pb.DataInputResolvers{} }
func (r optionsRoot) Mutation() options.MutationResolver {
return &optionsMutationQueryResolver{
ServiceResolvers: &pb.ServiceResolvers{Service: pb.ServiceServer(nil)},
QueryResolvers: &pb.QueryResolvers{Service: pb.QueryServer(nil)},
}
}
func (r optionsRoot) Query() options.QueryResolver {
return &optionsMutationQueryResolver{
ServiceResolvers: &pb.ServiceResolvers{Service: pb.ServiceServer(nil)},
QueryResolvers: &pb.QueryResolvers{Service: pb.QueryServer(nil)},
}
}
func (r optionsRoot) Subscription() options.SubscriptionResolver {
return &optionsSubscriptionResolver{}
}
type optionsMutationQueryResolver struct {
*pb.ServiceResolvers
*pb.TestResolvers
*pb.QueryResolvers
}
func (o optionsMutationQueryResolver) ServicePublish(ctx context.Context, in *pb.Data) (*pb.Data, error) {
panic("implement me")
}
func (o optionsMutationQueryResolver) ServicePubSub1(ctx context.Context, in *pb.Data) (*pb.Data, error) {
panic("implement me")
}
func (o optionsMutationQueryResolver) ServiceInvalidSubscribe3(ctx context.Context, in *pb.Data) (*pb.Data, error) {
panic("implement me")
}
func (o optionsMutationQueryResolver) ServicePubSub2(ctx context.Context, in *pb.Data) (*pb.Data, error) {
panic("implement me")
}
func (o optionsMutationQueryResolver) ServiceInvalidSubscribe1(ctx context.Context, in *pb.Data) (*pb.Data, error) {
panic("implement me")
}
type optionsSubscriptionResolver struct{}
func (o optionsSubscriptionResolver) ServiceSubscribe(ctx context.Context, in *pb.Data) (<-chan *pb.Data, error) {
panic("implement me")
}
func (o optionsSubscriptionResolver) ServicePubSub1(ctx context.Context, in *pb.Data) (<-chan *pb.Data, error) {
panic("implement me")
}
func (o optionsSubscriptionResolver) ServiceInvalidSubscribe2(ctx context.Context, in *pb.Data) (<-chan *pb.Data, error) {
panic("implement me")
}
func (o optionsSubscriptionResolver) ServiceInvalidSubscribe3(ctx context.Context, in *pb.Data) (<-chan *pb.Data, error) {
panic("implement me")
}
func (o optionsSubscriptionResolver) ServicePubSub2(ctx context.Context, in *pb.Data) (<-chan *pb.Data, error) {
panic("implement me")
}
func (o optionsSubscriptionResolver) QuerySubscribe(ctx context.Context, in *pb.Data) (<-chan *pb.Data, error) {
panic("implement me")
}
| [
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
tool/test.py | import os
import time
import logging
import argparse
import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
from util import dataset, transform, config
from util.util import AverageMeter, intersectionAndUnion, check_makedirs, colorize
cv2.ocl.setUseOpenCL(False)
from collections import namedtuple
Label = namedtuple( 'Label' , [
'name' , # The identifier of this label, e.g. 'car', 'person', ... .
# We use them to uniquely name a class
'id' , # An integer ID that is associated with this label.
# The IDs are used to represent the label in ground truth images
# An ID of -1 means that this label does not have an ID and thus
# is ignored when creating ground truth images (e.g. license plate).
# Do not modify these IDs, since exactly these IDs are expected by the
# evaluation server.
'trainId' , # Feel free to modify these IDs as suitable for your method. Then create
# ground truth images with train IDs, using the tools provided in the
# 'preparation' folder. However, make sure to validate or submit results
# to our evaluation server using the regular IDs above!
# For trainIds, multiple labels might have the same ID. Then, these labels
# are mapped to the same class in the ground truth images. For the inverse
# mapping, we use the label that is defined first in the list below.
# For example, mapping all void-type classes to the same ID in training,
# might make sense for some approaches.
# Max value is 255!
'category' , # The name of the category that this label belongs to
'categoryId' , # The ID of this category. Used to create ground truth images
# on category level.
'hasInstances', # Whether this label distinguishes between single instances or not
'ignoreInEval', # Whether pixels having this class as ground truth label are ignored
# during evaluations or not
'color' , # The color of this label
] )
labels = [
# name id trainId category catId hasInstances ignoreInEval color
Label( 'unlabeled' , 0 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'ego vehicle' , 1 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'rectification border' , 2 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'out of roi' , 3 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'static' , 4 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'dynamic' , 5 , 255 , 'void' , 0 , False , True , (111, 74, 0) ),
Label( 'ground' , 6 , 255 , 'void' , 0 , False , True , ( 81, 0, 81) ),
Label( 'road' , 7 , 0 , 'flat' , 1 , False , False , (128, 64,128) ),
Label( 'sidewalk' , 8 , 1 , 'flat' , 1 , False , False , (244, 35,232) ),
Label( 'parking' , 9 , 255 , 'flat' , 1 , False , True , (250,170,160) ),
Label( 'rail track' , 10 , 255 , 'flat' , 1 , False , True , (230,150,140) ),
Label( 'building' , 11 , 2 , 'construction' , 2 , False , False , ( 70, 70, 70) ),
Label( 'wall' , 12 , 3 , 'construction' , 2 , False , False , (102,102,156) ),
Label( 'fence' , 13 , 4 , 'construction' , 2 , False , False , (190,153,153) ),
Label( 'guard rail' , 14 , 255 , 'construction' , 2 , False , True , (180,165,180) ),
Label( 'bridge' , 15 , 255 , 'construction' , 2 , False , True , (150,100,100) ),
Label( 'tunnel' , 16 , 255 , 'construction' , 2 , False , True , (150,120, 90) ),
Label( 'pole' , 17 , 5 , 'object' , 3 , False , False , (153,153,153) ),
Label( 'polegroup' , 18 , 255 , 'object' , 3 , False , True , (153,153,153) ),
Label( 'traffic light' , 19 , 6 , 'object' , 3 , False , False , (250,170, 30) ),
Label( 'traffic sign' , 20 , 7 , 'object' , 3 , False , False , (220,220, 0) ),
Label( 'vegetation' , 21 , 8 , 'nature' , 4 , False , False , (107,142, 35) ),
Label( 'terrain' , 22 , 9 , 'nature' , 4 , False , False , (152,251,152) ),
Label( 'sky' , 23 , 10 , 'sky' , 5 , False , False , ( 70,130,180) ),
Label( 'person' , 24 , 11 , 'human' , 6 , True , False , (220, 20, 60) ),
Label( 'rider' , 25 , 12 , 'human' , 6 , True , False , (255, 0, 0) ),
Label( 'car' , 26 , 13 , 'vehicle' , 7 , True , False , ( 0, 0,142) ),
Label( 'truck' , 27 , 14 , 'vehicle' , 7 , True , False , ( 0, 0, 70) ),
Label( 'bus' , 28 , 15 , 'vehicle' , 7 , True , False , ( 0, 60,100) ),
Label( 'caravan' , 29 , 255 , 'vehicle' , 7 , True , True , ( 0, 0, 90) ),
Label( 'trailer' , 30 , 255 , 'vehicle' , 7 , True , True , ( 0, 0,110) ),
Label( 'train' , 31 , 16 , 'vehicle' , 7 , True , False , ( 0, 80,100) ),
Label( 'motorcycle' , 32 , 17 , 'vehicle' , 7 , True , False , ( 0, 0,230) ),
Label( 'bicycle' , 33 , 18 , 'vehicle' , 7 , True , False , (119, 11, 32) ),
Label( 'license plate' , -1 , -1 , 'vehicle' , 7 , False , True , ( 0, 0,142) ),
]
trainid2id = {label.trainId: label.id for label in labels}
def get_parser():
parser = argparse.ArgumentParser(description='PyTorch Semantic Segmentation')
parser.add_argument('--config', type=str, default='config/ade20k/ade20k_pspnet50.yaml', help='config file')
parser.add_argument('opts', help='see config/ade20k/ade20k_pspnet50.yaml for all options', default=None, nargs=argparse.REMAINDER)
args = parser.parse_args()
assert args.config is not None
cfg = config.load_cfg_from_cfg_file(args.config)
if args.opts is not None:
cfg = config.merge_cfg_from_list(cfg, args.opts)
return cfg
def get_logger():
logger_name = "main-logger"
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
fmt = "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s"
handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(handler)
return logger
def check(args):
assert args.classes > 1
assert args.zoom_factor in [1, 2, 4, 8]
assert args.split in ['train', 'val', 'test']
if args.arch == 'psp':
assert (args.train_h - 1) % 8 == 0 and (args.train_w - 1) % 8 == 0
elif args.arch == 'psa':
if args.compact:
args.mask_h = (args.train_h - 1) // (8 * args.shrink_factor) + 1
args.mask_w = (args.train_w - 1) // (8 * args.shrink_factor) + 1
else:
assert (args.mask_h is None and args.mask_w is None) or (args.mask_h is not None and args.mask_w is not None)
if args.mask_h is None and args.mask_w is None:
args.mask_h = 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1
args.mask_w = 2 * ((args.train_w - 1) // (8 * args.shrink_factor) + 1) - 1
else:
assert (args.mask_h % 2 == 1) and (args.mask_h >= 3) and (
args.mask_h <= 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1)
assert (args.mask_w % 2 == 1) and (args.mask_w >= 3) and (
args.mask_w <= 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1)
elif args.arch == 'deeplabv2':
pass
else:
raise Exception('architecture {} not supported yet'.format(args.arch))
import subprocess
# operations for polyaxon
def polyaxon_data_prepare():
from polyaxon_client.tracking import get_data_paths, get_outputs_refs_paths, get_outputs_path
# fetch data to job pods
sync_dest_dir = fetch_data_from_ssd()
#print("sync_dest_dir")
#print(sync_dest_dir)
# os.system('ls ' + sync_dest_dir + '/cityscapes')
args.data_root = os.path.join(sync_dest_dir, args.data_root)
# args.train_list = os.path.join(sync_dest_dir, args.train_list)
# args.train_labeled_list = os.path.join(sync_dest_dir, args.train_labeled_list)
# args.train_unlabeled_list = os.path.join(sync_dest_dir, args.train_unlabeled_list)
# args.unlabeled_list = os.path.join(sync_dest_dir, args.unlabeled_list)
args.val_list = os.path.join(sync_dest_dir, args.val_list)
args.test_list = os.path.join(sync_dest_dir, args.test_list)
outputpath = get_outputs_path()
#os.system('ls ' + outputpath)
#print(outputpath)
import sys
# sys.exit("debug output path")
# set output result path
# args.save_path = os.path.join(get_outputs_path(), args.save_path.replace('..', 'output'))
args.save_path = os.path.join(get_outputs_path(), args.save_path)
args.save_folder = os.path.join(get_outputs_path(), args.save_folder)
args.model_path = os.path.join(get_outputs_path(), args.model_path)
# args.result_path = os.path.join(get_outputs_path(), args.result_path.replace('..', 'output'))
# args.tensorboard_path = os.path.join(get_outputs_path(), args.tensorboard_path.replace('..', 'output'))
cmd_line = "mkdir -p {0}".format(args.save_path)
subprocess.call(cmd_line.split())
#
cmd_line = "mkdir -p {0}".format(args.save_folder)
subprocess.call(cmd_line.split())
# # copy file to save as backup
subprocess.call(
"cp tool/my_train2.sh tool/my_train2.py tool/test.py config/cityscapes/config.yaml {0}".format(args.save_path).split())
# subprocess.call("cp -r utils models {0}".format(args.result_path + '/../').split())
# cmd_line = "mkdir -p {0}".format(args.tensorboard_path)
# subprocess.call(cmd_line.split())
def fetch_data_from_ssd():
from polyaxon_client.tracking import get_data_paths, get_outputs_refs_paths, get_outputs_path
source_data = 'wubowen/' + args.data_root
sync_source_dir = os.path.join(get_data_paths()['ssd20'], source_data)
sync_dest_dir = os.path.join(get_data_paths()['host-path'], os.path.dirname(source_data))
# if not os.path.exists(sync_dest_dir):
cmd_line = "mkdir -p {0}".format(sync_dest_dir)
subprocess.call(cmd_line.split())
# data_dir = os.path.join(get_data_paths()['host-path'], source_data)
# if not os.path.exists(data_dir):
cmd_line = "rsync -r {0} {1}".format(sync_source_dir, sync_dest_dir)
subprocess.call(cmd_line.split())
return sync_dest_dir
def local_data_prepare():
args.data_root = os.path.join(args.local_prefix, args.data_root)
args.train_labeled_list = os.path.join(args.local_prefix, args.train_labeled_list)
args.train_unlabeled_list = os.path.join(args.local_prefix, args.train_unlabeled_list)
args.val_list = os.path.join(args.local_prefix, args.val_list)
args.test_list = os.path.join(args.local_prefix, args.test_list)
def main():
global args, logger
args = get_parser()
check(args)
# if args.polyaxon:
# polyaxon_data_prepare()
# else:
local_data_prepare()
logger = get_logger()
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.test_gpu)
logger.info(args)
logger.info("=> creating model ...")
logger.info("Classes: {}".format(args.classes))
value_scale = 255
mean = [0.485, 0.456, 0.406]
mean = [item * value_scale for item in mean]
std = [0.229, 0.224, 0.225]
std = [item * value_scale for item in std]
gray_folder = os.path.join(args.save_folder, 'gray')
color_folder = os.path.join(args.save_folder, 'color')
test_transform = transform.Compose([transform.ToTensor()])
test_data = dataset.SemData(split=args.split, data_root=args.data_root, data_list=args.test_list, transform=test_transform)
index_start = args.index_start
if args.index_step == 0:
index_end = len(test_data.data_list)
else:
index_end = min(index_start + args.index_step, len(test_data.data_list))
test_data.data_list = test_data.data_list[index_start:index_end]
test_loader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)
colors = np.loadtxt(args.colors_path).astype('uint8')
names = [line.rstrip('\n') for line in open(args.names_path)]
if not args.has_prediction:
if args.arch == 'psp':
from model.pspnet import PSPNet
model = PSPNet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor, pretrained=False, concatenate=args.concatenate)
elif args.arch == 'psa':
from model.psanet import PSANet
model = PSANet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor, compact=args.compact,
shrink_factor=args.shrink_factor, mask_h=args.mask_h, mask_w=args.mask_w,
normalization_factor=args.normalization_factor, psa_softmax=args.psa_softmax,
pretrained=False)
elif args.arch == 'deeplabv2':
from model.deeplabv2 import Resnet101_deeplab
print("args.pretrain data=" + args.pretrain_data)
# import ipdb; ipdb.set_trace(context=20)
model = Resnet101_deeplab(num_classes=args.classes, pretrained=True,
pretrain_data=args.pretrain_data)
modules_ori = model.pretrained_layers()
modules_new = model.new_layers()
# logger.info(model)
model = torch.nn.DataParallel(model).cuda()
cudnn.benchmark = True
if os.path.isfile(args.model_path):
logger.info("=> loading checkpoint '{}'".format(args.model_path))
checkpoint = torch.load(args.model_path)
model.load_state_dict(checkpoint['state_dict'], strict=False)
logger.info("=> loaded checkpoint '{}'".format(args.model_path))
else:
raise RuntimeError("=> no checkpoint found at '{}'".format(args.model_path))
if args.test_adabn:
target_transform = transform.Compose([
# target_transform = Compose([
transform.RandScale([args.scale_min, args.scale_max]),
transform.RandRotate([args.rotate_min, args.rotate_max], padding=mean, ignore_label=args.ignore_label),
transform.RandomGaussianBlur(),
transform.RandomHorizontalFlip(),
transform.Crop([args.train_h, args.train_w], crop_type='rand', padding=mean,
ignore_label=args.ignore_label),
transform.ToTensor(),
transform.Normalize(mean=mean, std=std)])
target_ds = dataset.SemData(split='train', data_root=args.data_root,
data_list=args.train_labeled_list,
transform=target_transform)
target_sampler = None
target_loader = torch.utils.data.DataLoader(target_ds, batch_size=args.batch_size_adabn,
shuffle=(target_sampler is None),
num_workers=args.workers, pin_memory=True,
sampler=target_sampler,
drop_last=True)
from util.reader import DataReader
reader = DataReader(target_loader)
adabn(model, reader=reader, iterations=1000, args=args)
test(test_loader, test_data.data_list, model, args.classes, mean, std, args.base_size, args.test_h, args.test_w, args.scales, gray_folder, color_folder, colors)
if args.split != 'test':
cal_acc(test_data.data_list, gray_folder, args.classes, names)
print(args.model_path)
print('\n')
print('\n')
def net_process(model, image, mean, std=None, flip=True):
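# Normalize the crop, optionally append a horizontally flipped copy for
# test-time augmentation, forward through the model, upsample the logits back
# to the input size if needed, softmax, and average flipped/unflipped outputs.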
input = torch.from_numpy(image.transpose((2, 0, 1))).float()
if std is None:
for t, m in zip(input, mean):
t.sub_(m)
else:
for t, m, s in zip(input, mean, std):
t.sub_(m).div_(s)
input = input.unsqueeze(0).cuda()
if flip:
input = torch.cat([input, input.flip(3)], 0)
with torch.no_grad():
output = model(input)
_, _, h_i, w_i = input.shape
_, _, h_o, w_o = output.shape
if (h_o != h_i) or (w_o != w_i):
output = F.interpolate(output, (h_i, w_i), mode='bilinear', align_corners=True)
output = F.softmax(output, dim=1)
if flip:
output = (output[0] + output[1].flip(2)) / 2
else:
output = output[0]
output = output.data.cpu().numpy()
output = output.transpose(1, 2, 0)
return output
def scale_process(model, image, classes, crop_h, crop_w, h, w, mean, std=None, stride_rate=2/3):
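# Sliding-window inference: pad the (rescaled) image up to the crop size, run
# net_process over overlapping crops with stride crop_size*stride_rate,
# average the per-pixel probabilities over overlaps, strip the padding, and
# resize the result back to the original (h, w).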
ori_h, ori_w, _ = image.shape
pad_h = max(crop_h - ori_h, 0)
pad_w = max(crop_w - ori_w, 0)
pad_h_half = int(pad_h / 2)
pad_w_half = int(pad_w / 2)
if pad_h > 0 or pad_w > 0:
image = cv2.copyMakeBorder(image, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=mean)
new_h, new_w, _ = image.shape
stride_h = int(np.ceil(crop_h*stride_rate))
stride_w = int(np.ceil(crop_w*stride_rate))
grid_h = int(np.ceil(float(new_h-crop_h)/stride_h) + 1)
grid_w = int(np.ceil(float(new_w-crop_w)/stride_w) + 1)
prediction_crop = np.zeros((new_h, new_w, classes), dtype=float)
count_crop = np.zeros((new_h, new_w), dtype=float)
for index_h in range(0, grid_h):
for index_w in range(0, grid_w):
s_h = index_h * stride_h
e_h = min(s_h + crop_h, new_h)
s_h = e_h - crop_h
s_w = index_w * stride_w
e_w = min(s_w + crop_w, new_w)
s_w = e_w - crop_w
image_crop = image[s_h:e_h, s_w:e_w].copy()
count_crop[s_h:e_h, s_w:e_w] += 1
prediction_crop[s_h:e_h, s_w:e_w, :] += net_process(model, image_crop, mean, std)
prediction_crop /= np.expand_dims(count_crop, 2)
prediction_crop = prediction_crop[pad_h_half:pad_h_half+ori_h, pad_w_half:pad_w_half+ori_w]
prediction = cv2.resize(prediction_crop, (w, h), interpolation=cv2.INTER_LINEAR)
return prediction
def adabn(model, reader, iterations, args):
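# AdaBN-style adaptation: run forward passes in train() mode over batches from
# the target domain so that BatchNorm running statistics are re-estimated on
# target data; the losses returned by the forward pass are not used for any
# backward step here.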
logger.info('>>>>>>>>>>>>>>>> Start Adabn >>>>>>>>>>>>>>>>')
data_time = AverageMeter()
batch_time = AverageMeter()
model.train()
end = time.time()
# for i, (input, target) in enumerate(loader):
for i in range(iterations):
input, target = reader.read_data()
input = input.cuda(non_blocking=True) # input.shape= Bx3xHxW
target = target.cuda(non_blocking=True) # TARGET.shape= BxHxW
# import ipdb;ipdb.set_trace(context=20)
data_time.update(time.time() - end)
if args.zoom_factor != 8:
h = int((target.size()[1] - 1) / 8 * args.zoom_factor + 1)
w = int((target.size()[2] - 1) / 8 * args.zoom_factor + 1)
# 'nearest' mode doesn't support align_corners mode and 'bilinear' mode is fine for downsampling
target = F.interpolate(target.unsqueeze(1).float(), size=(h, w), mode='bilinear',
align_corners=True).squeeze(1).long()
input = input.cuda(non_blocking=True) # input.shape= Bx3xHxW
target = target.cuda(non_blocking=True) # TARGET.shape= BxHxW
output_pred, main_loss, aux_loss = model(input, target, sup_loss_method=args.sup_loss_method)
batch_time.update(time.time() - end)
end = time.time()
if ((i + 1) % 10 == 0):
logger.info('adabn: [{}/{}] '
'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(i + 1, iterations,
data_time=data_time,
batch_time=batch_time))
def test(test_loader, data_list, model, classes, mean, std, base_size, crop_h, crop_w, scales, gray_folder, color_folder, colors):
logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
data_time = AverageMeter()
batch_time = AverageMeter()
model.eval()
end = time.time()
for i, (input, _) in enumerate(test_loader):
data_time.update(time.time() - end)
input = np.squeeze(input.numpy(), axis=0)
image = np.transpose(input, (1, 2, 0))
h, w, _ = image.shape
prediction = np.zeros((h, w, classes), dtype=float)
for scale in scales:
long_size = round(scale * base_size)
new_h = long_size
new_w = long_size
if h > w:
new_w = round(long_size/float(h)*w)
else:
new_h = round(long_size/float(w)*h)
image_scale = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
prediction += scale_process(model, image_scale, classes, crop_h, crop_w, h, w, mean, std)
prediction /= len(scales)
prediction = np.argmax(prediction, axis=2)
batch_time.update(time.time() - end)
end = time.time()
if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
logger.info('Test: [{}/{}] '
'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(i + 1, len(test_loader),
data_time=data_time,
batch_time=batch_time))
check_makedirs(gray_folder)
check_makedirs(color_folder)
gray = np.uint8(prediction)
color = colorize(gray, colors)
if args.split == 'test' and args.pseudo_data == 'cityscapes':
# ---- trainid to id
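            # Remap from trainid 18 down to 0: trainid2id assigns every trainid
            # a strictly larger label id, so a pixel that has already been
            # rewritten can no longer match any smaller trainid processed later.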
for trainid in range(args.classes):
trainid = 18-trainid
id = trainid2id[trainid]
gray[gray == trainid] = id
# import ipdb; ipdb.set_trace(context=20)
image_path, _ = data_list[i]
image_name = image_path.split('/')[-1].split('.')[0]
gray_path = os.path.join(gray_folder, image_name + '.png')
color_path = os.path.join(color_folder, image_name + '.png')
cv2.imwrite(gray_path, gray)
# color.save(color_path)
logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
def cal_acc(data_list, pred_folder, classes, names):
intersection_meter = AverageMeter()
union_meter = AverageMeter()
target_meter = AverageMeter()
for i, (image_path, target_path) in enumerate(data_list):
image_name = image_path.split('/')[-1].split('.')[0]
pred = cv2.imread(os.path.join(pred_folder, image_name+'.png'), cv2.IMREAD_GRAYSCALE)
target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
intersection, union, target = intersectionAndUnion(pred, target, classes)
intersection_meter.update(intersection)
union_meter.update(union)
target_meter.update(target)
accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)
logger.info('Evaluating {0}/{1} on image {2}, accuracy {3:.4f}.'.format(i + 1, len(data_list), image_name+'.png', accuracy))
iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
mIoU = np.mean(iou_class)
mAcc = np.mean(accuracy_class)
allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(mIoU, mAcc, allAcc))
for i in range(classes):
logger.info('Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(i, iou_class[i], accuracy_class[i], names[i]))
for i in range(classes):
print(iou_class[i])
if __name__ == '__main__':
main()
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
pegaflow/service/server.py | import logging
import os
import random
import click
import flask
from OpenSSL import crypto
from pegaflow.service import cache
from pegaflow.service._encoder import PegasusJsonEncoder
from pegaflow.service.base import BooleanConverter
from pegaflow.service.filters import register_jinja2_filters
from pegaflow.service.lifecycle import register_lifecycle_handlers
log = logging.getLogger(__name__)
# Services
services = ["dashboard", "monitoring"]
def generate_self_signed_certificate(certfile, pkeyfile):
"""
SSL.
:param certfile:
:param pkeyfile:
:return:
If certfile and pkeyfile don't exist, create a self-signed certificate
"""
if os.path.isfile(certfile) and os.path.isfile(pkeyfile):
return
logging.info("Generating self-signed certificate")
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 2048)
cert = crypto.X509()
sub = cert.get_subject()
sub.C = "US"
sub.ST = "California"
sub.L = "Marina Del Rey"
sub.O = "University of Southern California"
sub.OU = "Information Sciences Institute"
sub.CN = "Pegasus Service"
cert.set_version(1)
cert.set_serial_number(random.randint(0, 2 ** 32))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60) # 10 years
cert.set_issuer(sub)
cert.set_pubkey(pkey)
cert.sign(pkey, "sha1")
open(certfile, "wb").write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
open(pkeyfile, "wb").write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
def run(host="localhost", port=5000, debug=True, verbose=logging.INFO, **kwargs):
app = create_app(env=os.getenv("FLASK_ENV", "development"))
if debug:
app.config.update(DEBUG=True)
logging.getLogger().setLevel(logging.DEBUG)
pegasusdir = os.path.expanduser("~/.pegasus")
if not os.path.isdir(pegasusdir):
os.makedirs(pegasusdir, mode=0o744)
cert = app.config.get("CERTIFICATE", None)
pkey = app.config.get("PRIVATE_KEY", None)
if cert is None or pkey is None:
log.warning("SSL is not configured: Using self-signed certificate")
cert = os.path.expanduser("~/.pegasus/selfcert.pem")
pkey = os.path.expanduser("~/.pegasus/selfkey.pem")
generate_self_signed_certificate(cert, pkey)
ssl_context = (cert, pkey)
if os.getuid() != 0:
log.warning("Service not running as root: Will not be able to switch users")
app.run(
host=host, port=port, threaded=True, ssl_context=ssl_context,
)
log.info("Exiting")
def _load_user_config(app):
# Load user configuration
conf = os.path.expanduser("~/.pegasus/service.py")
if os.path.isfile(conf):
app.config.from_pyfile(conf)
def create_app(config=None, env="development"):
"""Configure app."""
# Environment
os.environ["FLASK_ENV"] = env
app = flask.Flask(__name__)
# Flask Configuration
app.config.from_object("Pegasus.service.defaults")
# app.config.from_object("Pegasus.service.config.%sConfig" % env.capitalize())
_load_user_config(app)
app.config.update(config or {})
if "PEGASUS_ENV" in os.environ:
app.config.from_envvar("PEGASUS_ENV")
# Initialize Extensions
cache.init_app(app)
# db.init_app(app)
# socketio.init_app(app, json=flask.json)
configure_app(app)
# Service Configuration
for service in services:
config_method = "configure_%s" % service
if config_method in globals():
globals()["configure_%s" % service](app)
return app
def configure_app(app):
#
# Flask URL variables support int, float, and path converters.
# Adding support for a boolean converter.
#
app.url_map.converters["boolean"] = BooleanConverter
#
# Relax trailing slash requirement
#
app.url_map.strict_slashes = False
# Attach global JSONEncoder
app.json_encoder = PegasusJsonEncoder
# Register lifecycle methods
register_lifecycle_handlers(app)
# Register Jinja2 Filters
register_jinja2_filters(app)
# Error handlers
## register_error_handlers(app)
...
def configure_dashboard(app):
from pegaflow.service.dashboard import blueprint
app.register_blueprint(blueprint)
def configure_monitoring(app):
from pegaflow.service.monitoring import monitoring
app.register_blueprint(monitoring, url_prefix="/api/v1/user/<string:username>")
@click.command(name="pegasus-service")
@click.option(
"--host",
default="localhost",
metavar="<hostname>",
show_default=True,
help="Hostname",
)
@click.option(
"-p",
"--port",
type=int,
default=5000,
metavar="<port-number>",
show_default=True,
help="Port no. on which to listen for requests",
)
@click.option(
"-d/-nd",
"--debug/--no-debug",
default=True,
metavar="<debug-mode>",
help="Start server in development mode",
)
@click.option(
"-v",
"--verbose",
default=logging.DEBUG,
count=True,
metavar="<verbosity>",
help="Logging verbosity",
)
def main(host: str, port: int, debug: bool, verbose: int):
"""Run the Pegasus Service server."""
run(host=host, port=port, debug=debug, verbose=verbose)
if __name__ == "__main__":
main()
| [] | [] | ["FLASK_ENV"] | [] | ["FLASK_ENV"] | python | 1 | 0 | |
catalyst/contrib/utils/compression.py | import base64
import logging
import os
import numpy as np
from six import string_types
from .serialization import deserialize, serialize
logger = logging.getLogger(__name__)
try:
import lz4.frame
LZ4_ENABLED = True
except ImportError as ex:
if os.environ.get("USE_LZ4", "0") == "1":
        logger.warning(
            "USE_LZ4 is set but lz4 is not available. "
            "To install lz4, run `pip install lz4`."
        )
raise ex
LZ4_ENABLED = False
def is_compressed(data):
"""@TODO: Docs. Contribution is welcome."""
return isinstance(data, (bytes, string_types))
def compress(data):
"""@TODO: Docs. Contribution is welcome."""
if LZ4_ENABLED:
data = serialize(data)
data = lz4.frame.compress(data)
data = base64.b64encode(data).decode("ascii")
return data
def compress_if_needed(data):
"""@TODO: Docs. Contribution is welcome."""
if isinstance(data, np.ndarray):
data = compress(data)
return data
def decompress(data):
"""@TODO: Docs. Contribution is welcome."""
if LZ4_ENABLED:
data = base64.b64decode(data)
data = lz4.frame.decompress(data)
data = deserialize(data)
return data
def decompress_if_needed(data):
"""@TODO: Docs. Contribution is welcome."""
if is_compressed(data):
data = decompress(data)
return data
if LZ4_ENABLED:
pack = compress
pack_if_needed = compress_if_needed
unpack = decompress
unpack_if_needed = decompress_if_needed
else:
pack = serialize
pack_if_needed = serialize
unpack = deserialize
unpack_if_needed = deserialize
__all__ = ["pack", "pack_if_needed", "unpack", "unpack_if_needed"]
| [] | [] | ["USE_LZ4"] | [] | ["USE_LZ4"] | python | 1 | 0 | |
Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/common.go | // +build acceptance rackspace objectstorage v1
package v1
import (
"os"
"testing"
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/acceptance/tools"
"github.com/rackspace/gophercloud/rackspace"
th "github.com/rackspace/gophercloud/testhelper"
)
func rackspaceAuthOptions(t *testing.T) gophercloud.AuthOptions {
// Obtain credentials from the environment.
options, err := rackspace.AuthOptionsFromEnv()
th.AssertNoErr(t, err)
options = tools.OnlyRS(options)
if options.Username == "" {
t.Fatal("Please provide a Rackspace username as RS_USERNAME.")
}
if options.APIKey == "" {
t.Fatal("Please provide a Rackspace API key as RS_API_KEY.")
}
return options
}
func createClient(t *testing.T, cdn bool) (*gophercloud.ServiceClient, error) {
region := os.Getenv("RS_REGION")
if region == "" {
t.Fatal("Please provide a Rackspace region as RS_REGION")
}
ao := rackspaceAuthOptions(t)
provider, err := rackspace.NewClient(ao.IdentityEndpoint)
th.AssertNoErr(t, err)
err = rackspace.Authenticate(provider, ao)
th.AssertNoErr(t, err)
if cdn {
return rackspace.NewObjectCDNV1(provider, gophercloud.EndpointOpts{
Region: region,
})
}
return rackspace.NewObjectStorageV1(provider, gophercloud.EndpointOpts{
Region: region,
})
}
| ["\"RS_REGION\""] | [] | ["RS_REGION"] | [] | ["RS_REGION"] | go | 1 | 0 |
vendor/github.com/denverdino/aliyungo/ecs/client.go | package ecs
import (
"os"
"github.com/denverdino/aliyungo/common"
)
// Interval for checking status in WaitForXXX method
const DefaultWaitForInterval = 5
// Default timeout value for WaitForXXX method
const DefaultTimeout = 60
type Client struct {
common.Client
}
const (
// ECSDefaultEndpoint is the default API endpoint of ECS services
ECSDefaultEndpoint = "https://ecs-cn-hangzhou.aliyuncs.com"
ECSAPIVersion = "2014-05-26"
ECSServiceCode = "ecs"
VPCDefaultEndpoint = "https://vpc.aliyuncs.com"
VPCAPIVersion = "2016-04-28"
VPCServiceCode = "vpc"
)
// NewClient creates a new instance of ECS client
func NewClient(accessKeyId, accessKeySecret string) *Client {
endpoint := os.Getenv("ECS_ENDPOINT")
if endpoint == "" {
endpoint = ECSDefaultEndpoint
}
return NewClientWithEndpoint(endpoint, accessKeyId, accessKeySecret)
}
func NewClientWithRegion(endpoint string, accessKeyId string, accessKeySecret string, regionID common.Region) *Client {
client := &Client{}
client.NewInit(endpoint, ECSAPIVersion, accessKeyId, accessKeySecret, ECSServiceCode, regionID)
return client
}
func NewClientWithEndpoint(endpoint string, accessKeyId string, accessKeySecret string) *Client {
client := &Client{}
client.Init(endpoint, ECSAPIVersion, accessKeyId, accessKeySecret)
return client
}
// ---------------------------------------
// NewECSClient creates a new instance of ECS client
// ---------------------------------------
func NewECSClient(accessKeyId, accessKeySecret string, regionID common.Region) *Client {
return NewECSClientWithSecurityToken(accessKeyId, accessKeySecret, "", regionID)
}
func NewECSClientWithSecurityToken(accessKeyId string, accessKeySecret string, securityToken string, regionID common.Region) *Client {
endpoint := os.Getenv("ECS_ENDPOINT")
if endpoint == "" {
endpoint = ECSDefaultEndpoint
}
return NewECSClientWithEndpointAndSecurityToken(endpoint, accessKeyId, accessKeySecret, securityToken, regionID)
}
//only for Hangzhou Regional Domain
func NewECSClientWithSecurityToken4RegionalDomain(accessKeyId string, accessKeySecret string, securityToken string, regionID common.Region) *Client {
endpoint := os.Getenv("ECS_ENDPOINT")
if endpoint == "" {
endpoint = ECSDefaultEndpoint
}
return NewECSClientWithEndpointAndSecurityToken4RegionalDomain(endpoint, accessKeyId, accessKeySecret, securityToken, regionID)
}
func NewECSClientWithEndpoint(endpoint string, accessKeyId string, accessKeySecret string, regionID common.Region) *Client {
return NewECSClientWithEndpointAndSecurityToken(endpoint, accessKeyId, accessKeySecret, "", regionID)
}
func NewECSClientWithEndpointAndSecurityToken(endpoint string, accessKeyId string, accessKeySecret string, securityToken string, regionID common.Region) *Client {
client := &Client{}
client.WithEndpoint(endpoint).
WithVersion(ECSAPIVersion).
WithAccessKeyId(accessKeyId).
WithAccessKeySecret(accessKeySecret).
WithSecurityToken(securityToken).
WithServiceCode(ECSServiceCode).
WithRegionID(regionID).
InitClient()
return client
}
func NewECSClientWithEndpointAndSecurityToken4RegionalDomain(endpoint string, accessKeyId string, accessKeySecret string, securityToken string, regionID common.Region) *Client {
client := &Client{}
client.WithEndpoint(endpoint).
WithVersion(ECSAPIVersion).
WithAccessKeyId(accessKeyId).
WithAccessKeySecret(accessKeySecret).
WithSecurityToken(securityToken).
WithServiceCode(ECSServiceCode).
WithRegionID(regionID).
InitClient4RegionalDomain()
return client
}
// ---------------------------------------
// NewVPCClient creates a new instance of VPC client
// ---------------------------------------
func NewVPCClient(accessKeyId string, accessKeySecret string, regionID common.Region) *Client {
return NewVPCClientWithSecurityToken(accessKeyId, accessKeySecret, "", regionID)
}
func NewVPCClientWithSecurityToken(accessKeyId string, accessKeySecret string, securityToken string, regionID common.Region) *Client {
endpoint := os.Getenv("VPC_ENDPOINT")
if endpoint == "" {
endpoint = VPCDefaultEndpoint
}
return NewVPCClientWithEndpointAndSecurityToken(endpoint, accessKeyId, accessKeySecret, securityToken, regionID)
}
//Only for Hangzhou
func NewVPCClientWithSecurityToken4RegionalDomain(accessKeyId string, accessKeySecret string, securityToken string, regionID common.Region) *Client {
endpoint := os.Getenv("VPC_ENDPOINT")
if endpoint == "" {
endpoint = VPCDefaultEndpoint
}
return NewVPCClientWithEndpointAndSecurityToken4RegionalDomain(endpoint, accessKeyId, accessKeySecret, securityToken, regionID)
}
func NewVPCClientWithEndpoint(endpoint string, accessKeyId string, accessKeySecret string, regionID common.Region) *Client {
return NewVPCClientWithEndpointAndSecurityToken(endpoint, accessKeyId, accessKeySecret, "", regionID)
}
func NewVPCClientWithEndpointAndSecurityToken(endpoint string, accessKeyId string, accessKeySecret string, securityToken string, regionID common.Region) *Client {
client := &Client{}
client.WithEndpoint(endpoint).
WithVersion(VPCAPIVersion).
WithAccessKeyId(accessKeyId).
WithAccessKeySecret(accessKeySecret).
WithSecurityToken(securityToken).
WithServiceCode(VPCServiceCode).
WithRegionID(regionID).
InitClient()
return client
}
//Only for Hangzhou
func NewVPCClientWithEndpointAndSecurityToken4RegionalDomain(endpoint string, accessKeyId string, accessKeySecret string, securityToken string, regionID common.Region) *Client {
client := &Client{}
client.WithEndpoint(endpoint).
WithVersion(VPCAPIVersion).
WithAccessKeyId(accessKeyId).
WithAccessKeySecret(accessKeySecret).
WithSecurityToken(securityToken).
WithServiceCode(VPCServiceCode).
WithRegionID(regionID).
InitClient4RegionalDomain()
return client
}
// ---------------------------------------
// NewVPCClientWithRegion creates a new instance of VPC client automatically get endpoint
// ---------------------------------------
func NewVPCClientWithRegion(endpoint string, accessKeyId string, accessKeySecret string, regionID common.Region) *Client {
client := &Client{}
client.NewInit(endpoint, VPCAPIVersion, accessKeyId, accessKeySecret, VPCServiceCode, regionID)
return client
}
| ["\"ECS_ENDPOINT\"", "\"ECS_ENDPOINT\"", "\"ECS_ENDPOINT\"", "\"VPC_ENDPOINT\"", "\"VPC_ENDPOINT\""] | [] | ["VPC_ENDPOINT", "ECS_ENDPOINT"] | [] | ["VPC_ENDPOINT", "ECS_ENDPOINT"] | go | 2 | 0 |
features/environment.py | import subprocess
import time
import os
TEST_TYPE = os.getenv("TEST_TYPE", "bdd")
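# TEST_TYPE=bdd (the default) starts a local server for every scenario;
# any other value runs the suite against an already-deployed service
# reachable at ROOT_ENDPOINT.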
def before_scenario(context, scenario):
if f"{TEST_TYPE}" == "bdd":
proc = subprocess.Popen(["make", "start"])
time.sleep(4)
context.proc = proc
context.root_url = "http://localhost:5000"
else:
context.root_url = os.getenv("ROOT_ENDPOINT")
def after_scenario(context, scenario):
if f"{TEST_TYPE}" == "bdd":
proc = context.proc
proc.terminate()
| [] | [] | ["ROOT_ENDPOINT", "TEST_TYPE"] | [] | ["ROOT_ENDPOINT", "TEST_TYPE"] | python | 2 | 0 |
configs/gdrn/lmSingleObj/resnest50d_a6_AugCosyAAE_BG05_lmRealPbr100e_SO/resnest50d_a6_AugCosyAAE_BG05_lmRealPbr100e_duck.py | _base_ = ["../../../_base_/gdrn_base.py"]
OUTPUT_DIR = "output/gdrn/lmRealPbrSO/resnest50d_a6_AugCosyAAE_BG05_lmRealPbr100e_SO/duck"
INPUT = dict(
DZI_PAD_SCALE=1.5,
TRUNCATE_FG=False,
CHANGE_BG_PROB=0.5,
COLOR_AUG_PROB=0.8,
COLOR_AUG_TYPE="code",
COLOR_AUG_CODE=(
"Sequential(["
# Sometimes(0.5, PerspectiveTransform(0.05)),
# Sometimes(0.5, CropAndPad(percent=(-0.05, 0.1))),
# Sometimes(0.5, Affine(scale=(1.0, 1.2))),
"Sometimes(0.5, CoarseDropout( p=0.2, size_percent=0.05) ),"
"Sometimes(0.4, GaussianBlur((0., 3.))),"
"Sometimes(0.3, pillike.EnhanceSharpness(factor=(0., 50.))),"
"Sometimes(0.3, pillike.EnhanceContrast(factor=(0.2, 50.))),"
"Sometimes(0.5, pillike.EnhanceBrightness(factor=(0.1, 6.))),"
"Sometimes(0.3, pillike.EnhanceColor(factor=(0., 20.))),"
"Sometimes(0.5, Add((-25, 25), per_channel=0.3)),"
"Sometimes(0.3, Invert(0.2, per_channel=True)),"
"Sometimes(0.5, Multiply((0.6, 1.4), per_channel=0.5)),"
"Sometimes(0.5, Multiply((0.6, 1.4))),"
"Sometimes(0.1, AdditiveGaussianNoise(scale=10, per_channel=True)),"
"Sometimes(0.5, iaa.contrast.LinearContrast((0.5, 2.2), per_channel=0.3)),"
# "Sometimes(0.5, Grayscale(alpha=(0.0, 1.0)))," # maybe remove for det
"], random_order=True)"
# cosy+aae
),
)
SOLVER = dict(
IMS_PER_BATCH=24,
TOTAL_EPOCHS=100,
LR_SCHEDULER_NAME="flat_and_anneal",
ANNEAL_METHOD="cosine", # "cosine"
ANNEAL_POINT=0.72,
# REL_STEPS=(0.3125, 0.625, 0.9375),
OPTIMIZER_CFG=dict(_delete_=True, type="Ranger", lr=1e-4, weight_decay=0),
WEIGHT_DECAY=0.0,
WARMUP_FACTOR=0.001,
WARMUP_ITERS=1000,
)
DATASETS = dict(
TRAIN=("lm_pbr_duck_train", "lm_real_duck_train"),
TEST=("lm_real_duck_test",),
DET_FILES_TEST=(
"datasets/BOP_DATASETS/lm/test/test_bboxes/yolov4x_640_test672_augCosyAAEGray_ranger_lm_pbr_lm_test_16e.json",
),
)
MODEL = dict(
LOAD_DETS_TEST=True,
PIXEL_MEAN=[0.0, 0.0, 0.0],
PIXEL_STD=[255.0, 255.0, 255.0],
POSE_NET=dict(
NAME="GDRN",
XYZ_ONLINE=True,
BACKBONE=dict(
FREEZE=False,
PRETRAINED="timm",
INIT_CFG=dict(
type="timm/resnest50d",
pretrained=True,
in_chans=3,
features_only=True,
out_indices=(4,),
),
),
## geo head: Mask, XYZ, Region
GEO_HEAD=dict(
FREEZE=False,
INIT_CFG=dict(
type="TopDownMaskXyzRegionHead",
in_dim=2048, # this is num out channels of backbone conv feature
),
NUM_REGIONS=64,
),
PNP_NET=dict(
INIT_CFG=dict(norm="GN", act="gelu"),
REGION_ATTENTION=True,
WITH_2D_COORD=True,
ROT_TYPE="allo_rot6d",
TRANS_TYPE="centroid_z",
),
LOSS_CFG=dict(
# xyz loss ----------------------------
XYZ_LOSS_TYPE="L1", # L1 | CE_coor
XYZ_LOSS_MASK_GT="visib", # trunc | visib | obj
XYZ_LW=1.0,
# mask loss ---------------------------
MASK_LOSS_TYPE="L1", # L1 | BCE | CE
MASK_LOSS_GT="trunc", # trunc | visib | gt
MASK_LW=1.0,
# region loss -------------------------
REGION_LOSS_TYPE="CE", # CE
REGION_LOSS_MASK_GT="visib", # trunc | visib | obj
REGION_LW=1.0,
# pm loss --------------
PM_LOSS_SYM=True, # NOTE: sym loss
PM_R_ONLY=True, # only do R loss in PM
PM_LW=1.0,
# centroid loss -------
CENTROID_LOSS_TYPE="L1",
CENTROID_LW=1.0,
# z loss -----------
Z_LOSS_TYPE="L1",
Z_LW=1.0,
),
),
)
TEST = dict(EVAL_PERIOD=0, VIS=False, TEST_BBOX_TYPE="est") # gt | est
| [] | [] | [] | [] | [] | python | null | null | null |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.default')
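    # setdefault only sets the variable when it is not already present, so an
    # explicit DJANGO_SETTINGS_MODULE in the environment still takes precedence.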
try:
from django.core.management import execute_from_command_line
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
execute_from_command_line(sys.argv)
| [] | [] | ["DJANGO_SETTINGS_MODULE"] | [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 |
godipper.go | package main
import (
"database/sql"
"log"
"net/http"
"os"
"github.com/alexedwards/scs/v2"
_ "github.com/go-sql-driver/mysql"
"github.com/graphql-go/handler"
"github.com/rs/cors"
)
func main() {
db, err := sql.Open("mysql", os.Getenv("DSN"))
if err != nil {
log.Fatalf("opening databse: %v", err)
}
defer db.Close()
sm := scs.New()
us := userService{db: db, sm: sm}
as := addressService{db: db, us: us}
es := extraService{db: db}
is := itemService{db: db, es: es}
tds := tripleDipperService{db: db, is: is}
ors := orderService{db: db, as: as, tds: tds, us: us}
svc := &service{
user: us,
address: as,
extra: es,
item: is,
tripleDipper: tds,
order: ors,
}
mux := http.NewServeMux()
schema, err := schema(svc)
if err != nil {
log.Fatalf("starting server: %v", err)
}
mux.Handle("/graphql", handler.New(&handler.Config{
Schema: &schema,
Pretty: true,
GraphiQL: true,
}))
assetServer := http.FileServer(http.Dir("./assets"))
// The /assets prefix must be stripped. Otherwise, all of the paths that
// the file server would search for in the local assets directory would
// start with the prefix.
// For example, /assets/file => ./assets/assets/file
mux.Handle("/assets/", http.StripPrefix("/assets", assetServer))
c := cors.New(cors.Options{
AllowedOrigins: []string{os.Getenv("CLIENT_ORIGIN")},
AllowCredentials: true,
})
log.Fatal(http.ListenAndServe(":3000", sm.LoadAndSave(c.Handler(mux))))
}
| ["\"DSN\"", "\"CLIENT_ORIGIN\""] | [] | ["DSN", "CLIENT_ORIGIN"] | [] | ["DSN", "CLIENT_ORIGIN"] | go | 2 | 0 |
conans/client/tools/oss.py | import multiprocessing
import platform
import subprocess
import sys
import os
from conans.client.tools.env import environment_append
from conans.errors import ConanException
from conans.model.version import Version
from conans.util.log import logger
from conans.client.tools import which
_global_output = None
def args_to_string(args):
if not args:
return ""
if sys.platform == 'win32':
return subprocess.list2cmdline(args)
else:
return " ".join("'" + arg.replace("'", r"'\''") + "'" for arg in args)
def cpu_count():
try:
env_cpu_count = os.getenv("CONAN_CPU_COUNT", None)
return int(env_cpu_count) if env_cpu_count else multiprocessing.cpu_count()
except NotImplementedError:
_global_output.warn("multiprocessing.cpu_count() not implemented. Defaulting to 1 cpu")
return 1 # Safe guess
def detected_architecture():
# FIXME: Very weak check but not very common to run conan in other architectures
machine = platform.machine()
if "64" in machine:
return "x86_64"
elif "86" in machine:
return "x86"
elif "armv8" in machine:
return "armv8"
elif "armv7" in machine:
return "armv7"
return None
# DETECT OS, VERSION AND DISTRIBUTIONS
class OSInfo(object):
""" Usage:
(os_info.is_linux) # True/False
(os_info.is_windows) # True/False
(os_info.is_macos) # True/False
(os_info.is_freebsd) # True/False
(os_info.is_solaris) # True/False
(os_info.linux_distro) # debian, ubuntu, fedora, centos...
(os_info.os_version) # 5.1
(os_info.os_version_name) # Windows 7, El Capitan
if os_info.os_version > "10.1":
pass
if os_info.os_version == "10.1.0":
pass
"""
def __init__(self):
self.os_version = None
self.os_version_name = None
self.is_linux = platform.system() == "Linux"
self.linux_distro = None
self.is_windows = platform.system() == "Windows"
self.is_macos = platform.system() == "Darwin"
self.is_freebsd = platform.system() == "FreeBSD"
self.is_solaris = platform.system() == "SunOS"
self.is_posix = os.pathsep == ':'
if self.is_linux:
import distro
self.linux_distro = distro.id()
self.os_version = Version(distro.version())
version_name = distro.codename()
self.os_version_name = version_name if version_name != "n/a" else ""
if not self.os_version_name and self.linux_distro == "debian":
self.os_version_name = self.get_debian_version_name(self.os_version)
elif self.is_windows:
self.os_version = self.get_win_os_version()
self.os_version_name = self.get_win_version_name(self.os_version)
elif self.is_macos:
self.os_version = Version(platform.mac_ver()[0])
self.os_version_name = self.get_osx_version_name(self.os_version)
elif self.is_freebsd:
self.os_version = self.get_freebsd_version()
self.os_version_name = "FreeBSD %s" % self.os_version
elif self.is_solaris:
self.os_version = Version(platform.release())
self.os_version_name = self.get_solaris_version_name(self.os_version)
@property
def with_apt(self):
return self.is_linux and self.linux_distro in \
("debian", "ubuntu", "knoppix", "linuxmint", "raspbian")
@property
def with_yum(self):
return self.is_linux and self.linux_distro in \
("centos", "redhat", "fedora", "pidora", "scientific",
"xenserver", "amazon", "oracle", "rhel")
@property
def with_pacman(self):
if self.is_linux:
return self.linux_distro in ["arch", "manjaro"]
elif self.is_windows and which('uname.exe'):
uname = subprocess.check_output(['uname.exe', '-s']).decode()
return uname.startswith('MSYS_NT') and which('pacman.exe')
return False
@property
def with_zypper(self):
return self.is_linux and self.linux_distro in \
("opensuse", "sles")
@staticmethod
def get_win_os_version():
"""
Get's the OS major and minor versions. Returns a tuple of
(OS_MAJOR, OS_MINOR).
"""
import ctypes
class _OSVERSIONINFOEXW(ctypes.Structure):
_fields_ = [('dwOSVersionInfoSize', ctypes.c_ulong),
('dwMajorVersion', ctypes.c_ulong),
('dwMinorVersion', ctypes.c_ulong),
('dwBuildNumber', ctypes.c_ulong),
('dwPlatformId', ctypes.c_ulong),
('szCSDVersion', ctypes.c_wchar * 128),
('wServicePackMajor', ctypes.c_ushort),
('wServicePackMinor', ctypes.c_ushort),
('wSuiteMask', ctypes.c_ushort),
('wProductType', ctypes.c_byte),
('wReserved', ctypes.c_byte)]
os_version = _OSVERSIONINFOEXW()
os_version.dwOSVersionInfoSize = ctypes.sizeof(os_version)
retcode = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version))
if retcode != 0:
return None
return Version("%d.%d" % (os_version.dwMajorVersion, os_version.dwMinorVersion))
@staticmethod
def get_debian_version_name(version):
if not version:
return None
elif version.major() == "8.Y.Z":
return "jessie"
elif version.major() == "7.Y.Z":
return "wheezy"
elif version.major() == "6.Y.Z":
return "squeeze"
elif version.major() == "5.Y.Z":
return "lenny"
elif version.major() == "4.Y.Z":
return "etch"
elif version.minor() == "3.1.Z":
return "sarge"
elif version.minor() == "3.0.Z":
return "woody"
@staticmethod
def get_win_version_name(version):
if not version:
return None
elif version.major() == "5.Y.Z":
return "Windows XP"
elif version.minor() == "6.0.Z":
return "Windows Vista"
elif version.minor() == "6.1.Z":
return "Windows 7"
elif version.minor() == "6.2.Z":
return "Windows 8"
elif version.minor() == "6.3.Z":
return "Windows 8.1"
elif version.minor() == "10.0.Z":
return "Windows 10"
@staticmethod
def get_osx_version_name(version):
if not version:
return None
elif version.minor() == "10.13.Z":
return "High Sierra"
elif version.minor() == "10.12.Z":
return "Sierra"
elif version.minor() == "10.11.Z":
return "El Capitan"
elif version.minor() == "10.10.Z":
return "Yosemite"
elif version.minor() == "10.9.Z":
return "Mavericks"
elif version.minor() == "10.8.Z":
return "Mountain Lion"
elif version.minor() == "10.7.Z":
return "Lion"
elif version.minor() == "10.6.Z":
return "Snow Leopard"
elif version.minor() == "10.5.Z":
return "Leopard"
elif version.minor() == "10.4.Z":
return "Tiger"
elif version.minor() == "10.3.Z":
return "Panther"
elif version.minor() == "10.2.Z":
return "Jaguar"
elif version.minor() == "10.1.Z":
return "Puma"
elif version.minor() == "10.0.Z":
return "Cheetha"
@staticmethod
def get_freebsd_version():
return platform.release().split("-")[0]
@staticmethod
def get_solaris_version_name(version):
if not version:
return None
elif version.minor() == "5.10":
return "Solaris 10"
elif version.minor() == "5.11":
return "Solaris 11"
@staticmethod
def bash_path():
if os.getenv("CONAN_BASH_PATH"):
return os.getenv("CONAN_BASH_PATH")
return which("bash")
@staticmethod
def uname(options=None):
options = " %s" % options if options else ""
if platform.system() != "Windows":
raise ConanException("Command only for Windows operating system")
custom_bash_path = OSInfo.bash_path()
if not custom_bash_path:
raise ConanException("bash is not in the path")
command = '"%s" -c "uname%s"' % (custom_bash_path, options)
try:
# the uname executable is many times located in the same folder as bash.exe
with environment_append({"PATH": [os.path.dirname(custom_bash_path)]}):
                ret = subprocess.check_output(command, shell=True).decode().strip().lower()
return ret
except Exception:
return None
@staticmethod
def detect_windows_subsystem():
from conans.client.tools.win import CYGWIN, MSYS2, MSYS, WSL
try:
output = OSInfo.uname()
except ConanException:
return None
if not output:
return None
if "cygwin" in output:
return CYGWIN
elif "msys" in output or "mingw" in output:
output = OSInfo.uname("-or")
if output.startswith("2"):
return MSYS2
elif output.startswith("1"):
return MSYS
else:
return None
elif "linux" in output:
return WSL
else:
return None
def cross_building(settings, self_os=None, self_arch=None):
ret = get_cross_building_settings(settings, self_os, self_arch)
build_os, build_arch, host_os, host_arch = ret
if host_os is not None and (build_os != host_os):
return True
if host_arch is not None and (build_arch != host_arch):
return True
return False
def get_cross_building_settings(settings, self_os=None, self_arch=None):
build_os = self_os or settings.get_safe("os_build") or \
{"Darwin": "Macos"}.get(platform.system(), platform.system())
build_arch = self_arch or settings.get_safe("arch_build") or detected_architecture()
host_os = settings.get_safe("os")
host_arch = settings.get_safe("arch")
return build_os, build_arch, host_os, host_arch
def get_gnu_triplet(os, arch, compiler=None):
"""
Returns string with <machine>-<vendor>-<op_system> triplet (<vendor> can be omitted in practice)
:param os: os to be used to create the triplet
:param arch: arch to be used to create the triplet
:param compiler: compiler used to create the triplet (only needed fo windows)
"""
if os == "Windows" and compiler is None:
raise ConanException("'compiler' parameter for 'get_gnu_triplet()' is not specified and "
"needed for os=Windows")
# Calculate the arch
machine = {"x86": "i686" if os != "Linux" else "x86",
"x86_64": "x86_64",
"armv6": "arm",
"armv7": "arm",
"armv7s": "arm",
"armv7k": "arm",
"armv7hf": "arm",
"armv8": "aarch64"}.get(arch, None)
if machine is None:
raise ConanException("Unknown '%s' machine, Conan doesn't know how to "
"translate it to the GNU triplet, please report at "
" https://github.com/conan-io/conan/issues" % arch)
# Calculate the OS
if compiler == "gcc":
windows_op = "w64-mingw32"
elif compiler == "Visual Studio":
windows_op = "windows-msvc"
else:
windows_op = "windows"
op_system = {"Windows": windows_op,
"Linux": "linux-gnu",
"Darwin": "apple-darwin",
"Android": "linux-android",
"Macos": "apple-darwin",
"iOS": "apple-darwin",
"watchOS": "apple-darwin",
"tvOS": "apple-darwin"}.get(os, os.lower())
if os in ("Linux", "Android"):
if "arm" in arch and arch != "armv8":
op_system += "eabi"
if arch == "armv7hf" and os == "Linux":
op_system += "hf"
return "%s-%s" % (machine, op_system)
try:
os_info = OSInfo()
except Exception as exc:
logger.error(exc)
_global_output.error("Error detecting os_info")
| [] | [] | ["CONAN_BASH_PATH", "CONAN_CPU_COUNT"] | [] | ["CONAN_BASH_PATH", "CONAN_CPU_COUNT"] | python | 2 | 0 |
dblogger/push_log_test.go | package dblogger_test
import (
"database/sql"
"dblogger"
"encoding/csv"
"os"
"testing"
"time"
_ "github.com/lib/pq"
)
var testdata = struct {
date string
key string
path string
}{
date: "2021-07-01T14:01:46+09:00",
key: "key",
path: "path",
}
func TestPushLog(t *testing.T) {
db, err := sql.Open(os.Getenv("DATABASE_DRIVER"),
"host="+os.Getenv("DATABASE_HOST")+" "+
"port="+os.Getenv("DATABASE_PORT")+" "+
"user="+os.Getenv("DATABASE_USER")+" "+
"password="+os.Getenv("DATABASE_PASSWORD")+" "+
"dbname="+os.Getenv("DATABASE_NAME")+" "+
"sslmode="+os.Getenv("DATABASE_SSLMODE"))
if err != nil {
t.Fatal(err)
}
defer db.Close()
if _, err := db.Exec("DELETE FROM apilog WHERE apikey='key' AND apipath='path'"); err != nil {
t.Fatal(err)
}
file, err := os.OpenFile(os.Getenv("LOGPATH"), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
t.Fatal(err)
}
defer file.Close()
if err := file.Truncate(0); err != nil {
t.Fatal(err)
}
writer := csv.NewWriter(file)
writer.Write([]string{
testdata.date,
testdata.key,
testdata.path,
})
writer.Flush()
dblogger.PushLog()
row := struct {
date string
key string
path string
}{}
if err := db.QueryRow("SELECT * FROM apilog WHERE apikey='key' AND apipath='path'").Scan(&row.date, &row.key, &row.path); err != nil {
t.Fatal(err)
}
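	// Compare as time.Time values rather than raw strings: the database may
	// render the timestamp with a different zone offset or RFC3339 variant
	// than the one written to the log file.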
t1, err := time.Parse(time.RFC3339, testdata.date)
if err != nil {
t.Fatal(err)
}
t2, err := time.Parse(time.RFC3339, row.date)
if err != nil {
t.Fatal(err)
}
if !t1.Equal(t2) {
t.Fatalf("unexpected date %s, expected %s", t1.String(), t2.String())
} else if row.key != testdata.key {
t.Fatalf("unexpected key %s, expected %s", row.key, testdata.key)
} else if row.path != testdata.path {
t.Fatalf("unexpected path %s, expected %s", row.path, testdata.path)
}
if _, err := db.Exec("DELETE FROM apilog WHERE apikey='key' AND apipath='path'"); err != nil {
t.Fatal(err)
}
}
| ["\"DATABASE_DRIVER\"", "\"DATABASE_HOST\"", "\"DATABASE_PORT\"", "\"DATABASE_USER\"", "\"DATABASE_PASSWORD\"", "\"DATABASE_NAME\"", "\"DATABASE_SSLMODE\"", "\"LOGPATH\""] | [] | ["DATABASE_PASSWORD", "DATABASE_HOST", "DATABASE_NAME", "DATABASE_DRIVER", "DATABASE_USER", "DATABASE_PORT", "DATABASE_SSLMODE", "LOGPATH"] | [] | ["DATABASE_PASSWORD", "DATABASE_HOST", "DATABASE_NAME", "DATABASE_DRIVER", "DATABASE_USER", "DATABASE_PORT", "DATABASE_SSLMODE", "LOGPATH"] | go | 8 | 0 |
handle.go | package errf
import (
"errors"
"fmt"
"reflect"
)
type handleCondition struct {
onError bool
onSuccess bool
onPanic bool
notValidate bool
}
// InterimHandler defines Handle() API.
type InterimHandler struct{}
// Handle enables additional error handlers in the middle of functions,
// in addition to IfError() handlers.
//
// Notes:
// * Handle() API should be used only in defer statements.
// * Handlers with "Err" in the name (e.g. OnErr, OnErrOrPanic) can only be used
// in functions with IfError() handler.
// * Handlers without "Err" in the name (e.g. Always, OnPanic) can be used
// in any function.
// * It is allowed to use Check* functions inside Handlers even without IfError() set up
// inside a handler. In such cases, defer Handle()... enclosing function IfError()
// will be used to catch errors.
//
// Example:
// func example(filename string) (err error) {
// defer errf.IfError().ThenAssignTo(&err)
//
// /* some code */
//
// writer := errf.Io.WriteCloser(os.Create(filename))
// defer errf.Handle().OnAnyErrorOrPanic(func() { os.Remove(filename) })
// defer errf.CheckDeferErr(writer.Close)
//
// /* more code */
// }
func Handle() *InterimHandler {
return &InterimHandler{}
}
// PanicErr is an error type, which is used in error fn Handle()... callbacks, in case if handler
// was triggered by a panic instead of an error.
//
// See also: errf.IsPanic, errf.GetPanic.
type PanicErr struct {
PanicObj interface{}
}
func (p PanicErr) Error() string {
return fmt.Sprintf("panic: %v", p.PanicObj)
}
// Always handler is always executed.
// Error is not sent to the callback.
//
// Use Everything(), if error info is required.
func (h *InterimHandler) Always(errFn func()) {
h.handle(recover(), handleCondition{onError: true, onPanic: true, onSuccess: true, notValidate: true}, func(err error) {
errFn()
})
}
// Everything handler is always executed.
// Errors and panics are sent to the callback.
//
// Use IsPanic(), IsErr(), IsSuccess() to differentiate between those outcomes.
//
// Use Always(), if error info is not needed.
func (h *InterimHandler) Everything(errFn ErrorActionFn) {
h.handle(recover(), handleCondition{onError: true, onPanic: true, onSuccess: true, notValidate: true}, errFn)
}
// OnErr handler is executed in case of error triggered by one of "Check*" functions.
//
// First encountered error is passed to the callback.
func (h *InterimHandler) OnErr(errFn ErrorActionFn) {
h.handle(recover(), handleCondition{onError: true}, errFn)
}
// OnErrIs handler is executed in case of error triggered by one of "Check*" functions
// and first encountered error is targetErr (using errors.Is definition).
func (h *InterimHandler) OnErrIs(targetErr error, errFn func()) {
h.handle(recover(), handleCondition{onError: true}, func(err error) {
if errors.Is(err, targetErr) {
errFn()
}
})
}
func verifyErrFnType(argument string, errFn interface{}) {
t := reflect.TypeOf(errFn)
if t.Kind() != reflect.Func {
panic(fmt.Errorf("%s should be a function", argument))
}
if t.NumIn() != 1 || t.IsVariadic() {
panic(fmt.Errorf("%s should have exactly 1 input argument", argument))
}
if t.NumOut() != 0 {
panic(fmt.Errorf("%s should have exactly no output arguments", argument))
}
errType := reflect.TypeOf((*error)(nil)).Elem()
if !t.In(0).AssignableTo(errType) {
panic(fmt.Errorf("%s first argument should be assignable to error interface", argument))
}
}
// OnErrAs handler is executed in case of error triggered by one of "Check*" functions
// and first encountered error has type of callback argument (using errors.As definition).
//
// Example:
// defer errf.Handle().OnErrAs(func (err net.Error) {
// // This callback only will be executed if first encountered
// // error has type of net.Error.
// })
func (h *InterimHandler) OnErrAs(errFn interface{}) {
globalErrflowValidator.custom(func() {
verifyErrFnType("OnErrAs: errFn", errFn)
})
h.handle(recover(), handleCondition{onError: true}, func(err error) {
errFnValue := reflect.ValueOf(errFn)
errValue := reflect.New(errFnValue.Type().In(0))
if errors.As(err, errValue.Interface()) {
errFnValue.Call([]reflect.Value{errValue.Elem()})
}
})
}
// OnErrOrPanic handler is executed in case of error triggered by one of "Check*" functions
// or a panic.
//
// First encountered error is passed to the callback.
// See also errf.IsPanic(), errf.IsErr().
func (h *InterimHandler) OnErrOrPanic(errFn ErrorActionFn) {
h.handle(recover(), handleCondition{onError: true, onPanic: true}, errFn)
}
// OnPanic handler is executed in case of a panic.
func (h *InterimHandler) OnPanic(panicFn func(panicObj interface{})) {
h.handle(recover(), handleCondition{onPanic: true}, func(err error) {
panicFn(err.(PanicErr).PanicObj)
})
}
// OnAnyPanic handler is same as OnPanic, when panicObj is not required.
func (h *InterimHandler) OnAnyPanic(panicFn func()) {
h.handle(recover(), handleCondition{onPanic: true}, func(err error) {
panicFn()
})
}
// OnAnyErr handler is same as OnErr, when err is not required.
func (h *InterimHandler) OnAnyErr(errFn func()) {
h.handle(recover(), handleCondition{onError: true}, func(err error) { errFn() })
}
// OnAnyErrOrPanic handler is same as OnErrOrPanic, when err is not required.
func (h *InterimHandler) OnAnyErrOrPanic(errFn func()) {
h.handle(recover(), handleCondition{onError: true, onPanic: true}, func(err error) { errFn() })
}
// OnSuccess handler is executed in case of no errors or panics.
func (h *InterimHandler) OnSuccess(successFn func()) {
h.handle(recover(), handleCondition{onSuccess: true}, func(err error) { successFn() })
}
// IsErr returns true when error send to handler callback indicates an error (not panic or success).
// Useful for handlers which handle multiple types (e.g. Everything(), OnErrOrPanic())
func IsErr(err error) bool {
return err != nil && !IsPanic(err)
}
// IsSuccess returns true when error send to handler callback indicates success
// (is null, no errors or panics).
// Useful for handlers which handle multiple types (e.g. Everything())
func IsSuccess(err error) bool {
return err == nil
}
// IsPanic returns true when error send to handler callback indicates a panic (not error or success).
// Useful for handlers which handle multiple types (e.g. Everything(), OnErrOrPanic())
func IsPanic(err error) bool {
_, ok := err.(PanicErr)
return ok
}
// GetPanic returns true when error send to handler callback indicates a panic (not error or success).
// Also it writes panic value into panicObj pointer.
func GetPanic(err error, panicObj *interface{}) bool {
panicErr, ok := err.(PanicErr)
if ok {
*panicObj = panicErr.PanicObj
}
return ok
}
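// handleDoPanicOnError re-raises the original errflow throw once the user's
// handler returns. If the handler itself threw via a Check* function, both
// throws are merged so no error is lost; any unrelated panic raised by the
// handler takes precedence and propagates unchanged.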
func handleDoPanicOnError(errflowThrowObj errflowThrow) {
fnRecover := recover()
fnErrflowThrow, isFnErrflowThrow := fnRecover.(errflowThrow)
if fnRecover == nil {
panic(errflowThrowObj)
} else if isFnErrflowThrow {
var combinedErrflowThrow errflowThrow
combinedErrflowThrow.items = append(combinedErrflowThrow.items, errflowThrowObj.items...)
combinedErrflowThrow.items = append(combinedErrflowThrow.items, fnErrflowThrow.items...)
panic(combinedErrflowThrow)
} else {
panic(fnRecover)
}
}
func handleDoPanicOnPanic(recoverObj interface{}) {
fnRecover := recover()
_, isFnErrflowThrow := fnRecover.(errflowThrow)
if fnRecover == nil {
panic(recoverObj)
} else if isFnErrflowThrow {
panic(recoverObj)
} else {
panic(fnRecover)
}
}
func (h *InterimHandler) handle(
recoverObj interface{},
condition handleCondition,
fn ErrorActionFn,
) {
if condition.onError && !condition.notValidate {
if isUnrelatedPanic(recoverObj) {
globalErrflowValidator.markPanic()
}
globalErrflowValidator.validate()
}
if recoverObj != nil {
errflowThrowObj, ok := recoverObj.(errflowThrow)
if ok && len(errflowThrowObj.items) > 0 {
item := errflowThrowObj.items[0]
ef := item.ef
err := item.err
ef.applyDeferredOptions()
if ef.wrapper != nil && err != nil {
err = ef.wrapper(err)
}
defer handleDoPanicOnError(errflowThrowObj)
if condition.onError {
fn(err)
}
} else {
defer handleDoPanicOnPanic(recoverObj)
if condition.onPanic {
fn(PanicErr{PanicObj: recoverObj})
}
}
} else {
if condition.onSuccess {
fn(nil)
}
}
}
| [] | [] | [] | [] | [] | go | null | null | null |
patches/gopsutil/cpu/cpu_test.go | package cpu
import (
"fmt"
"os"
"runtime"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestCpu_times(t *testing.T) {
v, err := Times(false)
if err != nil {
t.Errorf("error %v", err)
}
if len(v) == 0 {
t.Error("could not get CPUs ", err)
}
empty := TimesStat{}
for _, vv := range v {
if vv == empty {
t.Errorf("could not get CPU User: %v", vv)
}
}
// test sum of per cpu stats is within margin of error for cpu total stats
cpuTotal, err := Times(false)
if err != nil {
t.Errorf("error %v", err)
}
if len(cpuTotal) == 0 {
t.Error("could not get CPUs ", err)
}
perCPU, err := Times(true)
if err != nil {
t.Errorf("error %v", err)
}
if len(perCPU) == 0 {
t.Error("could not get CPUs ", err)
}
var perCPUUserTimeSum float64
var perCPUSystemTimeSum float64
var perCPUIdleTimeSum float64
for _, pc := range perCPU {
perCPUUserTimeSum += pc.User
perCPUSystemTimeSum += pc.System
perCPUIdleTimeSum += pc.Idle
}
margin := 2.0
assert.InEpsilon(t, cpuTotal[0].User, perCPUUserTimeSum, margin)
assert.InEpsilon(t, cpuTotal[0].System, perCPUSystemTimeSum, margin)
assert.InEpsilon(t, cpuTotal[0].Idle, perCPUIdleTimeSum, margin)
}
func TestCpu_counts(t *testing.T) {
v, err := Counts(true)
if err != nil {
t.Errorf("error %v", err)
}
if v == 0 {
t.Errorf("could not get CPU counts: %v", v)
}
}
func TestCPUTimeStat_String(t *testing.T) {
v := TimesStat{
CPU: "cpu0",
User: 100.1,
System: 200.1,
Idle: 300.1,
}
e := `{"cpu":"cpu0","user":100.1,"system":200.1,"idle":300.1,"nice":0.0,"iowait":0.0,"irq":0.0,"softirq":0.0,"steal":0.0,"guest":0.0,"guestNice":0.0}`
if e != fmt.Sprintf("%v", v) {
t.Errorf("CPUTimesStat string is invalid: %v", v)
}
}
func TestCpuInfo(t *testing.T) {
v, err := Info()
if err != nil {
t.Errorf("error %v", err)
}
if len(v) == 0 {
t.Errorf("could not get CPU Info")
}
for _, vv := range v {
if vv.ModelName == "" {
t.Errorf("could not get CPU Info: %v", vv)
}
}
}
func testCPUPercent(t *testing.T, percpu bool) {
numcpu := runtime.NumCPU()
testCount := 3
if runtime.GOOS != "windows" {
testCount = 100
v, err := Percent(time.Millisecond, percpu)
if err != nil {
t.Errorf("error %v", err)
}
// Skip CircleCI which CPU num is different
if os.Getenv("CIRCLECI") != "true" {
if (percpu && len(v) != numcpu) || (!percpu && len(v) != 1) {
t.Fatalf("wrong number of entries from CPUPercent: %v", v)
}
}
}
for i := 0; i < testCount; i++ {
duration := time.Duration(10) * time.Microsecond
v, err := Percent(duration, percpu)
if err != nil {
t.Errorf("error %v", err)
}
for _, percent := range v {
			// Check for slightly greater than 100% to account for any rounding issues.
if percent < 0.0 || percent > 100.0001*float64(numcpu) {
t.Fatalf("CPUPercent value is invalid: %f", percent)
}
}
}
}
func testCPUPercentLastUsed(t *testing.T, percpu bool) {
numcpu := runtime.NumCPU()
testCount := 10
if runtime.GOOS != "windows" {
testCount = 2
v, err := Percent(time.Millisecond, percpu)
if err != nil {
t.Errorf("error %v", err)
}
// Skip CircleCI which CPU num is different
if os.Getenv("CIRCLECI") != "true" {
if (percpu && len(v) != numcpu) || (!percpu && len(v) != 1) {
t.Fatalf("wrong number of entries from CPUPercent: %v", v)
}
}
}
for i := 0; i < testCount; i++ {
v, err := Percent(0, percpu)
if err != nil {
t.Errorf("error %v", err)
}
time.Sleep(1 * time.Millisecond)
for _, percent := range v {
			// Check for slightly greater than 100% to account for any rounding issues.
if percent < 0.0 || percent > 100.0001*float64(numcpu) {
t.Fatalf("CPUPercent value is invalid: %f", percent)
}
}
}
}
func TestCPUPercent(t *testing.T) {
testCPUPercent(t, false)
}
func TestCPUPercentPerCpu(t *testing.T) {
testCPUPercent(t, true)
}
func TestCPUPercentIntervalZero(t *testing.T) {
testCPUPercentLastUsed(t, false)
}
func TestCPUPercentIntervalZeroPerCPU(t *testing.T) {
testCPUPercentLastUsed(t, true)
}
| ["\"CIRCLECI\"", "\"CIRCLECI\""] | [] | ["CIRCLECI"] | [] | ["CIRCLECI"] | go | 1 | 0 |
daemon/daemon.go | // Package daemon exposes the functions that occur on the host server
// that the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/Sirupsen/logrus"
containerd "github.com/docker/containerd/api/grpc/types"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/events"
"github.com/docker/docker/daemon/exec"
"github.com/docker/libnetwork/cluster"
// register graph drivers
_ "github.com/docker/docker/daemon/graphdriver/register"
dmetadata "github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/libcontainerd"
"github.com/docker/docker/migrate/v1"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/graphdb"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/registrar"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/truncindex"
pluginstore "github.com/docker/docker/plugin/store"
"github.com/docker/docker/reference"
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
volumedrivers "github.com/docker/docker/volume/drivers"
"github.com/docker/docker/volume/local"
"github.com/docker/docker/volume/store"
"github.com/docker/libnetwork"
nwconfig "github.com/docker/libnetwork/config"
"github.com/docker/libtrust"
)
var (
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "docker-runc"
errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.")
)
// Daemon holds information about the Docker daemon.
type Daemon struct {
	// ID generated from the certificate passed in; if none is supplied, one is generated with the ECDSA algorithm
ID string
	// path under which all Docker containers are deployed
repository string
	// object used to store information about individual Docker containers
containers container.Store
	// commands executed by Docker containers
execCommands *exec.Store
referenceStore reference.Store
downloadManager *xfer.LayerDownloadManager
uploadManager *xfer.LayerUploadManager
distributionMetadataStore dmetadata.Store
trustKey libtrust.PrivateKey
	// resolves unique IDs from short, valid string prefixes
idIndex *truncindex.TruncIndex
configStore *Config
	// collects container network and cgroup statistics
statsCollector *statsCollector
	// provides the default log configuration
defaultLogConfig containertypes.LogConfig
	// handles connections to remote registry services
RegistryService registry.Service
	// provides event notification and subscription services for Docker
EventsService *events.Events
	// controller instance provided by libnetwork
netController libnetwork.NetworkController
volumes *store.VolumeStore
discoveryWatcher discoveryReloader
	// working directory in which Docker runs
root string
seccompEnabled bool
shutdown bool
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
layerStore layer.Store
imageStore image.Store
PluginStore *pluginstore.Store
nameIndex *registrar.Registrar
linkIndex *linkIndex
containerd libcontainerd.Client
containerdRemote libcontainerd.Remote
defaultIsolation containertypes.Isolation // Default isolation mode on Windows
clusterProvider cluster.Provider
}
// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
if daemon.configStore != nil && daemon.configStore.Experimental {
return true
}
return false
}
func (daemon *Daemon) restore() error {
var (
currentDriver = daemon.GraphDriverName()
containers = make(map[string]*container.Container)
)
logrus.Info("Loading containers: start.")
dir, err := ioutil.ReadDir(daemon.repository)
if err != nil {
return err
}
for _, v := range dir {
id := v.Name()
container, err := daemon.load(id)
if err != nil {
logrus.Errorf("Failed to load container %v: %v", id, err)
continue
}
// Ignore the container if it does not support the current driver being used by the graph
if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
if err != nil {
logrus.Errorf("Failed to load container mount %v: %v", id, err)
continue
}
container.RWLayer = rwlayer
logrus.Debugf("Loaded container %v", container.ID)
containers[container.ID] = container
} else {
logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
}
}
var migrateLegacyLinks bool
removeContainers := make(map[string]*container.Container)
restartContainers := make(map[*container.Container]chan struct{})
activeSandboxes := make(map[string]interface{})
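	// activeSandboxes records the sandbox options of still-running containers
	// so that the network controller, re-initialized below, can re-attach to
	// their live network sandboxes instead of recreating them.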
for id, c := range containers {
if err := daemon.registerName(c); err != nil {
logrus.Errorf("Failed to register container %s: %s", c.ID, err)
delete(containers, id)
continue
}
if err := daemon.Register(c); err != nil {
logrus.Errorf("Failed to register container %s: %s", c.ID, err)
delete(containers, id)
continue
}
// verify that all volumes valid and have been migrated from the pre-1.7 layout
if err := daemon.verifyVolumesInfo(c); err != nil {
// don't skip the container due to error
logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err)
}
// The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver.
// We should rewrite it to use the daemon defaults.
// Fixes https://github.com/docker/docker/issues/22536
if c.HostConfig.LogConfig.Type == "" {
if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
continue
}
}
}
var wg sync.WaitGroup
var mapLock sync.Mutex
for _, c := range containers {
wg.Add(1)
go func(c *container.Container) {
defer wg.Done()
if err := backportMountSpec(c); err != nil {
logrus.Errorf("Failed to migrate old mounts to use new spec format")
}
if c.IsRunning() || c.IsPaused() {
c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
if err := daemon.containerd.Restore(c.ID); err != nil {
logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err)
return
}
c.ResetRestartManager(false)
if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
options, err := daemon.buildSandboxOptions(c)
if err != nil {
logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err)
}
mapLock.Lock()
activeSandboxes[c.NetworkSettings.SandboxID] = options
mapLock.Unlock()
}
}
// fixme: only if not running
// get list of containers we need to restart
if !c.IsRunning() && !c.IsPaused() {
// Do not autostart containers which
// has endpoints in a swarm scope
// network yet since the cluster is
// not initialized yet. We will start
// it after the cluster is
// initialized.
if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
mapLock.Lock()
restartContainers[c] = make(chan struct{})
mapLock.Unlock()
} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
mapLock.Lock()
removeContainers[c.ID] = c
mapLock.Unlock()
}
}
if c.RemovalInProgress {
// We probably crashed in the middle of a removal, reset
// the flag.
//
// We DO NOT remove the container here as we do not
// know if the user had requested for either the
// associated volumes, network links or both to also
// be removed. So we put the container in the "dead"
// state and leave further processing up to them.
logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
c.ResetRemovalInProgress()
c.SetDead()
c.ToDisk()
}
// if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated
if c.HostConfig != nil && c.HostConfig.Links == nil {
migrateLegacyLinks = true
}
}(c)
}
wg.Wait()
daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
if err != nil {
return fmt.Errorf("Error initializing network controller: %v", err)
}
// migrate any legacy links from sqlite
linkdbFile := filepath.Join(daemon.root, "linkgraph.db")
var legacyLinkDB *graphdb.Database
if migrateLegacyLinks {
legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile)
if err != nil {
return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err)
}
defer legacyLinkDB.Close()
}
// Now that all the containers are registered, register the links
for _, c := range containers {
if migrateLegacyLinks {
if err := daemon.migrateLegacySqliteLinks(legacyLinkDB, c); err != nil {
return err
}
}
if err := daemon.registerLinks(c, c.HostConfig); err != nil {
logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
}
}
group := sync.WaitGroup{}
for c, notifier := range restartContainers {
group.Add(1)
go func(c *container.Container, chNotify chan struct{}) {
defer group.Done()
logrus.Debugf("Starting container %s", c.ID)
// ignore errors here as this is a best effort to wait for children to be
// running before we try to start the container
children := daemon.children(c)
timeout := time.After(5 * time.Second)
for _, child := range children {
if notifier, exists := restartContainers[child]; exists {
select {
case <-notifier:
case <-timeout:
}
}
}
// Make sure networks are available before starting
daemon.waitForNetworks(c)
if err := daemon.containerStart(c, "", true); err != nil {
logrus.Errorf("Failed to start container %s: %s", c.ID, err)
}
close(chNotify)
}(c, notifier)
}
group.Wait()
removeGroup := sync.WaitGroup{}
for id := range removeContainers {
removeGroup.Add(1)
go func(cid string) {
if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
logrus.Errorf("Failed to remove container %s: %s", cid, err)
}
removeGroup.Done()
}(id)
}
removeGroup.Wait()
// any containers that were started above would already have had this done,
// however we need to now prepare the mountpoints for the rest of the containers as well.
// This shouldn't cause any issue running on the containers that already had this run.
// This must be run after any containers with a restart policy so that containerized plugins
// can have a chance to be running before we try to initialize them.
for _, c := range containers {
// If the container has a restart policy, do not prepare the
// mountpoints here, since that is done when it restarts. This
// speeds up daemon start when a restarting container has a
// volume whose volume driver is not available.
if _, ok := restartContainers[c]; ok {
continue
} else if _, ok := removeContainers[c.ID]; ok {
// container is automatically removed, skip it.
continue
}
group.Add(1)
go func(c *container.Container) {
defer group.Done()
if err := daemon.prepareMountPoints(c); err != nil {
logrus.Error(err)
}
}(c)
}
group.Wait()
logrus.Info("Loading containers: done.")
return nil
}
// RestartSwarmContainers restarts any autostart container which has a
// swarm endpoint.
func (daemon *Daemon) RestartSwarmContainers() {
group := sync.WaitGroup{}
for _, c := range daemon.List() {
if !c.IsRunning() && !c.IsPaused() {
// Autostart all the containers which have a
// swarm endpoint now that the cluster is
// initialized.
if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint {
group.Add(1)
go func(c *container.Container) {
defer group.Done()
if err := daemon.containerStart(c, "", true); err != nil {
logrus.Error(err)
}
}(c)
}
}
}
group.Wait()
}
// waitForNetworks is used during daemon initialization when starting up containers
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that requires discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
if daemon.discoveryWatcher == nil {
return
}
// Make sure if the container has a network that requires discovery that the discovery service is available before starting
for netName := range c.NetworkSettings.Networks {
// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
// Most likely this is because the K/V store used for discovery is in a container and needs to be started
if _, err := daemon.netController.NetworkByName(netName); err != nil {
if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
continue
}
// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
// FIXME: why is this slow???
logrus.Debugf("Container %s waiting for network to be ready", c.Name)
select {
case <-daemon.discoveryWatcher.ReadyCh():
case <-time.After(60 * time.Second):
}
return
}
}
}
func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
return daemon.linkIndex.children(c)
}
// parents returns the names of the parent containers of the container
// with the given name.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
return daemon.linkIndex.parents(c)
}
func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
fullName := path.Join(parent.Name, alias)
if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil {
if err == registrar.ErrNameReserved {
logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
return nil
}
return err
}
daemon.linkIndex.link(parent, child, fullName)
return nil
}
// SetClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) {
daemon.clusterProvider = clusterProvider
daemon.netController.SetClusterProvider(clusterProvider)
}
// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with the swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
if daemon.configStore == nil {
return nil
}
return daemon.configStore.isSwarmCompatible()
}
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) {
setDefaultMtu(config)
// Ensure that we have a correct root key limit for launching containers.
if err := ModifyRootKeyLimit(); err != nil {
logrus.Warnf("unable to modify root key limit, number of containers could be limitied by this quota: %v", err)
}
// Ensure we have compatible and valid configuration options
if err := verifyDaemonSettings(config); err != nil {
return nil, err
}
// Do we have a disabled network?
config.DisableBridge = isBridgeNetworkDisabled(config)
// Verify the platform is supported as a daemon
if !platformSupported {
return nil, errSystemNotSupported
}
// Validate platform-specific requirements
if err := checkSystem(); err != nil {
return nil, err
}
// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
// on Windows to dump Go routine stacks
setupDumpStackTrap(config.Root)
uidMaps, gidMaps, err := setupRemappedRoot(config)
if err != nil {
return nil, err
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
// get the canonical path to the Docker root directory
var realRoot string
if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
realRoot = config.Root
} else {
realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
if err != nil {
return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
}
}
if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil {
return nil, err
}
if err := setupDaemonProcess(config); err != nil {
return nil, err
}
// set up the tmpDir to use a canonical path
tmp, err := tempDir(config.Root, rootUID, rootGID)
if err != nil {
return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
}
realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
if err != nil {
return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
}
os.Setenv("TMPDIR", realTmp)
d := &Daemon{configStore: config}
// Ensure the daemon is properly shutdown if there is a failure during
// initialization
defer func() {
if err != nil {
if err := d.Shutdown(); err != nil {
logrus.Error(err)
}
}
}()
// Set the default isolation mode (only applicable on Windows)
if err := d.setDefaultIsolation(); err != nil {
return nil, fmt.Errorf("error setting default isolation mode: %v", err)
}
logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)
if err := configureMaxThreads(config); err != nil {
logrus.Warnf("Failed to configure golang's threads limit: %v", err)
}
installDefaultAppArmorProfile()
daemonRepo := filepath.Join(config.Root, "containers")
if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return nil, err
}
if runtime.GOOS == "windows" {
if err := idtools.MkdirAllAs(filepath.Join(config.Root, "credentialspecs"), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return nil, err
}
}
driverName := os.Getenv("DOCKER_DRIVER")
if driverName == "" {
driverName = config.GraphDriver
}
d.PluginStore = pluginstore.NewStore(config.Root)
d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
StorePath: config.Root,
MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
GraphDriver: driverName,
GraphDriverOptions: config.GraphOptions,
UIDMaps: uidMaps,
GIDMaps: gidMaps,
PluginGetter: d.PluginStore,
})
if err != nil {
return nil, err
}
graphDriver := d.layerStore.DriverName()
imageRoot := filepath.Join(config.Root, "image", graphDriver)
// Configure and validate the kernel's security support
if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
return nil, err
}
logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)
ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
if err != nil {
return nil, err
}
d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
if err != nil {
return nil, err
}
// Configure the volumes driver
volStore, err := d.configureVolumes(rootUID, rootGID)
if err != nil {
return nil, err
}
trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
if err != nil {
return nil, err
}
trustDir := filepath.Join(config.Root, "trust")
if err := system.MkdirAll(trustDir, 0700); err != nil {
return nil, err
}
distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
if err != nil {
return nil, err
}
eventsService := events.New()
referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
if err != nil {
return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
}
migrationStart := time.Now()
if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
}
logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
// Discovery is only enabled when the daemon is launched with an address to advertise. When
// initialized, the daemon is registered and we can store the discovery backend as it is read-only.
if err := d.initDiscovery(config); err != nil {
return nil, err
}
sysInfo := sysinfo.New(false)
// Check if the devices cgroup is mounted; it is a hard requirement for
// container security on Linux.
if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
return nil, fmt.Errorf("Devices cgroup isn't mounted")
}
d.ID = trustKey.PublicKey().KeyID()
d.repository = daemonRepo
d.containers = container.NewMemoryStore()
d.execCommands = exec.NewStore()
d.referenceStore = referenceStore
d.distributionMetadataStore = distributionMetadataStore
d.trustKey = trustKey
d.idIndex = truncindex.NewTruncIndex([]string{})
d.statsCollector = d.newStatsCollector(1 * time.Second)
d.defaultLogConfig = containertypes.LogConfig{
Type: config.LogConfig.Type,
Config: config.LogConfig.Config,
}
d.RegistryService = registryService
d.EventsService = eventsService
d.volumes = volStore
d.root = config.Root
d.uidMaps = uidMaps
d.gidMaps = gidMaps
d.seccompEnabled = sysInfo.Seccomp
d.nameIndex = registrar.NewRegistrar()
d.linkIndex = newLinkIndex()
d.containerdRemote = containerdRemote
go d.execCommandGC()
d.containerd, err = containerdRemote.Client(d)
if err != nil {
return nil, err
}
// Plugin system initialization should happen before restore. Do not change order.
if err := d.pluginInit(config, containerdRemote); err != nil {
return nil, err
}
if err := d.restore(); err != nil {
return nil, err
}
return d, nil
}
// shutdownContainer stops the container: it sends SIGTERM first and, if the
// container does not exit within its StopTimeout, falls back to SIGKILL.
func (daemon *Daemon) shutdownContainer(c *container.Container) error {
stopTimeout := c.StopTimeout()
// TODO(windows): Handle docker restart with paused containers
if c.IsPaused() {
// To terminate a process in the freezer cgroup, we send SIGTERM
// to the process and then unfreeze it; the process is then
// forced to terminate immediately.
logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID)
sig, ok := signal.SignalMap["TERM"]
if !ok {
return fmt.Errorf("System does not support SIGTERM")
}
if err := daemon.kill(c, int(sig)); err != nil {
return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
}
if err := daemon.containerUnpause(c); err != nil {
return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
}
if _, err := c.WaitStop(time.Duration(stopTimeout) * time.Second); err != nil {
logrus.Debugf("container %s failed to exit in %d second of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout)
sig, ok := signal.SignalMap["KILL"]
if !ok {
return fmt.Errorf("System does not support SIGKILL")
}
if err := daemon.kill(c, int(sig)); err != nil {
logrus.Errorf("Failed to SIGKILL container %s", c.ID)
}
c.WaitStop(-1 * time.Second)
return err
}
}
// If the container fails to exit within stopTimeout seconds of SIGTERM, containerStop force-kills it.
if err := daemon.containerStop(c, stopTimeout); err != nil {
return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
}
c.WaitStop(-1 * time.Second)
return nil
}
// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers,
// and is limited by daemon's ShutdownTimeout.
func (daemon *Daemon) ShutdownTimeout() int {
// By default we use daemon's ShutdownTimeout.
shutdownTimeout := daemon.configStore.ShutdownTimeout
graceTimeout := 5
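// Extend the timeout whenever a container's stop timeout plus the grace
// period exceeds it; a container with a negative stop timeout makes the
// shutdown wait indefinitely. For example, a daemon timeout of 15s and a
// container stop timeout of 20s yield an effective timeout of 25s.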
if daemon.containers != nil {
for _, c := range daemon.containers.List() {
if shutdownTimeout >= 0 {
stopTimeout := c.StopTimeout()
if stopTimeout < 0 {
shutdownTimeout = -1
} else {
if stopTimeout+graceTimeout > shutdownTimeout {
shutdownTimeout = stopTimeout + graceTimeout
}
}
}
}
}
return shutdownTimeout
}
// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
daemon.shutdown = true
// Keep mounts and networking running on daemon shutdown if
// we are to keep containers running and restore them.
if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
// check if there are any running containers, if none we should do some cleanup
if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
return nil
}
}
if daemon.containers != nil {
logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.configStore.ShutdownTimeout)
daemon.containers.ApplyAll(func(c *container.Container) {
if !c.IsRunning() {
return
}
logrus.Debugf("stopping %s", c.ID)
if err := daemon.shutdownContainer(c); err != nil {
logrus.Errorf("Stop container error: %v", err)
return
}
if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
daemon.cleanupMountsByID(mountid)
}
logrus.Debugf("container stopped %s", c.ID)
})
}
// Shutdown plugins after containers. Don't change the order.
daemon.pluginShutdown()
// trigger libnetwork Stop only if it's initialized
if daemon.netController != nil {
daemon.netController.Stop()
}
if daemon.layerStore != nil {
if err := daemon.layerStore.Cleanup(); err != nil {
logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
}
}
if err := daemon.cleanupMounts(); err != nil {
return err
}
return nil
}
// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
dir, err := container.RWLayer.Mount(container.GetMountLabel())
if err != nil {
return err
}
logrus.Debugf("container mounted via layerStore: %v", dir)
if container.BaseFS != dir {
// The mount path reported by the graph driver should always be trusted on Windows, since the
// volume path for a given mounted layer may change over time. This should only be an error
// on non-Windows operating systems.
if container.BaseFS != "" && runtime.GOOS != "windows" {
daemon.Unmount(container)
return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
daemon.GraphDriverName(), container.ID, container.BaseFS, dir)
}
}
container.BaseFS = dir // TODO: combine these fields
return nil
}
// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
if err := container.RWLayer.Unmount(); err != nil {
logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
return err
}
return nil
}
// V4Subnets returns the IPv4 subnets of networks that are managed by Docker.
func (daemon *Daemon) V4Subnets() []net.IPNet {
var subnets []net.IPNet
managedNetworks := daemon.netController.Networks()
for _, managedNetwork := range managedNetworks {
v4Infos, _ := managedNetwork.Info().IpamInfo()
for _, v4Info := range v4Infos {
if v4Info.IPAMData.Pool != nil {
subnets = append(subnets, *v4Info.IPAMData.Pool)
}
}
}
return subnets
}
// V6Subnets returns the IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) V6Subnets() []net.IPNet {
var subnets []net.IPNet
managedNetworks := daemon.netController.Networks()
for _, managedNetwork := range managedNetworks {
_, v6Infos := managedNetwork.Info().IpamInfo()
for _, v6Info := range v6Infos {
if v6Info.IPAMData.Pool != nil {
subnets = append(subnets, *v6Info.IPAMData.Pool)
}
}
}
return subnets
}
func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) {
progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false)
operationCancelled := false
for prog := range progressChan {
if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled {
// don't log broken pipe errors as this is the normal case when a client aborts
if isBrokenPipe(err) {
logrus.Info("Pull session cancelled")
} else {
logrus.Errorf("error writing progress to client: %v", err)
}
cancelFunc()
operationCancelled = true
// Don't return, because we need to continue draining
// progressChan until it's closed to avoid a deadlock.
}
}
}
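// isBrokenPipe unwraps *net.OpError and *os.SyscallError wrappers and reports
// whether the underlying error is EPIPE, i.e. the client closed its end of the
// connection.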
func isBrokenPipe(e error) bool {
if netErr, ok := e.(*net.OpError); ok {
e = netErr.Err
if sysErr, ok := netErr.Err.(*os.SyscallError); ok {
e = sysErr.Err
}
}
return e == syscall.EPIPE
}
// GraphDriverName returns the name of the graph driver used by the layer.Store
func (daemon *Daemon) GraphDriverName() string {
return daemon.layerStore.DriverName()
}
// GetUIDGIDMaps returns the current daemon's user namespace settings
// for the full uid and gid maps which will be applied to containers
// started in this instance.
func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) {
return daemon.uidMaps, daemon.gidMaps
}
// GetRemappedUIDGID returns the current daemon's uid and gid values
// if user namespaces are in use for this daemon instance. If not
// this function will return "real" root values of 0, 0.
func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
return uid, gid
}
// tempDir returns the default directory to use for temporary files.
func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
var tmpDir string
if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
tmpDir = filepath.Join(rootDir, "tmp")
}
return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID)
}
func (daemon *Daemon) setupInitLayer(initPath string) error {
rootUID, rootGID := daemon.GetRemappedUIDGID()
return setupInitLayer(initPath, rootUID, rootGID)
}
func setDefaultMtu(config *Config) {
// Do nothing if the MTU was explicitly set (i.e. is not the default 0).
if config.Mtu != 0 {
return
}
config.Mtu = defaultNetworkMtu
}
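// configureVolumes registers the built-in local volume driver and returns a
// volume store rooted at the daemon's root directory.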
func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) {
volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID)
if err != nil {
return nil, err
}
volumedrivers.RegisterPluginGetter(daemon.PluginStore)
if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) {
return nil, fmt.Errorf("local volume driver could not be registered")
}
return store.New(daemon.configStore.Root)
}
// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
return daemon.shutdown
}
// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(config *Config) error {
advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise)
if err != nil {
if err == errDiscoveryDisabled {
return nil
}
return err
}
config.ClusterAdvertise = advertise
discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts)
if err != nil {
return fmt.Errorf("discovery initialization failed (%v)", err)
}
daemon.discoveryWatcher = discoveryWatcher
return nil
}
// Reload reads configuration changes and modifies the
// daemon according to those changes.
// These are the settings that Reload changes:
// - Daemon labels.
// - Daemon debug log level.
// - Daemon insecure registries.
// - Daemon max concurrent downloads
// - Daemon max concurrent uploads
// - Cluster discovery (reconfigure and restart).
// - Daemon live restore
// - Daemon shutdown timeout (in seconds).
func (daemon *Daemon) Reload(config *Config) (err error) {
daemon.configStore.reloadLock.Lock()
attributes := daemon.platformReload(config)
defer func() {
// we're unlocking here, because
// LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes()
// holds that lock too.
daemon.configStore.reloadLock.Unlock()
if err == nil {
daemon.LogDaemonEventWithAttributes("reload", attributes)
}
}()
if err := daemon.reloadClusterDiscovery(config); err != nil {
return err
}
if config.IsValueSet("labels") {
daemon.configStore.Labels = config.Labels
}
if config.IsValueSet("debug") {
daemon.configStore.Debug = config.Debug
}
if config.IsValueSet("insecure-registries") {
daemon.configStore.InsecureRegistries = config.InsecureRegistries
if err := daemon.RegistryService.LoadInsecureRegistries(config.InsecureRegistries); err != nil {
return err
}
}
if config.IsValueSet("live-restore") {
daemon.configStore.LiveRestoreEnabled = config.LiveRestoreEnabled
if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestoreEnabled)); err != nil {
return err
}
}
// If no value is set for max-concurrent-downloads we assume it is the default value
// We always "reset" as the cost is lightweight and easy to maintain.
if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil {
*daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads
} else {
maxConcurrentDownloads := defaultMaxConcurrentDownloads
daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads
}
logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads)
if daemon.downloadManager != nil {
daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads)
}
// If no value is set for max-concurrent-upload we assume it is the default value
// We always "reset" as the cost is lightweight and easy to maintain.
if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil {
*daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads
} else {
maxConcurrentUploads := defaultMaxConcurrentUploads
daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads
}
logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads)
if daemon.uploadManager != nil {
daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads)
}
if config.IsValueSet("shutdown-timeout") {
daemon.configStore.ShutdownTimeout = config.ShutdownTimeout
logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout)
}
// We emit daemon reload event here with updatable configurations
attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug)
attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled)
if daemon.configStore.InsecureRegistries != nil {
insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries)
if err != nil {
return err
}
attributes["insecure-registries"] = string(insecureRegistries)
} else {
attributes["insecure-registries"] = "[]"
}
attributes["cluster-store"] = daemon.configStore.ClusterStore
if daemon.configStore.ClusterOpts != nil {
opts, err := json.Marshal(daemon.configStore.ClusterOpts)
if err != nil {
return err
}
attributes["cluster-store-opts"] = string(opts)
} else {
attributes["cluster-store-opts"] = "{}"
}
attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise
if daemon.configStore.Labels != nil {
labels, err := json.Marshal(daemon.configStore.Labels)
if err != nil {
return err
}
attributes["labels"] = string(labels)
} else {
attributes["labels"] = "[]"
}
attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads)
attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads)
attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout)
return nil
}
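// reloadClusterDiscovery applies changes to the cluster discovery settings: it
// validates the new cluster-store/advertise values, starts, stops, or reloads
// the discovery watcher as needed, and pushes the updated options to the
// network controller.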
func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
var err error
newAdvertise := daemon.configStore.ClusterAdvertise
newClusterStore := daemon.configStore.ClusterStore
if config.IsValueSet("cluster-advertise") {
if config.IsValueSet("cluster-store") {
newClusterStore = config.ClusterStore
}
newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise)
if err != nil && err != errDiscoveryDisabled {
return err
}
}
if daemon.clusterProvider != nil {
if err := config.isSwarmCompatible(); err != nil {
return err
}
}
// check discovery modifications
if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) {
return nil
}
// enable discovery for the first time if it was not previously enabled
if daemon.discoveryWatcher == nil {
discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts)
if err != nil {
return fmt.Errorf("discovery initialization failed (%v)", err)
}
daemon.discoveryWatcher = discoveryWatcher
} else {
if err == errDiscoveryDisabled {
// disable discovery if it was previously enabled and it's disabled now
daemon.discoveryWatcher.Stop()
} else {
// reload discovery
if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil {
return err
}
}
}
daemon.configStore.ClusterStore = newClusterStore
daemon.configStore.ClusterOpts = config.ClusterOpts
daemon.configStore.ClusterAdvertise = newAdvertise
if daemon.netController == nil {
return nil
}
netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil)
if err != nil {
logrus.WithError(err).Warnf("failed to get options with network controller")
return nil
}
err = daemon.netController.ReloadConfiguration(netOptions...)
if err != nil {
logrus.Warnf("Failed to reload configuration with network controller: %v", err)
}
return nil
}
func isBridgeNetworkDisabled(config *Config) bool {
return config.bridgeConfig.Iface == disableNetworkBridge
}
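// networkOptions translates the daemon configuration into libnetwork
// controller options: data and exec roots, the default network and driver,
// the cluster KV store and discovery settings, labels, and any active
// sandboxes kept for live restore.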
func (daemon *Daemon) networkOptions(dconfig *Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
options := []nwconfig.Option{}
if dconfig == nil {
return options, nil
}
options = append(options, nwconfig.OptionDataDir(dconfig.Root))
options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))
dd := runconfig.DefaultDaemonNetworkMode()
dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
options = append(options, nwconfig.OptionDefaultNetwork(dn))
if strings.TrimSpace(dconfig.ClusterStore) != "" {
kv := strings.Split(dconfig.ClusterStore, "://")
if len(kv) != 2 {
return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
}
options = append(options, nwconfig.OptionKVProvider(kv[0]))
options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
}
if len(dconfig.ClusterOpts) > 0 {
options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
}
if daemon.discoveryWatcher != nil {
options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
}
if dconfig.ClusterAdvertise != "" {
options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
}
options = append(options, nwconfig.OptionLabels(dconfig.Labels))
options = append(options, driverOptions(dconfig)...)
if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
}
if pg != nil {
options = append(options, nwconfig.OptionPluginGetter(pg))
}
return options, nil
}
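// copyBlkioEntry converts containerd blkio stat entries into the API's
// BlkioStatEntry representation.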
func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
out := make([]types.BlkioStatEntry, len(entries))
for i, re := range entries {
out[i] = types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: re.Op,
Value: re.Value,
}
}
return out
}
| ["\"DOCKER_DRIVER\"", "\"DOCKER_TMPDIR\""] | [] | ["DOCKER_DRIVER", "DOCKER_TMPDIR"] | [] | ["DOCKER_DRIVER", "DOCKER_TMPDIR"] | go | 2 | 0 |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tranpathPY.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
open_widget_framework/runtests.py | import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def run_tests():
os.environ["DJANGO_SETTINGS_MODULE"] = "open_widget_framework.test_settings"
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(["open_widget_framework.tests"])
sys.exit(bool(failures))
if __name__ == "__main__":
run_tests()
| [] | [] | ["DJANGO_SETTINGS_MODULE"] | [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 |
internal/generated/snippets/aiplatform/apiv1beta1/FeaturestoreClient/BatchReadFeatureValues/main.go | // Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by cloud.google.com/go/internal/gapicgen/gensnippets. DO NOT EDIT.
// [START aiplatform_v1beta1_generated_FeaturestoreService_BatchReadFeatureValues_sync]
package main
import (
"context"
aiplatform "cloud.google.com/go/aiplatform/apiv1beta1"
aiplatformpb "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1"
)
func main() {
ctx := context.Background()
c, err := aiplatform.NewFeaturestoreClient(ctx)
if err != nil {
// TODO: Handle error.
}
defer c.Close()
req := &aiplatformpb.BatchReadFeatureValuesRequest{
// TODO: Fill request struct fields.
// See https://pkg.go.dev/google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1#BatchReadFeatureValuesRequest.
}
op, err := c.BatchReadFeatureValues(ctx, req)
if err != nil {
// TODO: Handle error.
}
resp, err := op.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
// [END aiplatform_v1beta1_generated_FeaturestoreService_BatchReadFeatureValues_sync]
| [] | [] | [] | [] | [] | go | null | null | null |
runsc/container/container_test.go | // Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package container
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"path"
"path/filepath"
"reflect"
"strconv"
"strings"
"syscall"
"testing"
"time"
"github.com/cenkalti/backoff"
specs "github.com/opencontainers/runtime-spec/specs-go"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bits"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/control"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/test/testutil"
"gvisor.dev/gvisor/pkg/urpc"
"gvisor.dev/gvisor/runsc/boot/platforms"
"gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/specutils"
)
// waitForProcessList waits for the given process list to show up in the container.
func waitForProcessList(cont *Container, want []*control.Process) error {
cb := func() error {
got, err := cont.Processes()
if err != nil {
err = fmt.Errorf("error getting process data from container: %v", err)
return &backoff.PermanentError{Err: err}
}
if !procListsEqual(got, want) {
return fmt.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(want))
}
return nil
}
// Gives plenty of time as tests can run slow under --race.
return testutil.Poll(cb, 30*time.Second)
}
func waitForProcessCount(cont *Container, want int) error {
cb := func() error {
pss, err := cont.Processes()
if err != nil {
err = fmt.Errorf("error getting process data from container: %v", err)
return &backoff.PermanentError{Err: err}
}
if got := len(pss); got != want {
log.Infof("Waiting for process count to reach %d. Current: %d", want, got)
return fmt.Errorf("wrong process count, got: %d, want: %d", got, want)
}
return nil
}
// Gives plenty of time as tests can run slow under --race.
return testutil.Poll(cb, 30*time.Second)
}
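// blockUntilWaitable blocks until the process with the given pid becomes
// waitable. It uses waitid with WNOWAIT so that the exit status remains
// available for a subsequent wait call.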
func blockUntilWaitable(pid int) error {
_, _, err := specutils.RetryEintr(func() (uintptr, uintptr, error) {
var err error
_, _, err1 := syscall.Syscall6(syscall.SYS_WAITID, 1, uintptr(pid), 0, syscall.WEXITED|syscall.WNOWAIT, 0, 0)
if err1 != 0 {
err = err1
}
return 0, 0, err
})
return err
}
// procListsEqual is used to check whether two Process lists are equal. Fields
// set to -1 in wants are ignored. Timestamp and threads fields are always
// ignored.
func procListsEqual(gots, wants []*control.Process) bool {
if len(gots) != len(wants) {
return false
}
for i := range gots {
got := gots[i]
want := wants[i]
if want.UID != math.MaxUint32 && want.UID != got.UID {
return false
}
if want.PID != -1 && want.PID != got.PID {
return false
}
if want.PPID != -1 && want.PPID != got.PPID {
return false
}
if len(want.TTY) != 0 && want.TTY != got.TTY {
return false
}
if len(want.Cmd) != 0 && want.Cmd != got.Cmd {
return false
}
}
return true
}
type processBuilder struct {
process control.Process
}
func newProcessBuilder() *processBuilder {
return &processBuilder{
process: control.Process{
UID: math.MaxUint32,
PID: -1,
PPID: -1,
},
}
}
func (p *processBuilder) Cmd(cmd string) *processBuilder {
p.process.Cmd = cmd
return p
}
func (p *processBuilder) PID(pid kernel.ThreadID) *processBuilder {
p.process.PID = pid
return p
}
func (p *processBuilder) PPID(ppid kernel.ThreadID) *processBuilder {
p.process.PPID = ppid
return p
}
func (p *processBuilder) UID(uid auth.KUID) *processBuilder {
p.process.UID = uid
return p
}
func (p *processBuilder) Process() *control.Process {
return &p.process
}
func procListToString(pl []*control.Process) string {
strs := make([]string, 0, len(pl))
for _, p := range pl {
strs = append(strs, fmt.Sprintf("%+v", p))
}
return fmt.Sprintf("[%s]", strings.Join(strs, ","))
}
// createWriteableOutputFile creates an output file that can be read and
// written to in the sandbox.
func createWriteableOutputFile(path string) (*os.File, error) {
outputFile, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
if err != nil {
return nil, fmt.Errorf("error creating file: %q, %v", path, err)
}
// Chmod to allow writing after umask.
if err := outputFile.Chmod(0666); err != nil {
return nil, fmt.Errorf("error chmoding file: %q, %v", path, err)
}
return outputFile, nil
}
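// waitForFileNotEmpty polls until the given file has a non-zero size, giving
// up after 30 seconds.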
func waitForFileNotEmpty(f *os.File) error {
op := func() error {
fi, err := f.Stat()
if err != nil {
return err
}
if fi.Size() == 0 {
return fmt.Errorf("file %q is empty", f.Name())
}
return nil
}
return testutil.Poll(op, 30*time.Second)
}
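// waitForFileExist polls until the file at the given path exists, giving up
// after 30 seconds.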
func waitForFileExist(path string) error {
op := func() error {
if _, err := os.Stat(path); os.IsNotExist(err) {
return err
}
return nil
}
return testutil.Poll(op, 30*time.Second)
}
// readOutputNum reads a file at given filepath and returns the int at the
// requested position.
func readOutputNum(file string, position int) (int, error) {
f, err := os.Open(file)
if err != nil {
return 0, fmt.Errorf("error opening file: %q, %v", file, err)
}
// Ensure that there is content in output file.
if err := waitForFileNotEmpty(f); err != nil {
return 0, fmt.Errorf("error waiting for output file: %v", err)
}
b, err := ioutil.ReadAll(f)
if err != nil {
return 0, fmt.Errorf("error reading file: %v", err)
}
if len(b) == 0 {
return 0, fmt.Errorf("error no content was read")
}
// Strip leading null bytes caused by file offset not being 0 upon restore.
b = bytes.Trim(b, "\x00")
nums := strings.Split(string(b), "\n")
if position >= len(nums) {
return 0, fmt.Errorf("position %v is not within the length of content %v", position, nums)
}
if position == -1 {
// position -1 means the last number. The file ends with a newline,
// so the final split element is empty; use len(nums)-2.
position = len(nums) - 2
}
num, err := strconv.Atoi(nums[position])
if err != nil {
return 0, fmt.Errorf("error getting number from file: %v", err)
}
return num, nil
}
// run starts the sandbox and waits for it to exit, checking that the
// application succeeded.
func run(spec *specs.Spec, conf *config.Config) error {
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
return fmt.Errorf("error setting up container: %v", err)
}
defer cleanup()
// Create, start and wait for the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
Attached: true,
}
ws, err := Run(conf, args)
if err != nil {
return fmt.Errorf("running container: %v", err)
}
if !ws.Exited() || ws.ExitStatus() != 0 {
return fmt.Errorf("container failed, waitStatus: %v", ws)
}
return nil
}
type configOption int
const (
overlay configOption = iota
ptrace
kvm
nonExclusiveFS
)
var (
noOverlay = append(platformOptions, nonExclusiveFS)
all = append(noOverlay, overlay)
)
// configs generates different configurations to run tests.
func configs(t *testing.T, opts ...configOption) map[string]*config.Config {
// Always load the default config.
cs := make(map[string]*config.Config)
testutil.TestConfig(t)
for _, o := range opts {
c := testutil.TestConfig(t)
switch o {
case overlay:
c.Overlay = true
cs["overlay"] = c
case ptrace:
c.Platform = platforms.Ptrace
cs["ptrace"] = c
case kvm:
c.Platform = platforms.KVM
cs["kvm"] = c
case nonExclusiveFS:
c.FileAccess = config.FileAccessShared
cs["non-exclusive"] = c
default:
panic(fmt.Sprintf("unknown config option %v", o))
}
}
return cs
}
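// configsWithVFS2 returns the given configurations plus a VFS2 variant of
// each, keyed with a "VFS2" suffix.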
func configsWithVFS2(t *testing.T, opts ...configOption) map[string]*config.Config {
all := configs(t, opts...)
for key, value := range configs(t, opts...) {
value.VFS2 = true
all[key+"VFS2"] = value
}
return all
}
// TestLifecycle tests the basic Create/Start/Signal/Destroy container lifecycle.
// It verifies after each step that the container can be loaded from disk, and
// has the correct status.
func TestLifecycle(t *testing.T) {
// Start the child reaper.
childReaper := &testutil.Reaper{}
childReaper.Start()
defer childReaper.Stop()
for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
// The container will just sleep for a long time. We will kill it before
// it finishes sleeping.
spec := testutil.NewSpecWithArgs("sleep", "100")
rootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// expectedPL lists the expected process state of the container.
expectedPL := []*control.Process{
newProcessBuilder().Cmd("sleep").Process(),
}
// Create the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
// Load the container from disk and check the status.
c, err = Load(rootDir, args.ID)
if err != nil {
t.Fatalf("error loading container: %v", err)
}
if got, want := c.Status, Created; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
// List should return the container id.
ids, err := List(rootDir)
if err != nil {
t.Fatalf("error listing containers: %v", err)
}
if got, want := ids, []string{args.ID}; !reflect.DeepEqual(got, want) {
t.Errorf("container list got %v, want %v", got, want)
}
// Start the container.
if err := c.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Load the container from disk and check the status.
c, err = Load(rootDir, args.ID)
if err != nil {
t.Fatalf("error loading container: %v", err)
}
if got, want := c.Status, Running; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
// Verify that "sleep 100" is running.
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Wait on the container.
ch := make(chan error)
go func() {
ws, err := c.Wait()
if err != nil {
ch <- err
}
if got, want := ws.Signal(), syscall.SIGTERM; got != want {
ch <- fmt.Errorf("got signal %v, want %v", got, want)
}
ch <- nil
}()
// Wait a bit to ensure that we've started waiting on
// the container before we signal.
time.Sleep(time.Second)
// Send the container a SIGTERM which will cause it to stop.
if err := c.SignalContainer(syscall.SIGTERM, false); err != nil {
t.Fatalf("error sending signal %v to container: %v", syscall.SIGTERM, err)
}
// Wait for it to die.
if err := <-ch; err != nil {
t.Fatalf("error waiting for container: %v", err)
}
// Load the container from disk and check the status.
c, err = Load(rootDir, args.ID)
if err != nil {
t.Fatalf("error loading container: %v", err)
}
if got, want := c.Status, Stopped; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
// Destroy the container.
if err := c.Destroy(); err != nil {
t.Fatalf("error destroying container: %v", err)
}
// List should not return the container id.
ids, err = List(rootDir)
if err != nil {
t.Fatalf("error listing containers: %v", err)
}
if len(ids) != 0 {
t.Errorf("expected container list to be empty, but got %v", ids)
}
// Loading the container by id should fail.
if _, err = Load(rootDir, args.ID); err == nil {
t.Errorf("expected loading destroyed container to fail, but it did not")
}
})
}
}
// TestExePath tests that we can execute the application with different path formats.
func TestExePath(t *testing.T) {
// Create two directories that will be prepended to PATH.
firstPath, err := ioutil.TempDir(testutil.TmpDir(), "first")
if err != nil {
t.Fatalf("error creating temporary directory: %v", err)
}
defer os.RemoveAll(firstPath)
secondPath, err := ioutil.TempDir(testutil.TmpDir(), "second")
if err != nil {
t.Fatalf("error creating temporary directory: %v", err)
}
defer os.RemoveAll(secondPath)
// Create three minimal executables in the second path, two of which
// will be masked by files in the first path.
for _, p := range []string{"unmasked", "masked1", "masked2"} {
path := filepath.Join(secondPath, p)
f, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0777)
if err != nil {
t.Fatalf("error opening path: %v", err)
}
defer f.Close()
if _, err := io.WriteString(f, "#!/bin/true\n"); err != nil {
t.Fatalf("error writing contents: %v", err)
}
}
// Create a non-executable file in the first path which masks a healthy
// executable in the second.
nonExecutable := filepath.Join(firstPath, "masked1")
f2, err := os.OpenFile(nonExecutable, os.O_CREATE|os.O_EXCL, 0666)
if err != nil {
t.Fatalf("error opening file: %v", err)
}
f2.Close()
// Create a non-regular file in the first path which masks a healthy
// executable in the second.
nonRegular := filepath.Join(firstPath, "masked2")
if err := os.Mkdir(nonRegular, 0777); err != nil {
t.Fatalf("error making directory: %v", err)
}
for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
for _, test := range []struct {
path string
success bool
}{
{path: "true", success: true},
{path: "bin/true", success: true},
{path: "/bin/true", success: true},
{path: "thisfiledoesntexit", success: false},
{path: "bin/thisfiledoesntexit", success: false},
{path: "/bin/thisfiledoesntexit", success: false},
{path: "unmasked", success: true},
{path: filepath.Join(firstPath, "unmasked"), success: false},
{path: filepath.Join(secondPath, "unmasked"), success: true},
{path: "masked1", success: true},
{path: filepath.Join(firstPath, "masked1"), success: false},
{path: filepath.Join(secondPath, "masked1"), success: true},
{path: "masked2", success: true},
{path: filepath.Join(firstPath, "masked2"), success: false},
{path: filepath.Join(secondPath, "masked2"), success: true},
} {
t.Run(fmt.Sprintf("path=%s,success=%t", test.path, test.success), func(t *testing.T) {
spec := testutil.NewSpecWithArgs(test.path)
spec.Process.Env = []string{
fmt.Sprintf("PATH=%s:%s:%s", firstPath, secondPath, os.Getenv("PATH")),
}
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("exec: error setting up container: %v", err)
}
defer cleanup()
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
Attached: true,
}
ws, err := Run(conf, args)
if test.success {
if err != nil {
t.Errorf("exec: error running container: %v", err)
}
if ws.ExitStatus() != 0 {
t.Errorf("exec: got exit status %v want %v", ws.ExitStatus(), 0)
}
} else {
if err == nil {
t.Errorf("exec: got: no error, want: error")
}
}
})
}
})
}
}
// TestAppExitStatus tests that we can retrieve the application exit status from the container.
func TestAppExitStatus(t *testing.T) {
doAppExitStatus(t, false)
}
// This is TestAppExitStatus for VFSv2.
func TestAppExitStatusVFS2(t *testing.T) {
doAppExitStatus(t, true)
}
func doAppExitStatus(t *testing.T, vfs2 bool) {
// First container will succeed.
succSpec := testutil.NewSpecWithArgs("true")
conf := testutil.TestConfig(t)
conf.VFS2 = vfs2
_, bundleDir, cleanup, err := testutil.SetupContainer(succSpec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
args := Args{
ID: testutil.RandomContainerID(),
Spec: succSpec,
BundleDir: bundleDir,
Attached: true,
}
ws, err := Run(conf, args)
if err != nil {
t.Fatalf("error running container: %v", err)
}
if ws.ExitStatus() != 0 {
t.Errorf("got exit status %v want %v", ws.ExitStatus(), 0)
}
// Second container exits with non-zero status.
wantStatus := 123
errSpec := testutil.NewSpecWithArgs("bash", "-c", fmt.Sprintf("exit %d", wantStatus))
_, bundleDir2, cleanup2, err := testutil.SetupContainer(errSpec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup2()
args2 := Args{
ID: testutil.RandomContainerID(),
Spec: errSpec,
BundleDir: bundleDir2,
Attached: true,
}
ws, err = Run(conf, args2)
if err != nil {
t.Fatalf("error running container: %v", err)
}
if ws.ExitStatus() != wantStatus {
t.Errorf("got exit status %v want %v", ws.ExitStatus(), wantStatus)
}
}
// TestExec verifies that a container can exec a new program.
func TestExec(t *testing.T) {
for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
dir, err := ioutil.TempDir(testutil.TmpDir(), "exec-test")
if err != nil {
t.Fatalf("error creating temporary directory: %v", err)
}
// Note that some shells may exec the final command in a sequence as
// an optimization. We avoid this here by adding the exit 0.
cmd := fmt.Sprintf("ln -s /bin/true %q/symlink && sleep 100 && exit 0", dir)
spec := testutil.NewSpecWithArgs("sh", "-c", cmd)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and start the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Wait until sleep is running to ensure the symlink was created.
expectedPL := []*control.Process{
newProcessBuilder().Cmd("sh").Process(),
newProcessBuilder().Cmd("sleep").Process(),
}
if err := waitForProcessList(cont, expectedPL); err != nil {
t.Fatalf("waitForProcessList: %v", err)
}
for _, tc := range []struct {
name string
args control.ExecArgs
}{
{
name: "complete",
args: control.ExecArgs{
Filename: "/bin/true",
Argv: []string{"/bin/true"},
},
},
{
name: "filename",
args: control.ExecArgs{
Filename: "/bin/true",
},
},
{
name: "argv",
args: control.ExecArgs{
Argv: []string{"/bin/true"},
},
},
{
name: "filename resolution",
args: control.ExecArgs{
Filename: "true",
Envv: []string{"PATH=/bin"},
},
},
{
name: "argv resolution",
args: control.ExecArgs{
Argv: []string{"true"},
Envv: []string{"PATH=/bin"},
},
},
{
name: "argv symlink",
args: control.ExecArgs{
Argv: []string{filepath.Join(dir, "symlink")},
},
},
{
name: "working dir",
args: control.ExecArgs{
Argv: []string{"/bin/sh", "-c", `if [[ "${PWD}" != "/tmp" ]]; then exit 1; fi`},
WorkingDirectory: "/tmp",
},
},
{
name: "user",
args: control.ExecArgs{
Argv: []string{"/bin/sh", "-c", `if [[ "$(id -u)" != "343" ]]; then exit 1; fi`},
KUID: 343,
},
},
{
name: "group",
args: control.ExecArgs{
Argv: []string{"/bin/sh", "-c", `if [[ "$(id -g)" != "343" ]]; then exit 1; fi`},
KGID: 343,
},
},
{
name: "env",
args: control.ExecArgs{
Argv: []string{"/bin/sh", "-c", `if [[ "${FOO}" != "123" ]]; then exit 1; fi`},
Envv: []string{"FOO=123"},
},
},
} {
t.Run(tc.name, func(t *testing.T) {
// t.Parallel()
if ws, err := cont.executeSync(&tc.args); err != nil {
t.Fatalf("executeAsync(%+v): %v", tc.args, err)
} else if ws != 0 {
t.Fatalf("executeAsync(%+v) failed with exit: %v", tc.args, ws)
}
})
}
})
}
}
// TestExecProcList verifies that a container can exec a new program and it
// shows correctly in the process list.
func TestExecProcList(t *testing.T) {
for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
const uid = 343
spec := testutil.NewSpecWithArgs("sleep", "100")
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and start the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
execArgs := &control.ExecArgs{
Filename: "/bin/sleep",
Argv: []string{"/bin/sleep", "5"},
WorkingDirectory: "/",
KUID: uid,
}
// Verify that "sleep 100" and "sleep 5" are running after exec. First,
// start running exec (which blocks).
ch := make(chan error)
go func() {
exitStatus, err := cont.executeSync(execArgs)
if err != nil {
ch <- err
} else if exitStatus != 0 {
ch <- fmt.Errorf("failed with exit status: %v", exitStatus)
} else {
ch <- nil
}
}()
// expectedPL lists the expected process state of the container.
expectedPL := []*control.Process{
newProcessBuilder().PID(1).PPID(0).Cmd("sleep").UID(0).Process(),
newProcessBuilder().PID(2).PPID(0).Cmd("sleep").UID(uid).Process(),
}
if err := waitForProcessList(cont, expectedPL); err != nil {
t.Fatalf("error waiting for processes: %v", err)
}
// Ensure that exec finished without error.
select {
case <-time.After(10 * time.Second):
t.Fatalf("container timed out waiting for exec to finish.")
case err := <-ch:
if err != nil {
t.Errorf("container failed to exec %v: %v", args, err)
}
}
})
}
}
// TestKillPid verifies that we can signal individual exec'd processes.
func TestKillPid(t *testing.T) {
for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
app, err := testutil.FindFile("test/cmd/test_app/test_app")
if err != nil {
t.Fatal("error finding test_app:", err)
}
const nProcs = 4
spec := testutil.NewSpecWithArgs(app, "task-tree", "--depth", strconv.Itoa(nProcs-1), "--width=1", "--pause=true")
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and start the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Verify that all processes are running.
if err := waitForProcessCount(cont, nProcs); err != nil {
t.Fatalf("timed out waiting for processes to start: %v", err)
}
// Kill the child process with the largest PID.
procs, err := cont.Processes()
if err != nil {
t.Fatalf("failed to get process list: %v", err)
}
var pid int32
for _, p := range procs {
if pid < int32(p.PID) {
pid = int32(p.PID)
}
}
if err := cont.SignalProcess(syscall.SIGKILL, pid); err != nil {
t.Fatalf("failed to signal process %d: %v", pid, err)
}
// Verify that one process is gone.
if err := waitForProcessCount(cont, nProcs-1); err != nil {
t.Fatalf("error waiting for processes: %v", err)
}
procs, err = cont.Processes()
if err != nil {
t.Fatalf("failed to get process list: %v", err)
}
for _, p := range procs {
if pid == int32(p.PID) {
t.Fatalf("pid %d is still alive, which should be killed", pid)
}
}
})
}
}
// TestCheckpointRestore creates a container that continuously writes successive integers
// to a file. To test checkpoint and restore functionality, the container is
// checkpointed and the last number printed to the file is recorded. Then, it is restored in two
// new containers and the first number printed from these containers is checked. Both should
// be the next consecutive number after the last number from the checkpointed container.
func TestCheckpointRestore(t *testing.T) {
// Skip overlay because test requires writing to host file.
for name, conf := range configs(t, noOverlay...) {
t.Run(name, func(t *testing.T) {
dir, err := ioutil.TempDir(testutil.TmpDir(), "checkpoint-test")
if err != nil {
t.Fatalf("ioutil.TempDir failed: %v", err)
}
defer os.RemoveAll(dir)
if err := os.Chmod(dir, 0777); err != nil {
t.Fatalf("error chmoding file: %q, %v", dir, err)
}
outputPath := filepath.Join(dir, "output")
outputFile, err := createWriteableOutputFile(outputPath)
if err != nil {
t.Fatalf("error creating output file: %v", err)
}
defer outputFile.Close()
script := fmt.Sprintf("for ((i=0; ;i++)); do echo $i >> %q; sleep 1; done", outputPath)
spec := testutil.NewSpecWithArgs("bash", "-c", script)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and start the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Set the image path, which is where the checkpoint image will be saved.
imagePath := filepath.Join(dir, "test-image-file")
// Create the image file and open for writing.
file, err := os.OpenFile(imagePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644)
if err != nil {
t.Fatalf("error opening new file at imagePath: %v", err)
}
defer file.Close()
// Wait until the application has run.
if err := waitForFileNotEmpty(outputFile); err != nil {
t.Fatalf("Failed to wait for output file: %v", err)
}
// Checkpoint running container; save state into new file.
if err := cont.Checkpoint(file); err != nil {
t.Fatalf("error checkpointing container to empty file: %v", err)
}
defer os.RemoveAll(imagePath)
lastNum, err := readOutputNum(outputPath, -1)
if err != nil {
t.Fatalf("error with outputFile: %v", err)
}
// Delete and recreate file before restoring.
if err := os.Remove(outputPath); err != nil {
t.Fatalf("error removing file")
}
outputFile2, err := createWriteableOutputFile(outputPath)
if err != nil {
t.Fatalf("error creating output file: %v", err)
}
defer outputFile2.Close()
// Restore into a new container.
args2 := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
cont2, err := New(conf, args2)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont2.Destroy()
if err := cont2.Restore(spec, conf, imagePath); err != nil {
t.Fatalf("error restoring container: %v", err)
}
// Wait until the application has started running.
if err := waitForFileNotEmpty(outputFile2); err != nil {
t.Fatalf("Failed to wait for output file: %v", err)
}
firstNum, err := readOutputNum(outputPath, 0)
if err != nil {
t.Fatalf("error with outputFile: %v", err)
}
// Check that lastNum is one less than firstNum and that the container picks
// up from where it left off.
if lastNum+1 != firstNum {
t.Errorf("error numbers not in order, previous: %d, next: %d", lastNum, firstNum)
}
cont2.Destroy()
// Restore into another container!
// Delete and recreate file before restoring.
if err := os.Remove(outputPath); err != nil {
t.Fatalf("error removing file")
}
outputFile3, err := createWriteableOutputFile(outputPath)
if err != nil {
t.Fatalf("error creating output file: %v", err)
}
defer outputFile3.Close()
// Restore into a new container.
args3 := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
cont3, err := New(conf, args3)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont3.Destroy()
if err := cont3.Restore(spec, conf, imagePath); err != nil {
t.Fatalf("error restoring container: %v", err)
}
// Wait until the application has started running.
if err := waitForFileNotEmpty(outputFile3); err != nil {
t.Fatalf("Failed to wait for output file: %v", err)
}
firstNum2, err := readOutputNum(outputPath, 0)
if err != nil {
t.Fatalf("error with outputFile: %v", err)
}
// Check that lastNum is one less than firstNum and that the container picks
// up from where it left off.
if lastNum+1 != firstNum2 {
t.Errorf("error numbers not in order, previous: %d, next: %d", lastNum, firstNum2)
}
cont3.Destroy()
})
}
}
// TestUnixDomainSockets checks that Checkpoint/Restore works in cases
// with filesystem Unix Domain Socket use.
func TestUnixDomainSockets(t *testing.T) {
// Skip overlay because test requires writing to host file.
for name, conf := range configs(t, noOverlay...) {
t.Run(name, func(t *testing.T) {
// UDS path is limited to 108 chars for compatibility with older systems.
// Use '/tmp' (instead of testutil.TmpDir) to ensure the size limit is
// not exceeded. Assumes '/tmp' exists in the system.
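// Illustrative note (not part of the original test): the 108-character cap
// comes from the kernel's sockaddr_un layout, roughly:
//
//   struct sockaddr_un {
//       sa_family_t sun_family; // AF_UNIX
//       char        sun_path[108];
//   };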
dir, err := ioutil.TempDir("/tmp", "uds-test")
if err != nil {
t.Fatalf("ioutil.TempDir failed: %v", err)
}
defer os.RemoveAll(dir)
outputPath := filepath.Join(dir, "uds_output")
outputFile, err := os.OpenFile(outputPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
if err != nil {
t.Fatalf("error creating output file: %v", err)
}
defer outputFile.Close()
app, err := testutil.FindFile("test/cmd/test_app/test_app")
if err != nil {
t.Fatal("error finding test_app:", err)
}
socketPath := filepath.Join(dir, "uds_socket")
defer os.Remove(socketPath)
spec := testutil.NewSpecWithArgs(app, "uds", "--file", outputPath, "--socket", socketPath)
spec.Process.User = specs.User{
UID: uint32(os.Getuid()),
GID: uint32(os.Getgid()),
}
spec.Mounts = []specs.Mount{{
Type: "bind",
Destination: dir,
Source: dir,
}}
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and start the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Set the image path, the location where the checkpoint image will be saved.
imagePath := filepath.Join(dir, "test-image-file")
// Create the image file and open for writing.
file, err := os.OpenFile(imagePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644)
if err != nil {
t.Fatalf("error opening new file at imagePath: %v", err)
}
defer file.Close()
defer os.RemoveAll(imagePath)
// Wait until the application has started running.
if err := waitForFileNotEmpty(outputFile); err != nil {
t.Fatalf("Failed to wait for output file: %v", err)
}
// Checkpoint running container; save state into new file.
if err := cont.Checkpoint(file); err != nil {
t.Fatalf("error checkpointing container to empty file: %v", err)
}
// Read last number outputted before checkpoint.
lastNum, err := readOutputNum(outputPath, -1)
if err != nil {
t.Fatalf("error with outputFile: %v", err)
}
// Delete and recreate file before restoring.
if err := os.Remove(outputPath); err != nil {
t.Fatalf("error removing file")
}
outputFile2, err := os.OpenFile(outputPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
if err != nil {
t.Fatalf("error creating output file: %v", err)
}
defer outputFile2.Close()
// Restore into a new container.
argsRestore := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
contRestore, err := New(conf, argsRestore)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer contRestore.Destroy()
if err := contRestore.Restore(spec, conf, imagePath); err != nil {
t.Fatalf("error restoring container: %v", err)
}
// Wait until the application has started running.
if err := waitForFileNotEmpty(outputFile2); err != nil {
t.Fatalf("Failed to wait for output file: %v", err)
}
// Read first number outputted after restore.
firstNum, err := readOutputNum(outputPath, 0)
if err != nil {
t.Fatalf("error with outputFile: %v", err)
}
// Check that lastNum is one less than firstNum.
if lastNum+1 != firstNum {
t.Errorf("error numbers not consecutive, previous: %d, next: %d", lastNum, firstNum)
}
contRestore.Destroy()
})
}
}
// TestPauseResume tests that we can successfully pause and resume a container.
// The container will keep touching a file to indicate it's running. The test
// pauses the container, removes the file, and checks that it doesn't get
// recreated. Then it resumes the container, verify that the file gets created
// again.
func TestPauseResume(t *testing.T) {
for name, conf := range configs(t, noOverlay...) {
t.Run(name, func(t *testing.T) {
tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "lock")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
running := path.Join(tmpDir, "running")
script := fmt.Sprintf("while [[ true ]]; do touch %q; sleep 0.1; done", running)
spec := testutil.NewSpecWithArgs("/bin/bash", "-c", script)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and start the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Wait until container starts running, observed by the existence of running
// file.
if err := waitForFileExist(running); err != nil {
t.Errorf("error waiting for container to start: %v", err)
}
// Pause the running container.
if err := cont.Pause(); err != nil {
t.Errorf("error pausing container: %v", err)
}
if got, want := cont.Status, Paused; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
if err := os.Remove(running); err != nil {
t.Fatalf("os.Remove(%q) failed: %v", running, err)
}
// The script touches the file every 100ms. Give it a bit of time to run, to
// catch the case where pause didn't work.
time.Sleep(200 * time.Millisecond)
if _, err := os.Stat(running); !os.IsNotExist(err) {
t.Fatalf("container did not pause: file exist check: %v", err)
}
// Resume the running container.
if err := cont.Resume(); err != nil {
t.Errorf("error pausing container: %v", err)
}
if got, want := cont.Status, Running; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
// Verify that the file is once again created by container.
if err := waitForFileExist(running); err != nil {
t.Fatalf("error resuming container: file exist check: %v", err)
}
})
}
}
// TestPauseResumeStatus makes sure that the statuses are set correctly
// with calls to pause and resume and that pausing and resuming only
// occurs given the correct state.
func TestPauseResumeStatus(t *testing.T) {
spec := testutil.NewSpecWithArgs("sleep", "20")
conf := testutil.TestConfig(t)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and start the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Pause the running container.
if err := cont.Pause(); err != nil {
t.Errorf("error pausing container: %v", err)
}
if got, want := cont.Status, Paused; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
// Try to Pause again. Should cause error.
if err := cont.Pause(); err == nil {
t.Errorf("error pausing container that was already paused: %v", err)
}
if got, want := cont.Status, Paused; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
// Resume the running container.
if err := cont.Resume(); err != nil {
t.Errorf("error resuming container: %v", err)
}
if got, want := cont.Status, Running; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
// Try to resume again. Should cause error.
if err := cont.Resume(); err == nil {
t.Errorf("error resuming container already running: %v", err)
}
if got, want := cont.Status, Running; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
}
// TestCapabilities verifies that:
// - Running exec as non-root UID and GID will result in an error (because the
// executable file can't be read).
// - Running exec as non-root with CAP_DAC_OVERRIDE succeeds because it skips
// this check.
func TestCapabilities(t *testing.T) {
// Pick uid/gid different than ours.
uid := auth.KUID(os.Getuid() + 1)
gid := auth.KGID(os.Getgid() + 1)
for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
spec := testutil.NewSpecWithArgs("sleep", "100")
rootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and start the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// expectedPL lists the expected process state of the container.
expectedPL := []*control.Process{
newProcessBuilder().Cmd("sleep").Process(),
}
if err := waitForProcessList(cont, expectedPL); err != nil {
t.Fatalf("Failed to wait for sleep to start, err: %v", err)
}
// Create an executable that can't be run with the specified UID:GID.
// This shouldn't be callable within the container until we add the
// CAP_DAC_OVERRIDE capability to skip the access check.
exePath := filepath.Join(rootDir, "exe")
if err := ioutil.WriteFile(exePath, []byte("#!/bin/sh\necho hello"), 0770); err != nil {
t.Fatalf("couldn't create executable: %v", err)
}
defer os.Remove(exePath)
// Need to traverse the intermediate directory.
if err := os.Chmod(rootDir, 0755); err != nil {
t.Fatalf("os.Chmod(%q) failed: %v", rootDir, err)
}
execArgs := &control.ExecArgs{
Filename: exePath,
Argv: []string{exePath},
WorkingDirectory: "/",
KUID: uid,
KGID: gid,
Capabilities: &auth.TaskCapabilities{},
}
// "exe" should fail because we don't have the necessary permissions.
if _, err := cont.executeSync(execArgs); err == nil {
t.Fatalf("container executed without error, but an error was expected")
}
// Now we run with the capability enabled and should succeed.
execArgs.Capabilities = &auth.TaskCapabilities{
EffectiveCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),
}
// "exe" should not fail this time.
if _, err := cont.executeSync(execArgs); err != nil {
t.Fatalf("container failed to exec %v: %v", args, err)
}
})
}
}
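// Background note (illustrative, not from the original test): CAP_DAC_OVERRIDE
// lets a task bypass file permission (DAC) checks, which is why the second
// exec above succeeds even though the 0770 file is owned by a different
// UID:GID than the one the process runs as.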
// TestRunNonRoot checks that sandbox can be configured when running as
// non-privileged user.
func TestRunNonRoot(t *testing.T) {
for name, conf := range configsWithVFS2(t, noOverlay...) {
t.Run(name, func(t *testing.T) {
spec := testutil.NewSpecWithArgs("/bin/true")
// Set a random user/group with no access to "blocked" dir.
spec.Process.User.UID = 343
spec.Process.User.GID = 2401
spec.Process.Capabilities = nil
// User running inside container can't list '$TMP/blocked' and would fail to
// mount it.
dir, err := ioutil.TempDir(testutil.TmpDir(), "blocked")
if err != nil {
t.Fatalf("ioutil.TempDir() failed: %v", err)
}
if err := os.Chmod(dir, 0700); err != nil {
t.Fatalf("os.MkDir(%q) failed: %v", dir, err)
}
dir = path.Join(dir, "test")
if err := os.Mkdir(dir, 0755); err != nil {
t.Fatalf("os.MkDir(%q) failed: %v", dir, err)
}
src, err := ioutil.TempDir(testutil.TmpDir(), "src")
if err != nil {
t.Fatalf("ioutil.TempDir() failed: %v", err)
}
spec.Mounts = append(spec.Mounts, specs.Mount{
Destination: dir,
Source: src,
Type: "bind",
})
if err := run(spec, conf); err != nil {
t.Fatalf("error running sandbox: %v", err)
}
})
}
}
// TestMountNewDir checks that runsc will create the destination directory if
// it doesn't exist.
func TestMountNewDir(t *testing.T) {
for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
root, err := ioutil.TempDir(testutil.TmpDir(), "root")
if err != nil {
t.Fatal("ioutil.TempDir() failed:", err)
}
srcDir := path.Join(root, "src", "dir", "anotherdir")
if err := os.MkdirAll(srcDir, 0755); err != nil {
t.Fatalf("os.MkDir(%q) failed: %v", srcDir, err)
}
mountDir := path.Join(root, "dir", "anotherdir")
spec := testutil.NewSpecWithArgs("/bin/ls", mountDir)
spec.Mounts = append(spec.Mounts, specs.Mount{
Destination: mountDir,
Source: srcDir,
Type: "bind",
})
// Extra points for creating the mount with a readonly root.
spec.Root.Readonly = true
if err := run(spec, conf); err != nil {
t.Fatalf("error running sandbox: %v", err)
}
})
}
}
func TestReadonlyRoot(t *testing.T) {
for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
spec := testutil.NewSpecWithArgs("sleep", "100")
spec.Root.Readonly = true
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
if err := c.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Read mounts to check that root is readonly.
out, ws, err := executeCombinedOutput(c, "/bin/sh", "-c", "mount | grep ' / '")
if err != nil || ws != 0 {
t.Fatalf("exec failed, ws: %v, err: %v", ws, err)
}
t.Logf("root mount: %q", out)
if !strings.Contains(string(out), "(ro)") {
t.Errorf("root not mounted readonly: %q", out)
}
// Check that file cannot be created.
ws, err = execute(c, "/bin/touch", "/foo")
if err != nil {
t.Fatalf("touch file in ro mount: %v", err)
}
if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM {
t.Fatalf("wrong waitStatus: %v", ws)
}
})
}
}
func TestReadonlyMount(t *testing.T) {
for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
dir, err := ioutil.TempDir(testutil.TmpDir(), "ro-mount")
if err != nil {
t.Fatalf("ioutil.TempDir() failed: %v", err)
}
spec := testutil.NewSpecWithArgs("sleep", "100")
spec.Mounts = append(spec.Mounts, specs.Mount{
Destination: dir,
Source: dir,
Type: "bind",
Options: []string{"ro"},
})
spec.Root.Readonly = false
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
if err := c.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Read mounts to check that volume is readonly.
cmd := fmt.Sprintf("mount | grep ' %s '", dir)
out, ws, err := executeCombinedOutput(c, "/bin/sh", "-c", cmd)
if err != nil || ws != 0 {
t.Fatalf("exec failed, ws: %v, err: %v", ws, err)
}
t.Logf("mount: %q", out)
if !strings.Contains(string(out), "(ro)") {
t.Errorf("volume not mounted readonly: %q", out)
}
// Check that file cannot be created.
ws, err = execute(c, "/bin/touch", path.Join(dir, "file"))
if err != nil {
t.Fatalf("touch file in ro mount: %v", err)
}
if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM {
t.Fatalf("wrong WaitStatus: %v", ws)
}
})
}
}
func TestUIDMap(t *testing.T) {
for name, conf := range configsWithVFS2(t, noOverlay...) {
t.Run(name, func(t *testing.T) {
testDir, err := ioutil.TempDir(testutil.TmpDir(), "test-mount")
if err != nil {
t.Fatalf("ioutil.TempDir() failed: %v", err)
}
defer os.RemoveAll(testDir)
testFile := path.Join(testDir, "testfile")
spec := testutil.NewSpecWithArgs("touch", "/tmp/testfile")
uid := os.Getuid()
gid := os.Getgid()
spec.Linux = &specs.Linux{
Namespaces: []specs.LinuxNamespace{
{Type: specs.UserNamespace},
{Type: specs.PIDNamespace},
{Type: specs.MountNamespace},
},
UIDMappings: []specs.LinuxIDMapping{
{
ContainerID: 0,
HostID: uint32(uid),
Size: 1,
},
},
GIDMappings: []specs.LinuxIDMapping{
{
ContainerID: 0,
HostID: uint32(gid),
Size: 1,
},
},
}
spec.Mounts = append(spec.Mounts, specs.Mount{
Destination: "/tmp",
Source: testDir,
Type: "bind",
})
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create, start and wait for the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
if err := c.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
ws, err := c.Wait()
if err != nil {
t.Fatalf("error waiting on container: %v", err)
}
if !ws.Exited() || ws.ExitStatus() != 0 {
t.Fatalf("container failed, waitStatus: %v", ws)
}
st := syscall.Stat_t{}
if err := syscall.Stat(testFile, &st); err != nil {
t.Fatalf("error stat /testfile: %v", err)
}
if st.Uid != uint32(uid) || st.Gid != uint32(gid) {
t.Fatalf("UID: %d (%d) GID: %d (%d)", st.Uid, uid, st.Gid, gid)
}
})
}
}
// TestAbbreviatedIDs checks that runsc supports using abbreviated container
// IDs in place of full IDs.
func TestAbbreviatedIDs(t *testing.T) {
doAbbreviatedIDsTest(t, false)
}
func TestAbbreviatedIDsVFS2(t *testing.T) {
doAbbreviatedIDsTest(t, true)
}
func doAbbreviatedIDsTest(t *testing.T, vfs2 bool) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
t.Fatalf("error creating root dir: %v", err)
}
defer cleanup()
conf := testutil.TestConfig(t)
conf.RootDir = rootDir
conf.VFS2 = vfs2
cids := []string{
"foo-" + testutil.RandomContainerID(),
"bar-" + testutil.RandomContainerID(),
"baz-" + testutil.RandomContainerID(),
}
for _, cid := range cids {
spec := testutil.NewSpecWithArgs("sleep", "100")
bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and start the container.
args := Args{
ID: cid,
Spec: spec,
BundleDir: bundleDir,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
}
// These should all be unambiguous.
unambiguous := map[string]string{
"f": cids[0],
cids[0]: cids[0],
"bar": cids[1],
cids[1]: cids[1],
"baz": cids[2],
cids[2]: cids[2],
}
for shortid, longid := range unambiguous {
if _, err := Load(rootDir, shortid); err != nil {
t.Errorf("%q should resolve to %q: %v", shortid, longid, err)
}
}
// These should be ambiguous.
ambiguous := []string{
"b",
"ba",
}
for _, shortid := range ambiguous {
if s, err := Load(rootDir, shortid); err == nil {
t.Errorf("%q should be ambiguous, but resolved to %q", shortid, s.ID)
}
}
}
func TestGoferExits(t *testing.T) {
doGoferExitTest(t, false)
}
func TestGoferExitsVFS2(t *testing.T) {
doGoferExitTest(t, true)
}
func doGoferExitTest(t *testing.T, vfs2 bool) {
spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
conf := testutil.TestConfig(t)
conf.VFS2 = vfs2
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and start the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
if err := c.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Kill sandbox and expect gofer to exit on its own.
sandboxProc, err := os.FindProcess(c.Sandbox.Pid)
if err != nil {
t.Fatalf("error finding sandbox process: %v", err)
}
if err := sandboxProc.Kill(); err != nil {
t.Fatalf("error killing sandbox process: %v", err)
}
err = blockUntilWaitable(c.GoferPid)
if err != nil && err != syscall.ECHILD {
t.Errorf("error waiting for gofer to exit: %v", err)
}
}
func TestRootNotMount(t *testing.T) {
appSym, err := testutil.FindFile("test/cmd/test_app/test_app")
if err != nil {
t.Fatal("error finding test_app:", err)
}
app, err := filepath.EvalSymlinks(appSym)
if err != nil {
t.Fatalf("error resolving %q symlink: %v", appSym, err)
}
log.Infof("App path %q is a symlink to %q", appSym, app)
static, err := testutil.IsStatic(app)
if err != nil {
t.Fatalf("error reading application binary: %v", err)
}
if !static {
// This happens during race builds; since we cannot map in shared
// libraries, we need to skip the test.
t.Skip()
}
root := filepath.Dir(app)
exe := "/" + filepath.Base(app)
log.Infof("Executing %q in %q", exe, root)
spec := testutil.NewSpecWithArgs(exe, "help")
spec.Root.Path = root
spec.Root.Readonly = true
spec.Mounts = nil
conf := testutil.TestConfig(t)
if err := run(spec, conf); err != nil {
t.Fatalf("error running sandbox: %v", err)
}
}
func TestUserLog(t *testing.T) {
app, err := testutil.FindFile("test/cmd/test_app/test_app")
if err != nil {
t.Fatal("error finding test_app:", err)
}
// sched_rr_get_interval = 148 - not implemented in gvisor.
spec := testutil.NewSpecWithArgs(app, "syscall", "--syscall=148")
conf := testutil.TestConfig(t)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
dir, err := ioutil.TempDir(testutil.TmpDir(), "user_log_test")
if err != nil {
t.Fatalf("error creating tmp dir: %v", err)
}
userLog := filepath.Join(dir, "user.log")
// Create, start and wait for the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
UserLog: userLog,
Attached: true,
}
ws, err := Run(conf, args)
if err != nil {
t.Fatalf("error running container: %v", err)
}
if !ws.Exited() || ws.ExitStatus() != 0 {
t.Fatalf("container failed, waitStatus: %v", ws)
}
out, err := ioutil.ReadFile(userLog)
if err != nil {
t.Fatalf("error opening user log file %q: %v", userLog, err)
}
if want := "Unsupported syscall sched_rr_get_interval("; !strings.Contains(string(out), want) {
t.Errorf("user log file doesn't contain %q, out: %s", want, string(out))
}
}
func TestWaitOnExitedSandbox(t *testing.T) {
for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
// Run a shell that sleeps for 1 second and then exits with a
// non-zero code.
const wantExit = 17
cmd := fmt.Sprintf("sleep 1; exit %d", wantExit)
spec := testutil.NewSpecWithArgs("/bin/sh", "-c", cmd)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and Start the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
if err := c.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Wait on the sandbox. This will make an RPC to the sandbox
// and get the actual exit status of the application.
ws, err := c.Wait()
if err != nil {
t.Fatalf("error waiting on container: %v", err)
}
if got := ws.ExitStatus(); got != wantExit {
t.Errorf("got exit status %d, want %d", got, wantExit)
}
// Now the sandbox has exited, but the zombie sandbox process
// still exists. Calling Wait() now will return the sandbox
// exit status.
ws, err = c.Wait()
if err != nil {
t.Fatalf("error waiting on container: %v", err)
}
if got := ws.ExitStatus(); got != wantExit {
t.Errorf("got exit status %d, want %d", got, wantExit)
}
})
}
}
func TestDestroyNotStarted(t *testing.T) {
doDestroyNotStartedTest(t, false)
}
func TestDestroyNotStartedVFS2(t *testing.T) {
doDestroyNotStartedTest(t, true)
}
func doDestroyNotStartedTest(t *testing.T, vfs2 bool) {
spec := testutil.NewSpecWithArgs("/bin/sleep", "100")
conf := testutil.TestConfig(t)
conf.VFS2 = vfs2
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create the container and check that it can be destroyed.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
if err := c.Destroy(); err != nil {
t.Fatalf("deleting non-started container failed: %v", err)
}
}
// TestDestroyStarting attempts to force a race between start and destroy.
func TestDestroyStarting(t *testing.T) {
doDestroyStartingTest(t, false)
}
func TestDestroyStartingVFS2(t *testing.T) {
doDestroyStartingTest(t, true)
}
func doDestroyStartingTest(t *testing.T, vfs2 bool) {
for i := 0; i < 10; i++ {
spec := testutil.NewSpecWithArgs("/bin/sleep", "100")
conf := testutil.TestConfig(t)
conf.VFS2 = vfs2
rootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create the container and check that it can be destroyed.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
// Container is not thread safe, so load another instance to run in
// concurrently.
startCont, err := Load(rootDir, args.ID)
if err != nil {
t.Fatalf("error loading container: %v", err)
}
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
// Ignore failures, start can fail if destroy runs first.
startCont.Start(conf)
}()
wg.Add(1)
go func() {
defer wg.Done()
if err := c.Destroy(); err != nil {
t.Errorf("deleting non-started container failed: %v", err)
}
}()
wg.Wait()
}
}
func TestCreateWorkingDir(t *testing.T) {
for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "cwd-create")
if err != nil {
t.Fatalf("ioutil.TempDir() failed: %v", err)
}
dir := path.Join(tmpDir, "new/working/dir")
// touch will fail if the directory doesn't exist.
spec := testutil.NewSpecWithArgs("/bin/touch", path.Join(dir, "file"))
spec.Process.Cwd = dir
spec.Root.Readonly = true
if err := run(spec, conf); err != nil {
t.Fatalf("Error running container: %v", err)
}
})
}
}
// TestMountPropagation verifies that mount propagates to slave but not to
// private mounts.
func TestMountPropagation(t *testing.T) {
// Setup dir structure:
// - src: is mounted as shared and is used as source for both private and
// slave mounts
// - dir: will be bind mounted inside src and should propagate to slave
tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "mount")
if err != nil {
t.Fatalf("ioutil.TempDir() failed: %v", err)
}
src := filepath.Join(tmpDir, "src")
srcMnt := filepath.Join(src, "mnt")
dir := filepath.Join(tmpDir, "dir")
for _, path := range []string{src, srcMnt, dir} {
if err := os.MkdirAll(path, 0777); err != nil {
t.Fatalf("MkdirAll(%q): %v", path, err)
}
}
dirFile := filepath.Join(dir, "file")
f, err := os.Create(dirFile)
if err != nil {
t.Fatalf("os.Create(%q): %v", dirFile, err)
}
f.Close()
// Setup src as a shared mount.
if err := syscall.Mount(src, src, "bind", syscall.MS_BIND, ""); err != nil {
t.Fatalf("mount(%q, %q, MS_BIND): %v", dir, srcMnt, err)
}
if err := syscall.Mount("", src, "", syscall.MS_SHARED, ""); err != nil {
t.Fatalf("mount(%q, MS_SHARED): %v", srcMnt, err)
}
spec := testutil.NewSpecWithArgs("sleep", "1000")
priv := filepath.Join(tmpDir, "priv")
slave := filepath.Join(tmpDir, "slave")
spec.Mounts = []specs.Mount{
{
Source: src,
Destination: priv,
Type: "bind",
Options: []string{"private"},
},
{
Source: src,
Destination: slave,
Type: "bind",
Options: []string{"slave"},
},
}
conf := testutil.TestConfig(t)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("starting container: %v", err)
}
// After the container is started, mount dir inside source and check what
// happens to both destinations.
if err := syscall.Mount(dir, srcMnt, "bind", syscall.MS_BIND, ""); err != nil {
t.Fatalf("mount(%q, %q, MS_BIND): %v", dir, srcMnt, err)
}
// Check that mount didn't propagate to private mount.
privFile := filepath.Join(priv, "mnt", "file")
if ws, err := execute(cont, "/usr/bin/test", "!", "-f", privFile); err != nil || ws != 0 {
t.Fatalf("exec: test ! -f %q, ws: %v, err: %v", privFile, ws, err)
}
// Check that mount propagated to slave mount.
slaveFile := filepath.Join(slave, "mnt", "file")
if ws, err := execute(cont, "/usr/bin/test", "-f", slaveFile); err != nil || ws != 0 {
t.Fatalf("exec: test -f %q, ws: %v, err: %v", privFile, ws, err)
}
}
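// Illustrative host-side equivalent of the propagation setup above (not part
// of the original test; paths are placeholders):
//
//   mount --bind src src       # syscall.Mount(src, src, "bind", MS_BIND, "")
//   mount --make-shared src    # syscall.Mount("", src, "", MS_SHARED, "")
//   mount --bind dir src/mnt   # propagates to the "slave" bind, not "private"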
func TestMountSymlink(t *testing.T) {
for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
dir, err := ioutil.TempDir(testutil.TmpDir(), "mount-symlink")
if err != nil {
t.Fatalf("ioutil.TempDir() failed: %v", err)
}
defer os.RemoveAll(dir)
source := path.Join(dir, "source")
target := path.Join(dir, "target")
for _, path := range []string{source, target} {
if err := os.MkdirAll(path, 0777); err != nil {
t.Fatalf("os.MkdirAll(): %v", err)
}
}
f, err := os.Create(path.Join(source, "file"))
if err != nil {
t.Fatalf("os.Create(): %v", err)
}
f.Close()
link := path.Join(dir, "link")
if err := os.Symlink(target, link); err != nil {
t.Fatalf("os.Symlink(%q, %q): %v", target, link, err)
}
spec := testutil.NewSpecWithArgs("/bin/sleep", "1000")
// Mount to a symlink to ensure the mount code will follow it and mount
// at the symlink target.
spec.Mounts = append(spec.Mounts, specs.Mount{
Type: "bind",
Destination: link,
Source: source,
})
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("starting container: %v", err)
}
// Check that symlink was resolved and mount was created where the symlink
// is pointing to.
file := path.Join(target, "file")
if ws, err := execute(cont, "/usr/bin/test", "-f", file); err != nil || ws != 0 {
t.Fatalf("exec: test -f %q, ws: %v, err: %v", file, ws, err)
}
})
}
}
// Check that --net-raw disables the CAP_NET_RAW capability.
func TestNetRaw(t *testing.T) {
capNetRaw := strconv.FormatUint(bits.MaskOf64(int(linux.CAP_NET_RAW)), 10)
app, err := testutil.FindFile("test/cmd/test_app/test_app")
if err != nil {
t.Fatal("error finding test_app:", err)
}
for _, enableRaw := range []bool{true, false} {
conf := testutil.TestConfig(t)
conf.EnableRaw = enableRaw
test := "--enabled"
if !enableRaw {
test = "--disabled"
}
spec := testutil.NewSpecWithArgs(app, "capability", test, capNetRaw)
if err := run(spec, conf); err != nil {
t.Fatalf("Error running container: %v", err)
}
}
}
// TestTTYField checks TTY field returned by container.Processes().
func TestTTYField(t *testing.T) {
stop := testutil.StartReaper()
defer stop()
testApp, err := testutil.FindFile("test/cmd/test_app/test_app")
if err != nil {
t.Fatal("error finding test_app:", err)
}
testCases := []struct {
name string
useTTY bool
wantTTYField string
}{
{
name: "no tty",
useTTY: false,
wantTTYField: "?",
},
{
name: "tty used",
useTTY: true,
wantTTYField: "pts/0",
},
}
for _, test := range testCases {
for _, vfs2 := range []bool{false, true} {
name := test.name
if vfs2 {
name += "-vfs2"
}
t.Run(name, func(t *testing.T) {
conf := testutil.TestConfig(t)
conf.VFS2 = vfs2
// We will run /bin/sleep, possibly with an open TTY.
cmd := []string{"/bin/sleep", "10000"}
if test.useTTY {
// Run inside the "pty-runner".
cmd = append([]string{testApp, "pty-runner"}, cmd...)
}
spec := testutil.NewSpecWithArgs(cmd...)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and start the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
if err := c.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Wait for sleep to be running, and check the TTY
// field.
var gotTTYField string
cb := func() error {
ps, err := c.Processes()
if err != nil {
err = fmt.Errorf("error getting process data from container: %v", err)
return &backoff.PermanentError{Err: err}
}
for _, p := range ps {
if strings.Contains(p.Cmd, "sleep") {
gotTTYField = p.TTY
return nil
}
}
return fmt.Errorf("sleep not running")
}
if err := testutil.Poll(cb, 30*time.Second); err != nil {
t.Fatalf("error waiting for sleep process: %v", err)
}
if gotTTYField != test.wantTTYField {
t.Errorf("tty field got %q, want %q", gotTTYField, test.wantTTYField)
}
})
}
}
}
func execute(cont *Container, name string, arg ...string) (syscall.WaitStatus, error) {
args := &control.ExecArgs{
Filename: name,
Argv: append([]string{name}, arg...),
}
return cont.executeSync(args)
}
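// Illustrative usage of the execute helper above (not part of the original
// tests):
//
//   ws, err := execute(cont, "/bin/true")
//   if err != nil || !ws.Exited() || ws.ExitStatus() != 0 {
//       t.Fatalf("exec failed, ws: %v, err: %v", ws, err)
//   }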
func executeCombinedOutput(cont *Container, name string, arg ...string) ([]byte, syscall.WaitStatus, error) {
r, w, err := os.Pipe()
if err != nil {
return nil, 0, err
}
defer r.Close()
args := &control.ExecArgs{
Filename: name,
Argv: append([]string{name}, arg...),
FilePayload: urpc.FilePayload{Files: []*os.File{os.Stdin, w, w}},
}
ws, err := cont.executeSync(args)
w.Close()
if err != nil {
return nil, 0, err
}
out, err := ioutil.ReadAll(r)
return out, ws, err
}
// executeSync synchronously executes a new process.
func (cont *Container) executeSync(args *control.ExecArgs) (syscall.WaitStatus, error) {
pid, err := cont.Execute(args)
if err != nil {
return 0, fmt.Errorf("error executing: %v", err)
}
ws, err := cont.WaitPID(pid)
if err != nil {
return 0, fmt.Errorf("error waiting: %v", err)
}
return ws, nil
}
func TestMain(m *testing.M) {
log.SetLevel(log.Debug)
flag.Parse()
if err := testutil.ConfigureExePath(); err != nil {
panic(err.Error())
}
specutils.MaybeRunAsRoot()
os.Exit(m.Run())
}
| [
"\"PATH\""
] | [] | [
"PATH"
] | [] | ["PATH"] | go | 1 | 0 | |
src/main/java/tk/ungeschickt/main/Info.java | package tk.ungeschickt.main;
import net.dv8tion.jda.api.entities.TextChannel;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.nio.file.FileSystems;
public class Info {
public void setPrefix(String prefix) {
logger.trace("Set var prefix to " + prefix);
this.prefix = prefix;
}
private static final Logger logger = LoggerFactory.getLogger(Info.class);
private String prefix;
public String getBotToken() {
return botToken;
}
private final String botToken;
private final String websiteUsername;
private final String websitePassword;
public TextChannel getDebugChannel() {
return debugChannel;
}
public void setDebugChannel(TextChannel debugChannel) {
logger.trace("Set TextChannel debugChannel to " + prefix);
this.debugChannel = debugChannel;
}
private TextChannel debugChannel;
public boolean isDebug() {
return debug;
}
public void setDebug(boolean debug) {
logger.trace("Set flag debug to " + debug);
this.debug = debug;
}
private boolean debug = true;
public static Info getInstance() throws ParseException, IOException {
if (instance == null) {
return new Info();
} else return instance;
}
private static void setInstance(Info instance) {
logger.trace("Set Info instance");
Info.instance = instance;
}
private static Info instance;
private Info() throws IOException, ParseException {
String botToken1 = System.getenv("botToken");
String websiteUsername1 = System.getenv("webUsername");
String websitePassword1 = System.getenv("webPassword");
logger.trace("Try to get credentials with environments vars");
boolean botToken1Empty = botToken1 == null || botToken1.isEmpty();
boolean webUsername1Empty = websiteUsername1 == null || websiteUsername1.isEmpty();
boolean webPassword1Empty = websitePassword1 == null || websitePassword1.isEmpty();
if (botToken1Empty || webUsername1Empty || webPassword1Empty) {
logger.trace("Some Environment vars are empty");
File file = new File(String.valueOf(FileSystems.getDefault().getPath("secrets.json")));
if (file.exists()) {
JSONParser jsonParser = new JSONParser();
Object obj = jsonParser.parse(new FileReader(file));
JSONObject jsonObject = (JSONObject) obj;
logger.trace("Created JSONObject");
if (botToken1Empty)
botToken1 = (String) jsonObject.get("botToken");
if (webUsername1Empty)
websiteUsername1 = (String) jsonObject.get("webUsername");
if (webPassword1Empty)
websitePassword1 = (String) jsonObject.get("webPassword");
} else
throw new FileNotFoundException("secrets.json not found.");
}
assert botToken1 != null;
assert websiteUsername1 != null;
assert websitePassword1 != null;
if (botToken1.equals("") || websiteUsername1.equals("") || websitePassword1.equals(""))
throw new RuntimeException("Secrets are empty. Please write those in secret.json.");
this.botToken = botToken1;
this.websiteUsername = websiteUsername1;
this.websitePassword = websitePassword1;
logger.info("Successfully acquired credentials for the bot!");
setInstance(this);
logger.trace("Set Instance Info");
}
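// Illustrative secrets.json layout read by the constructor above
// (placeholder values, not taken from a real deployment):
//
//   {
//     "botToken": "...",
//     "webUsername": "...",
//     "webPassword": "..."
//   }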
public String getPrefix() {
return prefix;
}
public String getWebsiteUsername() {
return websiteUsername;
}
public String getWebsitePassword() {
return websitePassword;
}
}
| [
"\"botToken\"",
"\"webUsername\"",
"\"webPassword\""
] | [] | [
"webPassword",
"webUsername",
"botToken"
] | [] | ["webPassword", "webUsername", "botToken"] | java | 3 | 0 | |
cmd/appsctl/mattermost.go | // main handles deployment of the plugin to a development server using the Client4 API.
package main
import (
"net/http"
"os"
"strings"
"github.com/pkg/errors"
"github.com/mattermost/mattermost-plugin-apps/apps"
"github.com/mattermost/mattermost-plugin-apps/apps/appclient"
"github.com/mattermost/mattermost-plugin-apps/utils/httputils"
)
func getMattermostClient() (*appclient.Client, error) {
siteURL := os.Getenv("MM_SERVICESETTINGS_SITEURL")
adminToken := os.Getenv("MM_ADMIN_TOKEN")
if siteURL == "" || adminToken == "" {
return nil, errors.New("MM_SERVICESETTINGS_SITEURL and MM_ADMIN_TOKEN must be set")
}
return appclient.NewClient("", adminToken, siteURL), nil
}
func updateMattermost(m apps.Manifest, deployType apps.DeployType, installApp bool) error {
appClient, err := getMattermostClient()
if err != nil {
return err
}
// Update the listed app manifest and append the new deployment type if it's
// not already listed.
_, err = appClient.UpdateAppListing(appclient.UpdateAppListingRequest{
Manifest: m,
AddDeploys: apps.DeployTypes{deployType},
})
if err != nil {
return errors.Wrap(err, "failed to add local manifest to Mattermost")
}
log.Debugw("updated local manifest", "app_id", m.AppID, "deploy_type", deployType)
if installApp {
_, err = appClient.InstallApp(m.AppID, deployType)
if err != nil {
return errors.Wrap(err, "failed to install the app to Mattermost")
}
log.Debugw("installed app to Mattermost", "app_id", m.AppID)
}
return nil
}
func installPlugin(bundlePath string) (*apps.Manifest, error) {
appClient, err := getMattermostClient()
if err != nil {
return nil, err
}
f, err := os.Open(bundlePath)
if err != nil {
return nil, errors.Wrap(err, "failed to open the plugin bundle")
}
defer f.Close()
pluginManifest, _, err := appClient.UploadPluginForced(f)
if err != nil {
return nil, errors.Wrap(err, "failed to upload the plugin to Mattermost")
}
_, err = appClient.EnablePlugin(pluginManifest.Id)
if err != nil {
return nil, errors.Wrap(err, "failed to enable plugin on Mattermost")
}
manifestPath := strings.Join([]string{
appClient.Client4.URL,
"plugins",
pluginManifest.Id,
apps.PluginAppPath,
"manifest.json",
}, "/")
resp, err := appClient.Client4.HTTPClient.Get(manifestPath)
if err != nil {
return nil, errors.Wrapf(err, "failed to get the app manifest %s", manifestPath)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, errors.Errorf("failed to get the app manifest %s: status %v", manifestPath, resp.Status)
}
data, err := httputils.LimitReadAll(resp.Body, apps.MaxManifestSize)
if err != nil {
return nil, errors.Wrap(err, "failed to get the app manifest")
}
m, err := apps.DecodeCompatibleManifest(data)
if err != nil {
return nil, errors.Wrap(err, "failed to parse the app manifest")
}
return m, nil
}
| [
"\"MM_SERVICESETTINGS_SITEURL\"",
"\"MM_ADMIN_TOKEN\""
] | [] | [
"MM_SERVICESETTINGS_SITEURL",
"MM_ADMIN_TOKEN"
] | [] | ["MM_SERVICESETTINGS_SITEURL", "MM_ADMIN_TOKEN"] | go | 2 | 0 | |
zerver/migrations/0038_realm_change_to_community_defaults.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0037_disallow_null_string_id'),
]
operations = [
migrations.AlterField(
model_name='realm',
name='invite_required',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='realm',
name='org_type',
field=models.PositiveSmallIntegerField(default=2),
),
migrations.AlterField(
model_name='realm',
name='restricted_to_domain',
field=models.BooleanField(default=False),
),
]
| [] | [] | [] | [] | [] | python | null | null | null |
CI/trebuchet-release-pipeline/lambda_handle_trebuchet_release_notification.py | # Whenever you make any change here, you should update it in Amazon S3.
# This function serves as glue between SNS and S3.
# 1- Receives SNS message when Trebuchet release starts
# 2- Extracts the message (which should be JSON)
# 3- Writes the JSON to a file on disk
# 4- Downloads models with the presigned URL
# 5- Writes release notes to a file
# 6- Writes release id to a file
# 7- Upload all these files as a zip file to S3
import os
import shutil
import re
import json
import zipfile
import traceback
import boto3
from botocore.vendored import requests
S3_BUCKET_NAME = os.environ['S3_BUCKET_NAME']
RELEASE_MESSAGE_FILENAME = os.environ['RELEASE_MESSAGE_FILENAME']
RELEASE_ID_FILENAME = os.environ['RELEASE_ID_FILENAME']
RELEASE_NOTES_FILENAME = os.environ['RELEASE_NOTES_FILENAME']
PIPELINE_SOURCE = os.environ['PIPELINE_SOURCE']
UPDATE_STATUS_LAMBDA_FUNCTION_NAME = os.environ['UPDATE_STATUS_LAMBDA_FUNCTION_NAME']
OUTPUT_PATH = os.path.join('/tmp', 'output')
MODELS_OUTPUT_PATH = os.path.join(OUTPUT_PATH, 'models')
s3Resource = boto3.resource('s3', region_name = os.environ['AWS_REGION'])
lambdaClient = boto3.client('lambda', region_name = os.environ['AWS_REGION'])
updateStatusMessage = {
'stageName': 'HandleTrebuchetReleaseNotification',
'internalMessage': '',
'internalOnly': False,
'messageToTrebuchet': {
'releaseId' : '',
'language' : 'CPP',
'releaseState' : '',
'statusMessage' : ''
}
}
def lambda_handler(event, context):
try:
releaseMessage = json.loads(event['Records'][0]['Sns']['Message'])
# For local testing:
# with open(RELEASE_MESSAGE_FILENAME, 'r') as releaseMessageFile:
# releaseMessage = json.loads(releaseMessageFile.read())
print('[SNS] Receiving message from Trebuchet:', end = ' ')
print(releaseMessage)
if os.path.isdir(OUTPUT_PATH):
shutil.rmtree(OUTPUT_PATH)
os.mkdir(OUTPUT_PATH)
os.mkdir(MODELS_OUTPUT_PATH)
with open(os.path.join(OUTPUT_PATH, RELEASE_MESSAGE_FILENAME), 'w') as releaseMessageFile:
releaseMessageFile.write(json.dumps(releaseMessage))
releaseMessageFile.close()
with open(os.path.join(OUTPUT_PATH, RELEASE_ID_FILENAME), 'w') as releaseIdFile:
releaseIdFile.write(releaseMessage['release']['id'])
with open(os.path.join(OUTPUT_PATH, RELEASE_NOTES_FILENAME), 'w') as releaseNotesFile:
releaseNotesFile.write('')
updateStatusMessage['messageToTrebuchet'] = {
'releaseId' : releaseMessage['release']['id'],
'language' : 'CPP',
'releaseState' : 'InProgress',
'statusMessage' : 'Step 0 of 4. Handling release notification from Trebuchet.'
}
updateStatus(updateStatusMessage)
for feature in releaseMessage['release']['features']:
print('Downloading c2j model files for ' + feature['serviceId'])
response = requests.get(feature['c2jModels'])
if response.status_code != 200:
raise Exception('Error downloading c2j model with feature: ' + feature['featureArn'])
with open(os.path.join('/tmp', 'models.tmp.zip'), 'wb') as c2jModelsZipFile:
c2jModelsZipFile.write(response.content)
archive = zipfile.ZipFile(os.path.join('/tmp', 'models.tmp.zip'), 'r')
archive.debug = 3
for info in archive.infolist():
print(' ' + info.filename)
if re.match(r'output/.*\.normal\.json', info.filename):
outputPath = os.path.join(MODELS_OUTPUT_PATH, os.path.basename(info.filename))
print('* copying {0} to {1}'.format(info.filename, outputPath))
fileHandle = archive.open(info.filename, 'r')
fileOutput = fileHandle.read()
with open(outputPath, 'wb') as destination:
destination.write(fileOutput)
fileHandle.close()
releaseNotes = feature['releaseNotes']
print('Append release notes for ' + feature['serviceId'])
with open(os.path.join(OUTPUT_PATH, RELEASE_NOTES_FILENAME), 'a') as releaseNotesFile:
releaseNotesFile.write(releaseNotes + '\n\n')
updateStatusMessage['messageToTrebuchet']['statusMessage'] = 'Step 0 of 4. Handled release notification from Trebuchet.'
updateStatus(updateStatusMessage)
print('Archiving release-message, release-id, release-notes, and models directory into a zip file.')
shutil.make_archive('/tmp/models', 'zip', OUTPUT_PATH)
print('[S3] Sending zip file including json file to S3://{0}/{1}.'.format(S3_BUCKET_NAME, PIPELINE_SOURCE))
response = s3Resource.meta.client.upload_file('/tmp/models.zip', S3_BUCKET_NAME, PIPELINE_SOURCE)
print('Response:', end = ' ')
print(response)
except Exception:
traceback.print_exc()
updateStatusMessage['internalMessage'] = traceback.format_exc()
updateStatusMessage['messageToTrebuchet']['releaseState'] = 'Blocked'
updateStatusMessage['messageToTrebuchet']['statusMessage'] = 'Step 0 of 4. Failed to handle release notification from Trebuchet.'
updateStatus(updateStatusMessage)
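# Illustrative shape of the SNS event consumed by lambda_handler above
# (placeholder values, not from a real release):
#
#   {
#     "Records": [{
#       "Sns": {
#         "Message": "{\"release\": {\"id\": \"...\", \"features\": [...]}}"
#       }
#     }]
#   }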
def updateStatus(releaseStatus):
print('[Lambda] Triggering Lambda function to update status.')
response = lambdaClient.invoke(
FunctionName = UPDATE_STATUS_LAMBDA_FUNCTION_NAME,
InvocationType = 'RequestResponse',
Payload = json.dumps(releaseStatus)
)
print('Response:', end = ' ')
print(response)
# lambda_handler('', '') | [] | [] | [
"AWS_REGION",
"RELEASE_NOTES_FILENAME",
"RELEASE_MESSAGE_FILENAME",
"RELEASE_ID_FILENAME",
"S3_BUCKET_NAME",
"UPDATE_STATUS_LAMBDA_FUNCTION_NAME",
"PIPELINE_SOURCE"
] | [] | ["AWS_REGION", "RELEASE_NOTES_FILENAME", "RELEASE_MESSAGE_FILENAME", "RELEASE_ID_FILENAME", "S3_BUCKET_NAME", "UPDATE_STATUS_LAMBDA_FUNCTION_NAME", "PIPELINE_SOURCE"] | python | 7 | 0 | |
cmd/utils.go | package cmd
import (
"fmt"
"os"
"strings"
"time"
deis "github.com/deis/controller-sdk-go"
"github.com/deis/workflow-cli/pkg/git"
"github.com/deis/workflow-cli/settings"
)
var defaultLimit = -1
func progress() chan bool {
frames := []string{"...", "o..", ".o.", "..o"}
backspaces := strings.Repeat("\b", 3)
tick := time.Tick(400 * time.Millisecond)
quit := make(chan bool)
go func() {
for {
for _, frame := range frames {
fmt.Print(frame)
select {
case <-quit:
fmt.Print(backspaces)
close(quit)
return
case <-tick:
fmt.Print(backspaces)
}
}
}
}()
return quit
}
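// Illustrative usage of progress() (not part of the original source): the
// caller stops the spinner by sending on the returned channel, then waits
// for it to be closed:
//
//   quit := progress()
//   err := doSomethingSlow() // placeholder for a long-running call
//   quit <- true
//   <-quit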
// load loads settings file and looks up the app name
func load(cf string, appID string) (*settings.Settings, string, error) {
s, err := settings.Load(cf)
if err != nil {
return nil, "", err
}
if appID == "" {
appID, err = git.DetectAppName(s.Client.ControllerURL.Host)
if err != nil {
return nil, "", err
}
}
return s, appID, nil
}
func drinkOfChoice() string {
drink := os.Getenv("DEIS_DRINK_OF_CHOICE")
if drink == "" {
drink = "coffee"
}
return drink
}
func limitCount(objs, total int) string {
if objs == total {
return "\n"
}
return fmt.Sprintf(" (%d of %d)\n", objs, total)
}
// checkAPICompatibility handles specific behavior for certain errors,
// such as printing an warning for the API mismatch error
func checkAPICompatibility(c *deis.Client, err error) error {
if err == deis.ErrAPIMismatch {
fmt.Printf(`! WARNING: Client and server API versions do not match. Please consider upgrading.
! Client version: %s
! Server version: %s
`, deis.APIVersion, c.ControllerAPIVersion)
// API mismatch isn't fatal, so after warning continue on.
return nil
}
return err
}
| [
"\"DEIS_DRINK_OF_CHOICE\""
] | [] | [
"DEIS_DRINK_OF_CHOICE"
] | [] | ["DEIS_DRINK_OF_CHOICE"] | go | 1 | 0 | |
auth/api/server.go | package api
import (
"fmt"
"log"
"os"
"github.com/joho/godotenv"
"github.com/tapfunds/tf/auth/api/controllers"
)
var server = controllers.Server{}
func init() {
// loads values from .env into the system
if err := godotenv.Load(); err != nil {
log.Print("sad .env file found")
}
}
func Run() {
var err error
err = godotenv.Load()
if err != nil {
log.Fatalf("Error getting env, %v", err)
}
server.Initialize(os.Getenv("DB_DRIVER"), os.Getenv("DB_USER"), os.Getenv("DB_PASSWORD"), os.Getenv("DB_PORT"), os.Getenv("DB_HOST"), os.Getenv("DB_NAME"))
// This is for testing, when done, do well to comment
// seed.Load(server.DB)
apiPort := fmt.Sprintf(":%s", os.Getenv("AUTH_API_PORT"))
server.Run(apiPort)
fmt.Printf("Listening to port %s", apiPort)
}
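// Illustrative .env layout consumed by Run above (all values are
// placeholders, not from a real deployment):
//
//   DB_DRIVER=postgres
//   DB_USER=...
//   DB_PASSWORD=...
//   DB_PORT=5432
//   DB_HOST=localhost
//   DB_NAME=...
//   AUTH_API_PORT=8080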
| [
"\"DB_DRIVER\"",
"\"DB_USER\"",
"\"DB_PASSWORD\"",
"\"DB_PORT\"",
"\"DB_HOST\"",
"\"DB_NAME\"",
"\"AUTH_API_PORT\""
] | [] | [
"DB_PASSWORD",
"DB_HOST",
"DB_PORT",
"DB_NAME",
"DB_DRIVER",
"AUTH_API_PORT",
"DB_USER"
] | [] | ["DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_NAME", "DB_DRIVER", "AUTH_API_PORT", "DB_USER"] | go | 7 | 0 | |
commands/config.go | package commands
import (
"fmt"
"net"
"os"
"path/filepath"
"strings"
"github.com/sirupsen/logrus"
"github.com/debu99/cicd-runner/common"
"github.com/debu99/cicd-runner/network"
)
func getDefaultConfigFile() string {
return filepath.Join(getDefaultConfigDirectory(), "config.toml")
}
func getDefaultCertificateDirectory() string {
return filepath.Join(getDefaultConfigDirectory(), "certs")
}
type configOptions struct {
config *common.Config
ConfigFile string `short:"c" long:"config" env:"CONFIG_FILE" description:"Config file"`
}
func (c *configOptions) saveConfig() error {
return c.config.SaveConfig(c.ConfigFile)
}
func (c *configOptions) loadConfig() error {
config := common.NewConfig()
err := config.LoadConfig(c.ConfigFile)
if err != nil {
return err
}
c.config = config
return nil
}
func (c *configOptions) RunnerByName(name string) (*common.RunnerConfig, error) {
if c.config == nil {
return nil, fmt.Errorf("config has not been loaded")
}
for _, runner := range c.config.Runners {
if runner.Name == name {
return runner, nil
}
}
return nil, fmt.Errorf("could not find a runner with the name '%s'", name)
}
//nolint:lll
type configOptionsWithListenAddress struct {
configOptions
ListenAddress string `long:"listen-address" env:"LISTEN_ADDRESS" description:"Metrics / pprof server listening address"`
}
func (c *configOptionsWithListenAddress) listenAddress() (string, error) {
address := c.config.ListenAddress
if c.ListenAddress != "" {
address = c.ListenAddress
}
if address == "" {
return "", nil
}
_, port, err := net.SplitHostPort(address)
if err != nil && !strings.Contains(err.Error(), "missing port in address") {
return "", err
}
if port == "" {
return fmt.Sprintf("%s:%d", address, common.DefaultMetricsServerPort), nil
}
return address, nil
}
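// For illustration (not from the original source), listenAddress() normalizes
// addresses as follows (port values are placeholders):
//
//   ""               -> ""                                         (disabled)
//   "localhost"      -> "localhost:<common.DefaultMetricsServerPort>"
//   "localhost:9252" -> "localhost:9252"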
func init() {
configFile := os.Getenv("CONFIG_FILE")
if configFile == "" {
err := os.Setenv("CONFIG_FILE", getDefaultConfigFile())
if err != nil {
logrus.WithError(err).Fatal("Couldn't set CONFIG_FILE environment variable")
}
}
network.CertificateDirectory = getDefaultCertificateDirectory()
}
| [
"\"CONFIG_FILE\""
] | [] | [
"CONFIG_FILE"
] | [] | ["CONFIG_FILE"] | go | 1 | 0 | |
pkg/tuned/controller.go | package tuned
import (
"bufio" // scanner
"bytes" // bytes.Buffer
"context" // context.TODO()
"flag" // command-line options parsing
"fmt" // Printf()
"io/ioutil" // ioutil.ReadFile()
"math" // math.Pow()
"net" // net.Conn
"os" // os.Exit(), os.Stderr, ...
"os/exec" // os.Exec()
"strconv" // strconv
"strings" // strings.Join()
"syscall" // syscall.SIGHUP, ...
"time" // time.Second, ...
fsnotify "gopkg.in/fsnotify.v1"
"gopkg.in/ini.v1"
kmeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1"
ntoclient "github.com/openshift/cluster-node-tuning-operator/pkg/client"
ntoconfig "github.com/openshift/cluster-node-tuning-operator/pkg/config"
tunedset "github.com/openshift/cluster-node-tuning-operator/pkg/generated/clientset/versioned"
tunedinformers "github.com/openshift/cluster-node-tuning-operator/pkg/generated/informers/externalversions"
"github.com/openshift/cluster-node-tuning-operator/pkg/util"
)
// Constants
const (
// Constants used for instantiating Profile status conditions;
// they will be set to 2^0, 2^1, 2^2, ..., 2^n
scApplied Bits = 1 << iota
scWarn
scError
scTimeout
scUnknown
)
// Constants
const (
operandNamespace = "openshift-cluster-node-tuning-operator"
programName = "openshift-tuned"
tunedProfilesDirCustom = "/etc/tuned"
tunedProfilesDirSystem = "/usr/lib/tuned"
tunedConfFile = "tuned.conf"
tunedMainConfFile = "tuned-main.conf"
tunedActiveProfileFile = tunedProfilesDirCustom + "/active_profile"
tunedRecommendDir = tunedProfilesDirCustom + "/recommend.d"
tunedRecommendFile = tunedRecommendDir + "/50-openshift.conf"
tunedBootcmdlineEnvVar = "TUNED_BOOT_CMDLINE"
tunedBootcmdlineFile = tunedProfilesDirCustom + "/bootcmdline"
// A couple of seconds should be more than enough for TuneD daemon to gracefully stop;
// be generous and give it 10s.
tunedGracefulExitWait = time.Second * time.Duration(10)
// TuneD profile application typically takes ~0.5s and should never take more than ~5s.
// However, there were cases where TuneD daemon got stuck during application of a profile.
// Experience shows that subsequent restarts of TuneD can resolve this in certain situations,
// but not in others -- an extreme example is a TuneD profile including a profile that does
// not exist. TuneD itself has no mechanism for restarting a profile application that takes
// too long. The tunedTimeout below is time to wait for "profile applied/reload failed" from
// TuneD logs before restarting TuneD and thus retrying the profile application. Keep this
// reasonably low to workaround system/TuneD issues as soon as possible, but not too low
// to increase the system load by retrying profile applications that can never succeed.
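	// As an illustration, starting from tunedInitialTimeout (60s), each
	// successive timeout doubles the wait: 60s, 120s, 240s, ... (see tunedReload()).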
openshiftTunedHome = "/var/lib/tuned"
openshiftTunedRunDir = "/run/" + programName
openshiftTunedPidFile = openshiftTunedRunDir + "/" + programName + ".pid"
openshiftTunedProvider = openshiftTunedHome + "/provider"
openshiftTunedSocket = openshiftTunedHome + "/openshift-tuned.sock"
tunedInitialTimeout = 60 // timeout in seconds
// With the less aggressive rate limiter, retries will happen at 100ms*2^(retry_n-1):
// 100ms, 200ms, 400ms, 800ms, 1.6s, 3.2s, 6.4s, 12.8s, 25.6s, 51.2s, 102.4s, 3.4m, 6.8m, 13.7m, 27.3m
maxRetries = 15
// workqueue related constants
wqKindDaemon = "daemon"
wqKindTuned = "tuned"
wqKindProfile = "profile"
// If useSystemStalld is set to true, use the OS-shipped stalld; otherwise, use the
// NTO-shipped version. The aim here is to switch back to the legacy code easily just
// by setting this constant to false.
useSystemStalld = true
)
// Types
type arrayFlags []string
type Bits uint8
type sockAccepted struct {
conn net.Conn
err error
}
type Controller struct {
kubeconfig *restclient.Config
// workqueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens.
wqKube workqueue.RateLimitingInterface
wqTuneD workqueue.RateLimitingInterface
listers *ntoclient.Listers
clients *ntoclient.Clients
change struct {
// Did the node Profile k8s object change?
profile bool
// Did the "rendered" Tuned k8s object change?
rendered bool
// Did tunedBootcmdlineFile change on the filesystem?
// It is set to false on successful Profile update.
bootcmdline bool
// Did the command-line parameters to run the TuneD daemon change?
// In other words, is a complete restart of the TuneD daemon needed?
daemon bool
}
daemon struct {
// reloading is true during the TuneD daemon reload.
reloading bool
	// reloaded is true immediately after the TuneD daemon finished reloading,
// and the node Profile k8s object's Status needs to be set for the operator;
// it is set to false on successful Profile update.
reloaded bool
// debugging flag
debug bool
	// bit/set representation of Profile status conditions to report back via API.
status Bits
// stderr log from TuneD daemon to report back via API.
stderr string
// stopping is true while the controller tries to stop the TuneD daemon.
stopping bool
}
tunedCmd *exec.Cmd // external command (tuned) being prepared or run
tunedExit chan bool // bi-directional channel to signal and register TuneD daemon exit
stopCh <-chan struct{} // receive-only channel to stop the openshift-tuned controller
changeCh chan bool // bi-directional channel to wake-up the main thread to process accrued changes
changeChRet chan bool // bi-directional channel to announce success/failure of change processing
tunedTicker *time.Ticker // ticker that fires if TuneD daemon fails to report "profile applied/reload failed" within tunedTimeout
tunedTimeout int // timeout for TuneD daemon to report "profile applied/reload failed" [s]
tunedMainCfg *ini.File // global TuneD configuration as defined in tuned-main.conf
}
type wqKey struct {
kind string // object kind
name string // object name
event string // object event type (add/update/delete) or pass the full object on delete
}
// Functions
func mkdir(dir string) error {
if _, err := os.Stat(dir); os.IsNotExist(err) {
err = os.MkdirAll(dir, os.ModePerm)
if err != nil {
return err
}
}
return nil
}
func (a *arrayFlags) String() string {
return strings.Join(*a, ",")
}
func (a *arrayFlags) Set(value string) error {
*a = append(*a, value)
return nil
}
func parseCmdOpts() {
klog.InitFlags(nil)
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage: %s [options]\n", programName)
fmt.Fprintf(os.Stderr, "Example: %s\n\n", programName)
fmt.Fprintf(os.Stderr, "Options:\n")
flag.PrintDefaults()
}
flag.Parse()
}
func newController(stopCh <-chan struct{}) (*Controller, error) {
kubeconfig, err := ntoclient.GetConfig()
if err != nil {
return nil, err
}
listers := &ntoclient.Listers{}
clients := &ntoclient.Clients{}
controller := &Controller{
kubeconfig: kubeconfig,
listers: listers,
clients: clients,
tunedExit: make(chan bool, 1),
stopCh: stopCh,
changeCh: make(chan bool, 1),
changeChRet: make(chan bool, 1),
tunedTicker: time.NewTicker(tunedInitialTimeout),
tunedTimeout: tunedInitialTimeout,
}
controller.tunedTicker.Stop() // The ticker will be started/reset when TuneD starts.
return controller, nil
}
// eventProcessorKube is a long-running method that will continually
// read and process messages on the wqKube workqueue.
func (c *Controller) eventProcessorKube() {
for {
// Wait until there is a new item in the working queue.
obj, shutdown := c.wqKube.Get()
if shutdown {
return
}
klog.V(2).Infof("got event from workqueue")
func() {
defer c.wqKube.Done(obj)
var workqueueKey wqKey
var ok bool
if workqueueKey, ok = obj.(wqKey); !ok {
c.wqKube.Forget(obj)
klog.Errorf("expected wqKey in workqueue but got %#v", obj)
return
}
if err := c.sync(workqueueKey); err != nil {
requeued := c.wqKube.NumRequeues(workqueueKey)
// Limit retries to maxRetries. After that, stop trying.
if requeued < maxRetries {
klog.Errorf("unable to sync(%s/%s) requeued (%d): %v", workqueueKey.kind, workqueueKey.name, requeued, err)
// Re-enqueue the workqueueKey. Based on the rate limiter on the queue
// and the re-enqueue history, the workqueueKey will be processed later again.
c.wqKube.AddRateLimited(workqueueKey)
return
}
klog.Errorf("unable to sync(%s/%s) reached max retries (%d): %v", workqueueKey.kind, workqueueKey.name, maxRetries, err)
// Dropping the item after maxRetries unsuccessful retries.
c.wqKube.Forget(obj)
return
}
klog.V(1).Infof("event from workqueue (%s/%s) successfully processed", workqueueKey.kind, workqueueKey.name)
// Successful processing.
c.wqKube.Forget(obj)
}()
}
}
func (c *Controller) sync(key wqKey) error {
switch {
case key.kind == wqKindTuned:
if key.name != tunedv1.TunedRenderedResourceName {
return nil
}
klog.V(2).Infof("sync(): Tuned %s", key.name)
tuned, err := c.listers.TunedResources.Get(key.name)
if err != nil {
return fmt.Errorf("failed to get Tuned %s: %v", key.name, err)
}
change, err := profilesSync(tuned.Spec.Profile)
if err != nil {
return err
}
c.change.rendered = change
// Notify the event processor that the Tuned k8s object containing TuneD profiles changed.
c.wqTuneD.Add(wqKey{kind: wqKindDaemon})
return nil
case key.kind == wqKindProfile:
if key.name != getNodeName() {
return nil
}
klog.V(2).Infof("sync(): Profile %s", key.name)
profile, err := c.listers.TunedProfiles.Get(getNodeName())
if err != nil {
return fmt.Errorf("failed to get Profile %s: %v", key.name, err)
}
err = providerExtract(profile.Spec.Config.ProviderName)
if err != nil {
return err
}
err = tunedRecommendFileWrite(profile.Spec.Config.TunedProfile)
if err != nil {
return err
}
c.change.profile = true
if c.daemon.debug != profile.Spec.Config.Debug {
c.change.daemon = true // A complete restart of the TuneD daemon is needed due to a debugging request switched on or off.
c.daemon.debug = profile.Spec.Config.Debug
}
if profile.Spec.Config.TuneDConfig.ReapplySysctl != nil {
reapplySysctl := c.tunedMainCfg.Section("").Key("reapply_sysctl").MustBool()
if *profile.Spec.Config.TuneDConfig.ReapplySysctl != reapplySysctl {
iniCfgSetKey(c.tunedMainCfg, "reapply_sysctl", !reapplySysctl)
err = iniFileSave(tunedProfilesDirCustom+"/"+tunedMainConfFile, c.tunedMainCfg)
if err != nil {
return fmt.Errorf("failed to write global TuneD configuration file: %v", err)
}
c.change.daemon = true // A complete restart of the TuneD daemon is needed due to configuration change in tunedMainConfFile.
}
}
// Notify the event processor that the Profile k8s object containing information about which TuneD profile to apply changed.
c.wqTuneD.Add(wqKey{kind: wqKindDaemon})
return nil
default:
}
return nil
}
func newUnixListener(addr string) (net.Listener, error) {
if err := os.Remove(addr); err != nil && !os.IsNotExist(err) {
return nil, err
}
l, err := net.Listen("unix", addr)
if err != nil {
return nil, err
}
return l, nil
}
func disableSystemTuned() {
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
klog.Infof("disabling system tuned...")
cmd := exec.Command("/usr/bin/systemctl", "disable", "tuned", "--now")
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
klog.V(1).Infof("failed to disable system tuned: %v: %s", err, stderr.String()) // do not use log.Printf(), tuned has its own timestamping
}
}
func profilesEqual(profileFile string, profileData string) bool {
content, err := ioutil.ReadFile(profileFile)
if err != nil {
content = []byte{}
}
return profileData == string(content)
}
// profilesExtract extracts TuneD daemon profiles to the daemon configuration directory.
// Returns:
// - True if the data in the to-be-extracted recommended profile or the profiles being
// included from the current recommended profile have changed.
// - A map with successfully extracted TuneD profile names.
// - A map with names of TuneD profiles the current TuneD recommended profile depends on.
// - Error if any or nil.
func profilesExtract(profiles []tunedv1.TunedProfile) (bool, map[string]bool, map[string]bool, error) {
var (
change bool
)
klog.Infof("extracting TuneD profiles")
recommendedProfile, err := getRecommendedProfile()
if err != nil {
return change, map[string]bool{}, map[string]bool{}, err
}
// Get a list of TuneD profiles names the recommended profile depends on.
recommendedProfileDeps := profileDepends(recommendedProfile)
// Add the recommended profile itself.
recommendedProfileDeps[recommendedProfile] = true
extracted := map[string]bool{} // TuneD profile names present in TuneD CR and successfully extracted to /etc/tuned/<profile>/
for index, profile := range profiles {
if profile.Name == nil {
klog.Warningf("profilesExtract(): profile name missing for Profile %v", index)
continue
}
if profile.Data == nil {
klog.Warningf("profilesExtract(): profile data missing for Profile %v", index)
continue
}
profileDir := fmt.Sprintf("%s/%s", tunedProfilesDirCustom, *profile.Name)
profileFile := fmt.Sprintf("%s/%s", profileDir, tunedConfFile)
if err := mkdir(profileDir); err != nil {
return change, extracted, recommendedProfileDeps, fmt.Errorf("failed to create TuneD profile directory %q: %v", profileDir, err)
}
if recommendedProfileDeps[*profile.Name] {
// Recommended profile (dependency) name matches profile name of the profile
// currently being extracted, compare their content.
var un string
change = change || !profilesEqual(profileFile, *profile.Data)
if !change {
un = "un"
}
klog.Infof("recommended TuneD profile %s content %schanged [%s]", recommendedProfile, un, *profile.Name)
}
f, err := os.Create(profileFile)
if err != nil {
return change, extracted, recommendedProfileDeps, fmt.Errorf("failed to create TuneD profile file %q: %v", profileFile, err)
}
defer f.Close()
if _, err = f.WriteString(*profile.Data); err != nil {
return change, extracted, recommendedProfileDeps, fmt.Errorf("failed to write TuneD profile file %q: %v", profileFile, err)
}
extracted[*profile.Name] = true
}
return change, extracted, recommendedProfileDeps, nil
}
// profilesSync extracts TuneD daemon profiles to the daemon configuration directory
// and removes any TuneD profiles from /etc/tuned/<profile>/ once the same TuneD
// <profile> is no longer defined in the 'profiles' slice.
// Returns:
// - True if the data in the to-be-extracted recommended profile or the profiles being
// included from the current recommended profile have changed.
// - Error if any or nil.
func profilesSync(profiles []tunedv1.TunedProfile) (bool, error) {
change, extractedNew, recommendedProfileDeps, err := profilesExtract(profiles)
if err != nil {
return change, err
}
// Deal with TuneD profiles absent from Tuned CRs, but still present in /etc/tuned/<profile>/ the recommended profile depends on.
for profile := range recommendedProfileDeps {
if !extractedNew[profile] {
// TuneD profile does not exist in the Tuned CR, but the recommended profile depends on it.
profileDir := fmt.Sprintf("%s/%s", tunedProfilesDirCustom, profile)
if _, err := os.Stat(profileDir); err == nil {
// We have a stale TuneD profile directory in /etc/tuned/<profile>/
// Remove it.
err := os.RemoveAll(profileDir)
if err != nil {
return change, fmt.Errorf("failed to remove %q: %v", profileDir, err)
}
change = true
klog.Infof("removed TuneD profile %q", profileDir)
}
}
}
return change, nil
}
// providerExtract extracts Cloud Provider name into openshiftTunedProvider file.
func providerExtract(provider string) error {
klog.Infof("extracting cloud provider name to %v", openshiftTunedProvider)
f, err := os.Create(openshiftTunedProvider)
if err != nil {
return fmt.Errorf("failed to create cloud provider name file %q: %v", openshiftTunedProvider, err)
}
defer f.Close()
if _, err = f.WriteString(provider); err != nil {
return fmt.Errorf("failed to write cloud provider name file %q: %v", openshiftTunedProvider, err)
}
return nil
}
func openshiftTunedPidFileWrite() error {
if err := mkdir(openshiftTunedRunDir); err != nil {
return fmt.Errorf("failed to create %s run directory %q: %v", programName, openshiftTunedRunDir, err)
}
f, err := os.Create(openshiftTunedPidFile)
if err != nil {
return fmt.Errorf("failed to create %s pid file %q: %v", programName, openshiftTunedPidFile, err)
}
defer f.Close()
if _, err = f.WriteString(strconv.Itoa(os.Getpid())); err != nil {
return fmt.Errorf("failed to write %s pid file %q: %v", programName, openshiftTunedPidFile, err)
}
return nil
}
func tunedRecommendFileWrite(profileName string) error {
klog.V(2).Infof("tunedRecommendFileWrite(): %s", profileName)
if err := mkdir(tunedRecommendDir); err != nil {
return fmt.Errorf("failed to create directory %q: %v", tunedRecommendDir, err)
}
f, err := os.Create(tunedRecommendFile)
if err != nil {
return fmt.Errorf("failed to create file %q: %v", tunedRecommendFile, err)
}
defer f.Close()
if _, err = f.WriteString(fmt.Sprintf("[%s]\n", profileName)); err != nil {
return fmt.Errorf("failed to write file %q: %v", tunedRecommendFile, err)
}
klog.Infof("written %q to set TuneD profile %s", tunedRecommendFile, profileName)
return nil
}
func (c *Controller) tunedCreateCmd() *exec.Cmd {
args := []string{"--no-dbus"}
if c.daemon.debug {
args = append(args, "--debug")
}
return exec.Command("/usr/sbin/tuned", args...)
}
func (c *Controller) tunedRun() {
klog.Infof("starting tuned...")
defer func() {
close(c.tunedExit)
}()
cmdReader, err := c.tunedCmd.StderrPipe()
if err != nil {
klog.Errorf("error creating StderrPipe for tuned: %v", err)
return
}
scanner := bufio.NewScanner(cmdReader)
go func() {
for scanner.Scan() {
l := scanner.Text()
fmt.Printf("%s\n", l)
if c.daemon.stopping {
// We have decided to stop TuneD. Apart from showing the logs it is
// now unnecessary/undesirable to perform any of the following actions.
// The undesirability comes from extra processing which will come if
// TuneD manages to "get unstuck" during this phase before it receives
// SIGKILL (note the time window between SIGTERM/SIGKILL).
continue
}
	profileApplied := strings.Contains(l, " tuned.daemon.daemon: static tuning from profile ") && strings.Contains(l, " applied")
	reloadFailed := strings.Contains(l, " tuned.daemon.controller: Failed to reload TuneD: ")
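	// Illustrative examples of TuneD log lines matched above:
	//   "... tuned.daemon.daemon: static tuning from profile 'openshift-node' applied"
	//   "... tuned.daemon.controller: Failed to reload TuneD: ..."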
if profileApplied {
c.daemon.status |= scApplied
}
strIndex := strings.Index(l, " WARNING ")
if strIndex >= 0 {
c.daemon.status |= scWarn
c.daemon.stderr = l[strIndex:] // trim timestamp from log
}
strIndex = strings.Index(l, " ERROR ")
if strIndex >= 0 {
c.daemon.status |= scError
c.daemon.stderr = l[strIndex:] // trim timestamp from log
}
if c.daemon.reloading {
c.daemon.reloading = !profileApplied && !reloadFailed
c.daemon.reloaded = !c.daemon.reloading
if c.daemon.reloaded {
klog.V(2).Infof("profile applied or reload failed, stopping the TuneD watcher")
c.tunedTimeout = tunedInitialTimeout // initialize the timeout
c.daemon.status &= ^scTimeout // clear the scTimeout status bit
c.tunedTicker.Stop() // profile applied or reload failed, stop the TuneD watcher
// Notify the event processor that the TuneD daemon finished reloading.
c.wqTuneD.Add(wqKey{kind: wqKindDaemon})
}
}
}
}()
c.daemon.reloading = true
// Clear the set out of which Profile status conditions are created. Keep timeout condition if already set.
c.daemon.status &= scTimeout
c.daemon.stderr = ""
if err = c.tunedCmd.Start(); err != nil {
klog.Errorf("error starting tuned: %v", err)
return
}
if err = c.tunedCmd.Wait(); err != nil {
// The command exited with non 0 exit status, e.g. terminated by a signal.
klog.Errorf("error waiting for tuned: %v", err)
return
}
return
}
// tunedStop tries to gracefully stop the TuneD daemon process by sending it SIGTERM.
// If the TuneD daemon does not respond by terminating within tunedGracefulExitWait
// duration, SIGKILL is sent. This method returns an indication whether the TuneD
// daemon exited gracefully (true) or SIGKILL had to be sent (false).
func (c *Controller) tunedStop() (bool, error) {
c.daemon.stopping = true
defer func() {
c.daemon.stopping = false
}()
if c.tunedCmd == nil {
// Looks like there has been a termination signal prior to starting tuned.
return false, nil
}
if c.tunedCmd.Process != nil {
// The TuneD daemon rolls back the current profile and should terminate on SIGTERM.
klog.V(1).Infof("sending SIGTERM to PID %d", c.tunedCmd.Process.Pid)
c.tunedCmd.Process.Signal(syscall.SIGTERM)
} else {
// This should never happen!
return false, fmt.Errorf("cannot find the TuneD process!")
}
// Wait for TuneD process to stop -- this will enable node-level tuning rollback.
select {
case <-c.tunedExit:
case <-time.After(tunedGracefulExitWait):
// It looks like the TuneD daemon refuses to terminate gracefully on SIGTERM
// within tunedGracefulExitWait.
klog.V(1).Infof("sending SIGKILL to PID %d", c.tunedCmd.Process.Pid)
c.tunedCmd.Process.Signal(syscall.SIGKILL)
<-c.tunedExit
return false, nil
}
klog.V(1).Infof("TuneD process terminated gracefully")
return true, nil
}
func (c *Controller) tunedReload(timeoutInitiated bool) error {
c.daemon.reloading = true
c.daemon.status = 0 // clear the set out of which Profile status conditions are created
c.daemon.stderr = ""
tunedTimeout := time.Second * time.Duration(c.tunedTimeout)
if c.tunedTicker == nil {
// This should never happen as the ticker is initialized at controller creation time.
c.tunedTicker = time.NewTicker(tunedTimeout)
} else {
c.tunedTicker.Reset(tunedTimeout)
}
if timeoutInitiated {
c.daemon.status = scTimeout // timeout waiting for the daemon should be reported to Profile status
c.tunedTimeout *= 2
}
if c.tunedCmd == nil {
// TuneD hasn't been started by openshift-tuned, start it.
c.tunedCmd = c.tunedCreateCmd()
go c.tunedRun()
return nil
}
klog.Infof("reloading tuned...")
if c.tunedCmd.Process != nil {
klog.Infof("sending HUP to PID %d", c.tunedCmd.Process.Pid)
err := c.tunedCmd.Process.Signal(syscall.SIGHUP)
if err != nil {
return fmt.Errorf("error sending SIGHUP to PID %d: %v\n", c.tunedCmd.Process.Pid, err)
}
} else {
// This should never happen!
return fmt.Errorf("cannot find the TuneD process!")
}
return nil
}
// tunedRestart restarts the TuneD daemon. The "stop" part is synchronous
// to ensure proper termination of TuneD, the "start" part is asynchronous.
func (c *Controller) tunedRestart(timeoutInitiated bool) (err error) {
if _, err = c.tunedStop(); err != nil {
return err
}
c.tunedCmd = nil // Cmd.Start() cannot be used more than once
c.tunedExit = make(chan bool, 1) // Once tunedStop() terminates, the tunedExit channel is closed!
if err = c.tunedReload(timeoutInitiated); err != nil {
return err
}
return nil
}
// getActiveProfile returns the active profile currently in use by the TuneD daemon.
// On error, an empty string is returned.
func getActiveProfile() (string, error) {
var responseString = ""
f, err := os.Open(tunedActiveProfileFile)
if err != nil {
return "", fmt.Errorf("error opening Tuned active profile file %s: %v", tunedActiveProfileFile, err)
}
defer f.Close()
var scanner = bufio.NewScanner(f)
for scanner.Scan() {
responseString = strings.TrimSpace(scanner.Text())
}
return responseString, nil
}
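// getBootcmdline parses tunedBootcmdlineFile and returns the value assigned to
// TUNED_BOOT_CMDLINE with any surrounding quotes stripped; e.g. an (illustrative)
// line TUNED_BOOT_CMDLINE="skew_tick=1" yields skew_tick=1.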
func getBootcmdline() (string, error) {
var responseString = ""
f, err := os.Open(tunedBootcmdlineFile)
if err != nil {
return "", fmt.Errorf("error opening Tuned bootcmdline file %s: %v", tunedBootcmdlineFile, err)
}
defer f.Close()
var scanner = bufio.NewScanner(f)
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
	if strings.HasPrefix(s, "TUNED_BOOT_CMDLINE=") {
responseString = s[len("TUNED_BOOT_CMDLINE="):]
if len(responseString) > 0 && (responseString[0] == '"' || responseString[0] == '\'') {
responseString = responseString[1 : len(responseString)-1]
}
// Don't break here to behave more like shell evaluation.
}
}
return responseString, nil
}
func getRecommendedProfile() (string, error) {
var stdout, stderr bytes.Buffer
klog.V(1).Infof("getting recommended profile...")
cmd := exec.Command("/usr/sbin/tuned-adm", "recommend")
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
return "", fmt.Errorf("error getting recommended profile: %v: %v", err, stderr.String())
}
responseString := strings.TrimSpace(stdout.String())
return responseString, nil
}
// Method changeSyncer performs k8s Profile object updates and TuneD daemon
// reloads as needed. Returns indication whether the change was successfully
// synced and an error. Only critical errors are returned, as non-nil errors
// will cause restart of the main control loop -- the changeWatcher() method.
func (c *Controller) changeSyncer() (synced bool, err error) {
var reload bool
if c.daemon.reloading {
// This should not be necessary, but keep this here as a reminder.
return false, fmt.Errorf("changeSyncer(): called while the TuneD daemon was reloading")
}
if c.change.bootcmdline || c.daemon.reloaded {
// One or both of the following happened:
// 1) tunedBootcmdlineFile changed on the filesystem. This is very likely the result of
// applying a TuneD profile by the TuneD daemon. Make sure the node Profile k8s object
// is in sync with tunedBootcmdlineFile so the operator can take an appropriate action.
// 2) TuneD daemon was reloaded. Make sure the node Profile k8s object is in sync with
// the active profile, e.g. the Profile indicates the presence of the stall daemon on
// the host if requested by the current active profile.
if err = c.updateTunedProfile(); err != nil {
klog.Error(err.Error())
return false, nil // retry later
} else {
// The node Profile k8s object was updated successfully. Clear the flags indicating
// a check for syncing the object is needed.
c.change.bootcmdline = false
c.daemon.reloaded = false
}
}
// Check whether reload of the TuneD daemon is really necessary due to a Profile change.
if c.change.profile {
// The node Profile k8s object changed.
var activeProfile, recommendedProfile string
if activeProfile, err = getActiveProfile(); err != nil {
return false, err
}
if recommendedProfile, err = getRecommendedProfile(); err != nil {
return false, err
}
if (c.daemon.status & scApplied) == 0 {
if len(activeProfile) > 0 {
// activeProfile == "" means we have not started TuneD daemon yet; do not log that case
klog.Infof("re-applying profile (%s) as the previous application did not complete", activeProfile)
}
reload = true
} else if (c.daemon.status & scError) != 0 {
klog.Infof("re-applying profile (%s) as the previous application ended with error(s)", activeProfile)
reload = true
} else if activeProfile != recommendedProfile {
klog.Infof("active profile (%s) != recommended profile (%s)", activeProfile, recommendedProfile)
reload = true
c.daemon.status = scUnknown
} else {
klog.Infof("active and recommended profile (%s) match; profile change will not trigger profile reload", activeProfile)
// We do not need to reload the TuneD daemon, however, someone may have tampered with the k8s Profile for this node.
// Make sure it is up-to-date.
if err = c.updateTunedProfile(); err != nil {
klog.Error(err.Error())
return false, nil // retry later
}
}
c.change.profile = false
}
if c.change.rendered {
// The "rendered" Tuned k8s object changed.
c.change.rendered = false
reload = true
c.daemon.status = scUnknown
}
if c.change.daemon {
// Complete restart of the TuneD daemon needed (e.g. using --debug option).
c.change.daemon = false
c.daemon.status = scUnknown
err = c.tunedRestart(false)
return err == nil, err
}
if reload {
err = c.tunedReload(false)
}
return err == nil, err
}
// eventProcessorTuneD is a long-running method that will continually
// read and process messages on the wqTuneD workqueue.
func (c *Controller) eventProcessorTuneD() {
for {
// Wait until there is a new item in the working queue.
obj, shutdown := c.wqTuneD.Get()
if shutdown {
return
}
klog.V(2).Infof("got event from workqueue")
func() {
defer c.wqTuneD.Done(obj)
var workqueueKey wqKey
var ok bool
if workqueueKey, ok = obj.(wqKey); !ok {
c.wqTuneD.Forget(obj)
klog.Errorf("expected wqKey in workqueue but got %#v", obj)
return
}
c.changeCh <- true
eventProcessed := <-c.changeChRet
if !eventProcessed {
requeued := c.wqTuneD.NumRequeues(workqueueKey)
// Limit retries to maxRetries. After that, stop trying.
if requeued < maxRetries {
klog.Errorf("unable to sync(%s/%s) requeued (%d)", workqueueKey.kind, workqueueKey.name, requeued)
// Re-enqueue the workqueueKey. Based on the rate limiter on the queue
// and the re-enqueue history, the workqueueKey will be processed later again.
c.wqTuneD.AddRateLimited(workqueueKey)
return
}
klog.Errorf("unable to sync(%s/%s) reached max retries (%d)", workqueueKey.kind, workqueueKey.name, maxRetries)
// Dropping the item after maxRetries unsuccessful retries.
c.wqTuneD.Forget(obj)
return
}
klog.V(1).Infof("event from workqueue (%s/%s) successfully processed", workqueueKey.kind, workqueueKey.name)
// Successful processing.
c.wqTuneD.Forget(obj)
}()
}
}
func getTuned(obj interface{}) (tuned *tunedv1.Tuned, err error) {
tuned, ok := obj.(*tunedv1.Tuned)
if !ok {
return nil, fmt.Errorf("could not convert object to a Tuned object: %+v", obj)
}
return tuned, nil
}
func getTunedProfile(obj interface{}) (profile *tunedv1.Profile, err error) {
profile, ok := obj.(*tunedv1.Profile)
if !ok {
return nil, fmt.Errorf("could not convert object to a Tuned Profile object: %+v", obj)
}
return profile, nil
}
func getNodeName() string {
name := os.Getenv("OCP_NODE_NAME")
if len(name) == 0 {
// Something is seriously wrong, OCP_NODE_NAME must be defined via Tuned DaemonSet.
panic("OCP_NODE_NAME unset or empty")
}
return name
}
func (c *Controller) stalldRequested(profileName string) (*bool, error) {
var ret bool
tuned, err := c.listers.TunedResources.Get(tunedv1.TunedRenderedResourceName)
if err != nil {
return &ret, fmt.Errorf("failed to get Tuned %s: %v", tunedv1.TunedRenderedResourceName, err)
}
for index, profile := range tuned.Spec.Profile {
if profile.Name == nil {
klog.Warningf("tunedHasStalld(): profile name missing for Profile %v", index)
continue
}
if *profile.Name != profileName {
continue
}
if profile.Data == nil {
klog.Warningf("tunedHasStalld(): profile data missing for Profile %v", index)
continue
}
return profileHasStalld(profile.Data), nil
}
return &ret, nil
}
// Method updateTunedProfile updates a Tuned Profile with information to report back
// to the operator. Note this method must be called only when the TuneD daemon is
// not reloading.
func (c *Controller) updateTunedProfile() (err error) {
var (
bootcmdline string
stalldRequested *bool
)
if bootcmdline, err = getBootcmdline(); err != nil {
// This should never happen unless something is seriously wrong (e.g. TuneD
// daemon no longer uses tunedBootcmdlineFile). Do not continue.
return fmt.Errorf("unable to get kernel command-line parameters: %v", err)
}
profile, err := c.listers.TunedProfiles.Get(getNodeName())
if err != nil {
return fmt.Errorf("failed to get Profile %s: %v", profile.Name, err)
}
if !useSystemStalld {
if stalldRequested, err = c.stalldRequested(profile.Spec.Config.TunedProfile); err != nil {
return fmt.Errorf("unable to assess whether stalld is requested: %v", err)
}
}
activeProfile, err := getActiveProfile()
if err != nil {
return err
}
statusConditions := computeStatusConditions(c.daemon.status, c.daemon.stderr, profile.Status.Conditions)
stalldUnchanged := util.PtrBoolEqual(profile.Status.Stalld, stalldRequested)
if profile.Status.Bootcmdline == bootcmdline && stalldUnchanged &&
profile.Status.TunedProfile == activeProfile && conditionsEqual(profile.Status.Conditions, statusConditions) {
// Do not update node Profile unnecessarily (e.g. bootcmdline did not change).
// This will save operator CPU cycles trying to reconcile objects that do not
// need reconciling.
klog.V(2).Infof("updateTunedProfile(): no need to update Profile %s", profile.Name)
return nil
}
profile = profile.DeepCopy() // never update the objects from cache
profile.Status.Bootcmdline = bootcmdline
profile.Status.Stalld = stalldRequested
profile.Status.TunedProfile = activeProfile
profile.Status.Conditions = statusConditions
_, err = c.clients.Tuned.TunedV1().Profiles(operandNamespace).Update(context.TODO(), profile, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to update Profile %s status: %v", profile.Name, err)
}
klog.Infof("updated Profile %s stalld=%v, bootcmdline: %s", profile.Name, stalldRequested, bootcmdline)
return nil
}
func (c *Controller) informerEventHandler(workqueueKey wqKey) cache.ResourceEventHandlerFuncs {
return cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
accessor, err := kmeta.Accessor(o)
if err != nil {
klog.Errorf("unable to get accessor for added object: %s", err)
return
}
workqueueKey.name = accessor.GetName()
if workqueueKey.kind == wqKindProfile && workqueueKey.name == getNodeName() {
// When moving this code elsewhere, consider whether it is desirable
// to disable system tuned on nodes that should not be managed by
// openshift-tuned.
disableSystemTuned()
}
klog.V(2).Infof("add event to workqueue due to %s (add)", util.ObjectInfo(o))
c.wqKube.Add(wqKey{kind: workqueueKey.kind, name: workqueueKey.name})
},
UpdateFunc: func(o, n interface{}) {
newAccessor, err := kmeta.Accessor(n)
if err != nil {
klog.Errorf("unable to get accessor for new object: %s", err)
return
}
oldAccessor, err := kmeta.Accessor(o)
if err != nil {
klog.Errorf("unable to get accessor for old object: %s", err)
return
}
if newAccessor.GetResourceVersion() == oldAccessor.GetResourceVersion() {
// Periodic resync will send update events for all known resources.
// Two different versions of the same resource will always have different RVs.
return
}
klog.V(2).Infof("add event to workqueue due to %s (update)", util.ObjectInfo(n))
c.wqKube.Add(wqKey{kind: workqueueKey.kind, name: newAccessor.GetName()})
},
DeleteFunc: func(o interface{}) {
object, ok := o.(metav1.Object)
if !ok {
tombstone, ok := o.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("error decoding object, invalid type")
return
}
object, ok = tombstone.Obj.(metav1.Object)
if !ok {
klog.Errorf("error decoding object tombstone, invalid type")
return
}
klog.V(4).Infof("recovered deleted object %s from tombstone", object.GetName())
}
klog.V(2).Infof("add event to workqueue due to %s (delete)", util.ObjectInfo(object))
c.wqKube.Add(wqKey{kind: workqueueKey.kind, name: object.GetName()})
},
}
}
// The changeWatcher method is the main control loop watching for changes to be applied
// and supervising the TuneD daemon. On successful (error == nil) exit, no attempt at
// reentering this control loop should be made as it is an indication of an intentional
// exit on request.
func (c *Controller) changeWatcher() (err error) {
var (
lStop bool
)
c.tunedMainCfg, err = iniFileLoad(tunedProfilesDirCustom + "/" + tunedMainConfFile)
if err != nil {
return fmt.Errorf("failed to load global TuneD configuration file: %v", err)
}
// Use less aggressive per-item only exponential rate limiting for both wqKube and wqTuneD.
// Start retrying at 100ms with a maximum of 1800s.
c.wqKube = workqueue.NewRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(100*time.Millisecond, 1800*time.Second))
c.wqTuneD = workqueue.NewRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(100*time.Millisecond, 1800*time.Second))
c.clients.Tuned, err = tunedset.NewForConfig(c.kubeconfig)
if err != nil {
return err
}
tunedInformerFactory := tunedinformers.NewSharedInformerFactoryWithOptions(
c.clients.Tuned,
ntoconfig.ResyncPeriod(),
tunedinformers.WithNamespace(operandNamespace))
trInformer := tunedInformerFactory.Tuned().V1().Tuneds()
c.listers.TunedResources = trInformer.Lister().Tuneds(operandNamespace)
trInformer.Informer().AddEventHandler(c.informerEventHandler(wqKey{kind: wqKindTuned}))
tpInformer := tunedInformerFactory.Tuned().V1().Profiles()
c.listers.TunedProfiles = tpInformer.Lister().Profiles(operandNamespace)
tpInformer.Informer().AddEventHandler(c.informerEventHandler(wqKey{kind: wqKindProfile}))
tunedInformerFactory.Start(c.stopCh) // Tuned/Profile
// Wait for the caches to be synced before starting worker(s).
klog.V(1).Info("waiting for informer caches to sync")
ok := cache.WaitForCacheSync(c.stopCh,
trInformer.Informer().HasSynced,
tpInformer.Informer().HasSynced,
)
if !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
klog.V(1).Info("starting events processors")
go wait.Until(c.eventProcessorKube, time.Second, c.stopCh)
defer c.wqKube.ShutDown()
go wait.Until(c.eventProcessorTuneD, time.Second, c.stopCh)
defer c.wqTuneD.ShutDown()
klog.Info("started events processors")
// Watch for filesystem changes on the tunedBootcmdlineFile file.
wFs, err := fsnotify.NewWatcher()
if err != nil {
return fmt.Errorf("failed to create filesystem watcher: %v", err)
}
defer wFs.Close()
// Register fsnotify watchers.
for _, element := range []string{tunedBootcmdlineFile} {
err = wFs.Add(element)
if err != nil {
return fmt.Errorf("failed to start watching %q: %v", element, err)
}
}
l, err := newUnixListener(openshiftTunedSocket)
if err != nil {
return fmt.Errorf("cannot create %q listener: %v", openshiftTunedSocket, err)
}
defer func() {
lStop = true
l.Close()
}()
sockConns := make(chan sockAccepted, 1)
go func() {
for {
conn, err := l.Accept()
if lStop {
	// The listener was closed on the return from changeWatcher(); exit the goroutine.
return
}
sockConns <- sockAccepted{conn, err}
}
}()
klog.Info("started controller")
for {
select {
case <-c.stopCh:
klog.Infof("termination signal received, stop")
return nil
case s := <-sockConns:
const socketCmdStop = "stop"
var rolledBack bool
if s.err != nil {
return fmt.Errorf("connection accept error: %v", err)
}
buf := make([]byte, len(socketCmdStop))
nr, _ := s.conn.Read(buf)
data := buf[0:nr]
if string(data) != socketCmdStop {
// We only support one command over the socket interface at this point.
klog.Warningf("ignoring unsupported command received over socket: %s", string(data))
continue
}
// At this point we know there was a request to exit, do not return any more errors,
// just log them.
if rolledBack, err = c.tunedStop(); err != nil {
klog.Errorf("%s", err.Error())
}
resp := make([]byte, 2)
if rolledBack {
// Indicate a successful settings rollback.
resp = append(resp, 'o', 'k')
}
_, err := s.conn.Write(resp)
if err != nil {
klog.Errorf("cannot write a response via %q: %v", openshiftTunedSocket, err)
}
return nil
case <-c.tunedExit:
c.tunedCmd = nil // Cmd.Start() cannot be used more than once
klog.Infof("TuneD process exitted...")
// Do not be too aggressive about keeping the TuneD daemon around.
// TuneD daemon might have exitted after receiving SIGTERM during
// system reboot/shutdown.
return nil
case fsEvent := <-wFs.Events:
klog.V(2).Infof("fsEvent")
if fsEvent.Op&fsnotify.Write == fsnotify.Write {
klog.V(1).Infof("write event on: %s", fsEvent.Name)
c.change.bootcmdline = true
// Notify the event processor that the TuneD daemon calculated new kernel command-line parameters.
c.wqTuneD.Add(wqKey{kind: wqKindDaemon})
}
case err := <-wFs.Errors:
return fmt.Errorf("error watching filesystem: %v", err)
case <-c.tunedTicker.C:
klog.Errorf("timeout (%d) to apply TuneD profile; restarting TuneD daemon", c.tunedTimeout)
err := c.tunedRestart(true)
if err != nil {
return err
}
// TuneD profile application is failing, make this visible in "oc get profile" output.
if err = c.updateTunedProfile(); err != nil {
klog.Error(err.Error())
}
case <-c.changeCh:
var synced bool
klog.V(2).Infof("changeCh")
if c.tunedTimeout > tunedInitialTimeout {
// TuneD is "degraded" as the previous profile application did not succeed in
// tunedInitialTimeout [s]. There has been a change we must act upon though
// fairly quickly.
c.tunedTimeout = tunedInitialTimeout
klog.Infof("previous application of TuneD profile failed; change detected, scheduling full restart in 1s")
c.tunedTicker.Reset(time.Second * time.Duration(1))
c.changeChRet <- true
continue
}
if c.daemon.reloading {
// Do not reload the TuneD daemon unless it finished with the previous reload.
c.changeChRet <- false
continue
}
synced, err := c.changeSyncer()
if err != nil {
return err
}
c.changeChRet <- synced
}
}
}
func retryLoop(c *Controller) (err error) {
const (
errsMax = 5 // the maximum number of consecutive errors within errsMaxWithinSeconds
sleepRetryInit = 10 // the initial retry period [s]
)
var (
errs int
sleepRetry int64 = sleepRetryInit
	// sum of the geometric series S_n = x_1*(q^n - 1)/(q - 1) with x_1 = sleepRetry and q = 2,
	// plus 60s allowed for each changeWatcher() call
errsMaxWithinSeconds int64 = (sleepRetry*int64(math.Pow(2, errsMax)) - sleepRetry) + errsMax*60
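	// With the constants above this evaluates to 10*2^5 - 10 + 5*60 = 610 seconds.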
)
defer func() {
if c.tunedCmd == nil {
return
}
if c.tunedCmd.Process != nil {
if _, err := c.tunedStop(); err != nil {
klog.Errorf("%s", err.Error())
}
} else {
// This should never happen!
klog.Errorf("cannot find the TuneD process!")
}
}()
errsTimeStart := time.Now().Unix()
for {
err = c.changeWatcher()
if err == nil {
return nil
}
select {
case <-c.stopCh:
return err
default:
}
klog.Errorf("%s", err.Error())
sleepRetry *= 2
klog.Infof("increased retry period to %d", sleepRetry)
if errs++; errs >= errsMax {
now := time.Now().Unix()
if (now - errsTimeStart) <= errsMaxWithinSeconds {
klog.Errorf("seen %d errors in %d seconds (limit was %d), terminating...", errs, now-errsTimeStart, errsMaxWithinSeconds)
return err
}
errs = 0
sleepRetry = sleepRetryInit
errsTimeStart = time.Now().Unix()
klog.Infof("initialized retry period to %d", sleepRetry)
}
select {
case <-c.stopCh:
return nil
case <-time.After(time.Second * time.Duration(sleepRetry)):
continue
}
}
}
func Run(stopCh <-chan struct{}, boolVersion *bool, version string) {
klog.Infof("starting %s %s", programName, version)
parseCmdOpts()
if *boolVersion {
fmt.Fprintf(os.Stderr, "%s %s\n", programName, version)
os.Exit(0)
}
err := openshiftTunedPidFileWrite()
if err != nil {
// openshift-tuned PID file is not really used by anything, remove it in the future?
panic(err.Error())
}
c, err := newController(stopCh)
if err != nil {
// This looks really bad, there was an error creating the Controller.
panic(err.Error())
}
err = retryLoop(c)
if err != nil {
panic(err.Error())
}
}
| [
"\"OCP_NODE_NAME\""
] | [] | [
"OCP_NODE_NAME"
] | [] | ["OCP_NODE_NAME"] | go | 1 | 0 | |
srgan-g1d1-with-lr-decay/train.py | # import ipdb
import argparse
import os
import numpy as np
import math
import itertools
import sys
import time
import datetime
import glob
import random
import cv2
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torch.utils.data import Dataset
from torch.optim.lr_scheduler import MultiStepLR
import torchvision
import torchvision.transforms as transforms
#import torchvision.utils as utils
#import torchvision.transforms.functional as F
from torchvision.utils import save_image, make_grid
from torchvision.models import vgg19
from math import log10
from tqdm import tqdm
import pandas as pd
from PIL import Image
from visualize import Visualizer
from torchnet.meter import AverageValueMeter
from models import *
from datasets import *
import pytorch_ssim
parser = argparse.ArgumentParser()
parser.add_argument("--epoch", type=int, default=0, help="epoch to start training from")
parser.add_argument("--n_epochs", type=int, default=100, help="number of epochs of training")
parser.add_argument("--train_dataset_name", type=str, default="train", help="name of the train dataset")
parser.add_argument("--val_dataset_name", type=str, default="val", help="name of the val dataset")
parser.add_argument("--train_batch_size", type=int, default=128, help="size of the train batches")
parser.add_argument("--val_batch_size", type=int, default=1, help="size of the val batches")
parser.add_argument('--generatorLR', type=float, default=0.0002, help='learning rate for generator')
parser.add_argument('--discriminatorLR', type=float, default=0.0002, help='learning rate for discriminator')
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of second order momentum of gradient")
parser.add_argument("--decay_epoch", type=int, default=50, help="start lr decay every decay_epoch epochs")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--hr_height", type=int, default=128, help="high res. image height")
parser.add_argument("--hr_width", type=int, default=128, help="high res. image width")
parser.add_argument("--channels", type=int, default=1, help="number of image channels")
parser.add_argument('--scale_factor', default=4, type=int, choices=[2, 4, 8], help='super resolution scale factor')
parser.add_argument("--g_every", type=int, default=1, help="train the generator every g_every batches")
parser.add_argument("--d_every", type=int, default=1, help="train the discriminator every d_every batches")
parser.add_argument("--plot_every", type=int, default=100, help="plot using visdom every plot_every samples")
parser.add_argument("--save_every", type=int, default=1, help="save the model every save_every epochs")
opt = parser.parse_args()
print(opt)
os.environ["CUDA_VISIBLE_DEVICES"] = "1, 4, 6"
os.makedirs("saved_models", exist_ok=True)
os.makedirs("images", exist_ok=True)
vis = Visualizer('SRGAN')
# cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
hr_shape = (opt.hr_height, opt.hr_width)
# Initialize generator and discriminator
generator = GeneratorResNet(in_channels=opt.channels, out_channels=opt.channels, n_residual_blocks=16) # change
discriminator = Discriminator(input_shape=(opt.channels, *hr_shape))
feature_extractor = FeatureExtractor()
print('# generator parameters:', sum(param.numel() for param in generator.parameters())) # change
print('# discriminator parameters:', sum(param.numel() for param in discriminator.parameters())) # change
print('# feature_extractor parameters:', sum(param.numel() for param in feature_extractor.parameters())) # change
# print (generator)
# print (discriminator)
# print (feature_extractor)
# Set feature extractor to inference mode
feature_extractor.eval()
# Losses
criterion_GAN = torch.nn.MSELoss(reduction='none')
criterion_content = torch.nn.L1Loss(reduction='none')
# Configure model
generator = nn.DataParallel(generator, device_ids=[0, 1, 2])
generator.to(device)
discriminator = nn.DataParallel(discriminator, device_ids=[0, 1, 2])
discriminator.to(device)
feature_extractor = nn.DataParallel(feature_extractor, device_ids=[0, 1, 2])
feature_extractor.to(device)
# criterion_GAN = nn.DataParallel(criterion_GAN, device_ids=[0, 1, 2])
# criterion_GAN = criterion_GAN.to(device)
# criterion_content = nn.DataParallel(criterion_content, device_ids=[0, 1, 2])
# criterion_content = criterion_content.to(device)
if opt.epoch != 0:
# Load pretrained models
generator.load_state_dict(torch.load("saved_models/generator_%d_%d.pth" % (opt.scale_factor,opt.epoch)))
discriminator.load_state_dict(torch.load("saved_models/discriminator_%d_%d.pth" % (opt.scale_factor,opt.epoch)))
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.generatorLR, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.discriminatorLR, betas=(opt.b1, opt.b2))
scheduler_G = MultiStepLR(optimizer_G, milestones=[opt.decay_epoch], gamma=0.1)
scheduler_D = MultiStepLR(optimizer_D, milestones=[opt.decay_epoch], gamma=0.1)
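# MultiStepLR multiplies the learning rate by gamma at each milestone, so with the
# defaults (lr=2e-4, decay_epoch=50, gamma=0.1) both rates drop to 2e-5 at the
# 50th scheduler step.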
# Configure data loader
train_dataloader = DataLoader(
TrainImageDataset("../../Datasets/My_dataset/single_channel_100000/%s" % opt.train_dataset_name, hr_shape=hr_shape, scale_factor = opt.scale_factor), # change
batch_size=opt.train_batch_size,
shuffle=True,
num_workers=opt.n_cpu,
)
val_dataloader = DataLoader(
ValImageDataset("../../Datasets/My_dataset/single_channel_100000/%s" % opt.val_dataset_name, hr_shape=hr_shape, scale_factor = opt.scale_factor), # change
batch_size=opt.val_batch_size,
shuffle=False,
num_workers=opt.n_cpu,
)
Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor
loss_GAN_meter = AverageValueMeter()
loss_content_meter = AverageValueMeter()
loss_G_meter= AverageValueMeter()
loss_real_meter = AverageValueMeter()
loss_fake_meter = AverageValueMeter()
loss_D_meter = AverageValueMeter()
# ----------
# Training
# ----------
results = {'loss_G': [], 'loss_D': [], 'loss_GAN': [],'loss_content': [], 'loss_real': [], 'loss_fake': [], 'psnr': [], 'ssim': []}
epoch_start = time.time()
for epoch in range(opt.epoch, opt.n_epochs):
training_results = {'batch_sizes': 0, 'loss_G': 0, 'loss_D': 0, 'loss_GAN': 0, 'loss_content': 0, 'loss_real': 0, 'loss_fake': 0}
generator.train()
discriminator.train()
training_out_path = 'training_results/SR_factor_' + str(opt.scale_factor) + '/' + 'epoch_' + str(epoch) + '/'
os.makedirs(training_out_path, exist_ok=True)
for i, imgs in enumerate(train_dataloader):
start = time.time()
training_results['batch_sizes'] += opt.train_batch_size
# Configure model input
imgs_lr = Variable(imgs["lr"].type(Tensor))
imgs_hr = Variable(imgs["hr"].type(Tensor))
# Adversarial ground truths
valid = Variable(Tensor(np.ones((imgs_lr.size(0), *discriminator.module.output_shape))), requires_grad=False)
fake = Variable(Tensor(np.zeros((imgs_lr.size(0), *discriminator.module.output_shape))), requires_grad=False)
# ------------------
# Train Generators
# ------------------
if i % opt.g_every == 0:
optimizer_G.zero_grad()
# Generate a high resolution image from low resolution input
gen_hr = generator(imgs_lr)
# Adversarial loss
loss_GAN = criterion_GAN(discriminator(gen_hr), valid)
loss_GAN = loss_GAN.mean()
# Content loss
gen_features = feature_extractor(gen_hr)
real_features = feature_extractor(imgs_hr)
loss_content = criterion_content(gen_features, real_features.detach())
loss_content = loss_content.mean()
# Total perceptual loss: content (VGG feature) loss plus 1e-3-weighted adversarial loss
loss_G = loss_content + 1e-3 * loss_GAN
loss_G = loss_G.mean()
loss_G.backward(torch.ones_like(loss_G))
optimizer_G.step()
loss_GAN_meter.add(loss_GAN.item())
loss_content_meter.add(loss_content.item())
loss_G_meter.add(loss_G.item())
# ---------------------
# Train Discriminator
# ---------------------
if i % opt.d_every == 0:
optimizer_D.zero_grad()
# Loss of real and fake images
loss_real = criterion_GAN(discriminator(imgs_hr), valid)
loss_fake = criterion_GAN(discriminator(gen_hr.detach()), fake)
loss_real = loss_real.mean()
loss_fake = loss_fake.mean()
# Total loss
loss_D = (loss_real + loss_fake) / 2
loss_D = loss_D.mean()
loss_D.backward(torch.ones_like(loss_D))
optimizer_D.step()
loss_real_meter.add(loss_real.item())
loss_fake_meter.add(loss_fake.item())
loss_D_meter.add(loss_D.item())
# --------------
# Log Progress
# --------------
# loss for current batch before optimization
training_results['loss_G'] += loss_G.item() * opt.train_batch_size
training_results['loss_D'] += loss_D.item() * opt.train_batch_size
training_results['loss_GAN'] += loss_GAN.item() * opt.train_batch_size
training_results['loss_content'] += loss_content.item() * opt.train_batch_size
training_results['loss_real'] += loss_real.item() * opt.train_batch_size
training_results['loss_fake'] += loss_fake.item() * opt.train_batch_size
batch_time = time.time() - start
print('[Epoch %d/%d] [Batch %d/%d] [loss_G: %.4f] [loss_D: %.4f] [loss_GAN: %.4f] [loss_content: %.4f] [loss_real: %.4f] [loss_fake: %.4f] [batch time: %.4fs]' % (
epoch, opt.n_epochs, i, len(train_dataloader), training_results['loss_G'] / training_results['batch_sizes'],
training_results['loss_D'] / training_results['batch_sizes'],
training_results['loss_GAN'] / training_results['batch_sizes'],
training_results['loss_content'] / training_results['batch_sizes'],
training_results['loss_real'] / training_results['batch_sizes'],
training_results['loss_fake'] / training_results['batch_sizes'],
batch_time))
# Save training image and plot loss
batches_done = epoch * len(train_dataloader) + i
if batches_done % opt.plot_every == 0:
# imgs_lr = nn.functional.interpolate(imgs_lr, scale_factor=opt.scale_factor)
# gen_hr = make_grid(gen_hr, nrow=8, normalize=True)
# imgs_lr = make_grid(imgs_lr, nrow=8, normalize=True)
# img_grid = torch.cat((imgs_lr, gen_hr), -1)
# save_image(img_grid, "images/%d.png" % batches_done, normalize=True)
imgs_lr = nn.functional.interpolate(imgs_lr, scale_factor=opt.scale_factor)
training_out_imgs_lr_path = training_out_path + "imgs_lr/"
training_out_imgs_hr_path = training_out_path + "imgs_hr/"
training_out_gen_hr_path = training_out_path + "gen_hr/"
os.makedirs(training_out_imgs_lr_path, exist_ok=True)
os.makedirs(training_out_imgs_hr_path, exist_ok=True)
os.makedirs(training_out_gen_hr_path, exist_ok=True)
# save_image(imgs_lr.detach()[:1], training_out_imgs_lr_path + "imgs_lr_%d.png" % batches_done, normalize=True)
# save_image(imgs_hr.data[:1], training_out_imgs_hr_path + "imgs_hr_%d.png" % batches_done, normalize=True)
# save_image(gen_hr.data[:1], training_out_gen_hr_path + "gen_hr_%d.png" % batches_done, normalize=True)
save_image(imgs_lr[:1], training_out_imgs_lr_path + "imgs_lr_%d.png" % batches_done, normalize=True)
save_image(imgs_hr[:1], training_out_imgs_hr_path + "imgs_hr_%d.png" % batches_done, normalize=True)
save_image(gen_hr[:1], training_out_gen_hr_path + "gen_hr_%d.png" % batches_done, normalize=True)
gen_hr = make_grid(gen_hr, nrow=8, normalize=True)
imgs_lr = make_grid(imgs_lr, nrow=8, normalize=True)
imgs_hr = make_grid(imgs_hr, nrow=8, normalize=True)
img_grid_gl = torch.cat((gen_hr, imgs_lr), -1)
img_grid_hg = torch.cat((imgs_hr, gen_hr), -1)
save_image(img_grid_hg, "images/%d_hg.png" % batches_done, normalize=True)
save_image(img_grid_gl, "images/%d_gl.png" % batches_done, normalize=True)
# vis.images(imgs_lr.detach().cpu().numpy()[:1] * 0.5 + 0.5, win='imgs_lr_train')
# vis.images(gen_hr.data.cpu().numpy()[:1] * 0.5 + 0.5, win='img_gen_train')
# vis.images(imgs_hr.data.cpu().numpy()[:1] * 0.5 + 0.5, win='img_hr_train')
vis.plot('loss_G_train', loss_G_meter.value()[0])
vis.plot('loss_D_train', loss_D_meter.value()[0])
vis.plot('loss_GAN_train', loss_GAN_meter.value()[0])
vis.plot('loss_content_train', loss_content_meter.value()[0])
vis.plot('loss_real_train', loss_real_meter.value()[0])
vis.plot('loss_fake_train', loss_fake_meter.value()[0])
loss_GAN_meter.reset()
loss_content_meter.reset()
loss_G_meter.reset()
loss_real_meter.reset()
loss_fake_meter.reset()
loss_D_meter.reset()
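# Step the LR schedulers once per epoch (moved out of the batch loop) so that
# MultiStepLR decays the learning rates at epoch `decay_epoch` as intended,
# rather than after `decay_epoch` batches.
scheduler_G.step()
scheduler_D.step()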
# validate the generator model
generator.eval()
valing_out_path = 'valing_results/SR_factor_' + str(opt.scale_factor) + '/' + 'epoch_' + str(epoch) + '/'
os.makedirs(valing_out_path, exist_ok=True)
with torch.no_grad():
# val_bar = tqdm(val_dataloader)
valing_results = {'mse': 0, 'ssims': 0, 'psnr': 0, 'ssim': 0, 'batch_sizes': 0}
val_images = []
for i, imgs in enumerate(val_dataloader):
start = time.time()
valing_results['batch_sizes'] += opt.val_batch_size
# Configure model input
imgs_lr = Variable(imgs["lr"].type(Tensor))
imgs_hr = Variable(imgs["hr"].type(Tensor))
img_hr_restore = Variable(imgs["hr_restore"].type(Tensor))
gen_hr = generator(imgs_lr)
batch_mse = ((gen_hr - imgs_hr) ** 2).data.mean()
valing_results['mse'] += batch_mse * opt.val_batch_size
batch_ssim = pytorch_ssim.ssim(gen_hr, imgs_hr).item()
valing_results['ssims'] += batch_ssim * opt.val_batch_size
valing_results['psnr'] = 10 * log10(1 / (valing_results['mse'] / valing_results['batch_sizes']))
valing_results['ssim'] = valing_results['ssims'] / valing_results['batch_sizes']
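# PSNR above assumes pixel intensities normalised to [0, 1], so MAX_I = 1 and
# 10*log10(MAX_I**2 / MSE) reduces to the 10*log10(1 / MSE) used here.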
# val_bar.set_description(desc='[converting LR images to SR images] PSNR: %.4f dB SSIM: %.4f' % (valing_results['psnr'], valing_results['ssim']), refresh=True)
print('[converting LR images to SR images] PSNR: %.4f dB SSIM: %.4f' % (valing_results['psnr'], valing_results['ssim']))
val_images.extend(
[imgs_hr.data.cpu().squeeze(0), gen_hr.data.cpu().squeeze(0),
img_hr_restore.data.cpu().squeeze(0)])
val_images = torch.stack(val_images)  # re-stack the list into a 4D tensor
# val_images = torch.chunk(val_images, val_images.size(0) // 15)  # with a validation set of 3000 images, 3000 = 15*200 and 15 = 3*5, so each saved grid holds 15 sub-images
val_images = torch.chunk(val_images, val_images.size(0) // 3)
val_save_bar = tqdm(val_images, desc='[saving validation results]')
index = 1
for image in val_save_bar:
image = make_grid(image, nrow=3, padding=5, normalize=True)
save_image(image, valing_out_path + 'epoch_%d_index_%d.png' % (epoch, index), padding=5, normalize=True)
index += 1
# save loss\scores\psnr\ssim and visualize
results['loss_G'].append(training_results['loss_G'] / training_results['batch_sizes'])
results['loss_D'].append(training_results['loss_D'] / training_results['batch_sizes'])
results['loss_GAN'].append(training_results['loss_GAN'] / training_results['batch_sizes'])
results['loss_content'].append(training_results['loss_content'] / training_results['batch_sizes'])
results['loss_real'].append(training_results['loss_real'] / training_results['batch_sizes'])
results['loss_fake'].append(training_results['loss_fake'] / training_results['batch_sizes'])
results['psnr'].append(valing_results['psnr'])
results['ssim'].append(valing_results['ssim'])
vis.plot('loss_G_epoch', results['loss_G'][epoch])
vis.plot('loss_D_epoch', results['loss_D'][epoch])
vis.plot('loss_GAN_epoch', results['loss_GAN'][epoch])
vis.plot('loss_content_epoch', results['loss_content'][epoch])
vis.plot('loss_real_epoch', results['loss_real'][epoch])
vis.plot('loss_fake_epoch', results['loss_fake'][epoch])
vis.plot('psnr_epoch', results['psnr'][epoch])
vis.plot('ssim_epoch', results['ssim'][epoch])
# save model parameters
data_out_path = './statistics/'
os.makedirs(data_out_path, exist_ok=True)
if epoch % opt.save_every == 0:
# save_image(gen_hr.data[:16], 'images/%s.png' % epoch, normalize=True,range=(-1, 1))
torch.save(generator.state_dict(), "saved_models/generator_%d_%d.pth" % (opt.scale_factor,epoch))
torch.save(discriminator.state_dict(), "saved_models/discriminator_%d_%d.pth" % (opt.scale_factor,epoch))
data_frame = pd.DataFrame(
data={'loss_G': results['loss_G'], 'loss_D': results['loss_D'],
'loss_GAN': results['loss_GAN'], 'loss_content': results['loss_content'],
'loss_real': results['loss_real'], 'loss_fake': results['loss_fake'],
'PSNR': results['psnr'], 'SSIM': results['ssim']},
# index=range(0, epoch)
index=None
)
data_frame.to_csv(data_out_path + 'SR_factor_' + str(opt.scale_factor) + '_train_results.csv', index_label='Epoch')
elapse_time = time.time() - epoch_start
elapse_time = datetime.timedelta(seconds=elapse_time)
print("Training and validating time {}".format(elapse_time)) | [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
gpaw/examples/taito-1.4.0/setup/customize-taito.py | # User provided customizations for the gpaw setup
import os
# compiler
compiler = './gcc.py'
mpicompiler = './gcc.py'
mpilinker = 'mpicc'
extra_compile_args = ['-std=c99', '-O3', '-fopenmp-simd']
# libz
libraries = ['z']
# libxc
library_dirs += [os.environ['LIBXCDIR'] + '/lib']
include_dirs += [os.environ['LIBXCDIR'] + '/include']
libraries += ['xc']
# MKL
libraries += ['mkl_intel_lp64' ,'mkl_sequential' ,'mkl_core']
mpi_libraries += ['mkl_scalapack_lp64', 'mkl_blacs_intelmpi_lp64']
# use ScaLAPACK and HDF5
scalapack = True
hdf5 = True
libraries += ['hdf5']
library_dirs += [os.environ['H5ROOT'] + '/lib']
include_dirs += [os.environ['H5ROOT'] + '/include']
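# Both LIBXCDIR and H5ROOT must point at the respective installations before
# running setup, e.g. (illustrative path): export LIBXCDIR=$HOME/libxc-4.2.3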
# GPAW defines
define_macros += [('GPAW_NO_UNDERSCORE_CBLACS', '1')]
define_macros += [('GPAW_NO_UNDERSCORE_CSCALAPACK', '1')]
define_macros += [("GPAW_ASYNC",1)]
define_macros += [("GPAW_MPI2",1)]
| [] | [] | [
"LIBXCDIR",
"H5ROOT"
] | [] | ["LIBXCDIR", "H5ROOT"] | python | 2 | 0 | |
api/versions_test.go | package api
import (
"bytes"
"context"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"os"
"strconv"
"strings"
"testing"
"time"
errs "github.com/ONSdigital/dp-dataset-api/apierrors"
"github.com/ONSdigital/dp-dataset-api/config"
"github.com/ONSdigital/dp-dataset-api/mocks"
"github.com/ONSdigital/dp-dataset-api/models"
storetest "github.com/ONSdigital/dp-dataset-api/store/datastoretest"
"github.com/ONSdigital/log.go/log"
"github.com/pkg/errors"
. "github.com/smartystreets/goconvey/convey"
)
const (
versionPayload = `{"instance_id":"a1b2c3","edition":"2017","license":"ONS","release_date":"2017-04-04"}`
versionAssociatedPayload = `{"instance_id":"a1b2c3","edition":"2017","license":"ONS","release_date":"2017-04-04","state":"associated","collection_id":"12345"}`
versionPublishedPayload = `{"instance_id":"a1b2c3","edition":"2017","license":"ONS","release_date":"2017-04-04","state":"published","collection_id":"12345"}`
)
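// The payloads above drive the PUT /versions scenarios exercised below:
// leaving the state unchanged, associating a version with a collection, and
// publishing it.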
func TestGetVersionsReturnsOK(t *testing.T) {
t.Parallel()
Convey("A successful request to get version returns 200 OK response", t, func() {
r := httptest.NewRequest("GET", "http://localhost:22000/datasets/123-456/editions/678/versions", nil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(datasetID, state string) error {
return nil
},
CheckEditionExistsFunc: func(datasetID, editionID, state string) error {
return nil
},
GetVersionsFunc: func(ctx context.Context, datasetID, editionID, state string) (*models.VersionResults, error) {
return &models.VersionResults{}, nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionsCalls()), ShouldEqual, 1)
})
}
func TestGetVersionsReturnsError(t *testing.T) {
t.Parallel()
Convey("When the api cannot connect to datastore return an internal server error", t, func() {
r := httptest.NewRequest("GET", "http://localhost:22000/datasets/123-456/editions/678/versions", nil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(datasetID, state string) error {
return errs.ErrInternalServer
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
assertInternalServerErr(w)
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 0)
So(len(mockedDataStore.GetVersionsCalls()), ShouldEqual, 0)
})
Convey("When the dataset does not exist return status not found", t, func() {
r := httptest.NewRequest("GET", "http://localhost:22000/datasets/123-456/editions/678/versions", nil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(datasetID, state string) error {
return errs.ErrDatasetNotFound
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(w.Body.String(), ShouldContainSubstring, errs.ErrDatasetNotFound.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 0)
So(len(mockedDataStore.GetVersionsCalls()), ShouldEqual, 0)
})
Convey("When the edition of a dataset does not exist return status not found", t, func() {
r := httptest.NewRequest("GET", "http://localhost:22000/datasets/123-456/editions/678/versions", nil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(datasetID, state string) error {
return nil
},
CheckEditionExistsFunc: func(datasetID, editionID, state string) error {
return errs.ErrEditionNotFound
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(w.Body.String(), ShouldContainSubstring, errs.ErrEditionNotFound.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionsCalls()), ShouldEqual, 0)
})
Convey("When version does not exist for an edition of a dataset returns status not found", t, func() {
r := httptest.NewRequest("GET", "http://localhost:22000/datasets/123-456/editions/678/versions", nil)
r.Header.Add("internal_token", "coffee")
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(datasetID, state string) error {
return nil
},
CheckEditionExistsFunc: func(datasetID, editionID, state string) error {
return nil
},
GetVersionsFunc: func(ctx context.Context, datasetID, editionID, state string) (*models.VersionResults, error) {
return nil, errs.ErrVersionNotFound
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(w.Body.String(), ShouldContainSubstring, errs.ErrVersionNotFound.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionsCalls()), ShouldEqual, 1)
})
Convey("When version is not published against an edition of a dataset return status not found", t, func() {
r := httptest.NewRequest("GET", "http://localhost:22000/datasets/123-456/editions/678/versions", nil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(datasetID, state string) error {
return nil
},
CheckEditionExistsFunc: func(datasetID, editionID, state string) error {
return nil
},
GetVersionsFunc: func(ctx context.Context, datasetID, editionID, state string) (*models.VersionResults, error) {
return nil, errs.ErrVersionNotFound
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(w.Body.String(), ShouldContainSubstring, errs.ErrVersionNotFound.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionsCalls()), ShouldEqual, 1)
})
Convey("When a published version has an incorrect state for an edition of a dataset return an internal error", t, func() {
r := httptest.NewRequest("GET", "http://localhost:22000/datasets/123-456/editions/678/versions", nil)
w := httptest.NewRecorder()
version := models.Version{State: "gobbly-gook"}
items := []models.Version{version}
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(datasetID, state string) error {
return nil
},
CheckEditionExistsFunc: func(datasetID, editionID, state string) error {
return nil
},
GetVersionsFunc: func(ctx context.Context, datasetID, editionID, state string) (*models.VersionResults, error) {
return &models.VersionResults{Items: items}, nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(w.Body.String(), ShouldContainSubstring, errs.ErrResourceState.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionsCalls()), ShouldEqual, 1)
})
}
func TestGetVersionReturnsOK(t *testing.T) {
t.Parallel()
Convey("A successful request to get version returns 200 OK response", t, func() {
r := httptest.NewRequest("GET", "http://localhost:22000/datasets/123-456/editions/678/versions/1", nil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(datasetID, state string) error {
return nil
},
CheckEditionExistsFunc: func(datasetID, editionID, state string) error {
return nil
},
GetVersionFunc: func(datasetID, editionID, version, state string) (*models.Version, error) {
return &models.Version{
State: models.EditionConfirmedState,
Links: &models.VersionLinks{
Self: &models.LinkObject{},
Version: &models.LinkObject{
HRef: "href",
},
},
}, nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
})
}
func TestGetVersionReturnsError(t *testing.T) {
t.Parallel()
Convey("When the api cannot connect to datastore return an internal server error", t, func() {
r := httptest.NewRequest("GET", "http://localhost:22000/datasets/123-456/editions/678/versions/1", nil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(datasetID, state string) error {
return errs.ErrInternalServer
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
assertInternalServerErr(w)
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
})
Convey("When the dataset does not exist for return status not found", t, func() {
r := httptest.NewRequest("GET", "http://localhost:22000/datasets/123-456/editions/678/versions/1", nil)
r.Header.Add("internal_token", "coffee")
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(datasetID, state string) error {
return errs.ErrDatasetNotFound
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(w.Body.String(), ShouldContainSubstring, errs.ErrDatasetNotFound.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 0)
})
Convey("When the edition of a dataset does not exist return status not found", t, func() {
r := httptest.NewRequest("GET", "http://localhost:22000/datasets/123-456/editions/678/versions/1", nil)
r.Header.Add("internal_token", "coffee")
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(datasetID, state string) error {
return nil
},
CheckEditionExistsFunc: func(datasetID, editionID, state string) error {
return errs.ErrEditionNotFound
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(w.Body.String(), ShouldContainSubstring, errs.ErrEditionNotFound.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 0)
})
Convey("When version does not exist for an edition of a dataset return status not found", t, func() {
r := httptest.NewRequest("GET", "http://localhost:22000/datasets/123-456/editions/678/versions/1", nil)
r.Header.Add("internal_token", "coffee")
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(datasetID, state string) error {
return nil
},
CheckEditionExistsFunc: func(datasetID, editionID, state string) error {
return nil
},
GetVersionFunc: func(datasetID, editionID, version, state string) (*models.Version, error) {
return nil, errs.ErrVersionNotFound
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(w.Body.String(), ShouldContainSubstring, errs.ErrVersionNotFound.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
})
Convey("When version is not published for an edition of a dataset return status not found", t, func() {
r := httptest.NewRequest("GET", "http://localhost:22000/datasets/123-456/editions/678/versions/1", nil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(datasetID, state string) error {
return nil
},
CheckEditionExistsFunc: func(datasetID, editionID, state string) error {
return nil
},
GetVersionFunc: func(datasetID, editionID, version, state string) (*models.Version, error) {
return nil, errs.ErrVersionNotFound
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(w.Body.String(), ShouldContainSubstring, errs.ErrVersionNotFound.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
})
Convey("When an unpublished version has an incorrect state for an edition of a dataset return an internal error", t, func() {
r := httptest.NewRequest("GET", "http://localhost:22000/datasets/123-456/editions/678/versions/1", nil)
r.Header.Add("internal_token", "coffee")
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
CheckDatasetExistsFunc: func(datasetID, state string) error {
return nil
},
CheckEditionExistsFunc: func(datasetID, editionID, state string) error {
return nil
},
GetVersionFunc: func(datasetID, editionID, version, state string) (*models.Version, error) {
return &models.Version{
State: "gobbly-gook",
Links: &models.VersionLinks{
Self: &models.LinkObject{},
Version: &models.LinkObject{
HRef: "href",
},
},
}, nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(w.Body.String(), ShouldContainSubstring, errs.ErrResourceState.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.CheckDatasetExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
})
}
func TestPutVersionReturnsSuccessfully(t *testing.T) {
t.Parallel()
Convey("When state is unchanged", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
b := versionPayload
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetDatasetFunc: func(datasetID string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{}, nil
},
CheckEditionExistsFunc: func(string, string, string) error {
return nil
},
GetVersionFunc: func(string, string, string, string) (*models.Version, error) {
return &models.Version{
ID: "789",
Links: &models.VersionLinks{
Dataset: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123",
ID: "123",
},
Dimensions: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123/editions/2017/versions/1/dimensions",
},
Edition: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123/editions/2017",
ID: "456",
},
Self: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123/editions/2017/versions/1",
},
},
ReleaseDate: "2017-12-12",
State: models.EditionConfirmedState,
}, nil
},
UpdateVersionFunc: func(string, *models.Version) error {
return nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 2)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpsertEditionCalls()), ShouldEqual, 0)
So(len(mockedDataStore.SetInstanceIsPublishedCalls()), ShouldEqual, 0)
So(len(mockedDataStore.UpsertDatasetCalls()), ShouldEqual, 0)
So(len(mockedDataStore.UpdateDatasetWithAssociationCalls()), ShouldEqual, 0)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
Convey("then the request body has been drained", func() {
_, err = r.Body.Read(make([]byte, 1))
So(err, ShouldEqual, io.EOF)
})
})
Convey("When state is set to associated", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
b := versionAssociatedPayload
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetDatasetFunc: func(datasetID string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{}, nil
},
CheckEditionExistsFunc: func(string, string, string) error {
return nil
},
GetVersionFunc: func(string, string, string, string) (*models.Version, error) {
return &models.Version{
State: models.AssociatedState,
}, nil
},
UpdateVersionFunc: func(string, *models.Version) error {
return nil
},
UpdateDatasetWithAssociationFunc: func(string, string, *models.Version) error {
return nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 2)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpdateDatasetWithAssociationCalls()), ShouldEqual, 0)
So(len(mockedDataStore.UpsertEditionCalls()), ShouldEqual, 0)
So(len(mockedDataStore.SetInstanceIsPublishedCalls()), ShouldEqual, 0)
So(len(mockedDataStore.UpsertDatasetCalls()), ShouldEqual, 0)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
Convey("then the request body has been drained", func() {
_, err = r.Body.Read(make([]byte, 1))
So(err, ShouldEqual, io.EOF)
})
})
Convey("When state is set to edition-confirmed", t, func() {
downloadsGenerated := make(chan bool, 1)
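// The handler kicks off download generation asynchronously, so the mock
// signals completion over this buffered channel and the test waits on it
// after serving the request.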
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
downloadsGenerated <- true
return nil
},
}
b := versionAssociatedPayload
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetDatasetFunc: func(datasetID string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{}, nil
},
CheckEditionExistsFunc: func(string, string, string) error {
return nil
},
GetVersionFunc: func(string, string, string, string) (*models.Version, error) {
return &models.Version{
State: models.EditionConfirmedState,
}, nil
},
UpdateVersionFunc: func(string, *models.Version) error {
return nil
},
UpdateDatasetWithAssociationFunc: func(string, string, *models.Version) error {
return nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
ctx := context.Background()
select {
case <-downloadsGenerated:
log.Event(ctx, "download generated as expected", log.INFO)
case <-time.After(time.Second * 10):
err := errors.New("failing test due to timeout")
log.Event(ctx, "timed out", log.ERROR, log.Error(err))
t.Fail()
}
So(w.Code, ShouldEqual, http.StatusOK)
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 2)
So(len(mockedDataStore.UpdateDatasetWithAssociationCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpsertEditionCalls()), ShouldEqual, 0)
So(len(mockedDataStore.SetInstanceIsPublishedCalls()), ShouldEqual, 0)
So(len(mockedDataStore.UpsertDatasetCalls()), ShouldEqual, 0)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 1)
Convey("then the request body has been drained", func() {
_, err = r.Body.Read(make([]byte, 1))
So(err, ShouldEqual, io.EOF)
})
})
Convey("When state is set to published", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
b := versionPublishedPayload
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
CheckEditionExistsFunc: func(string, string, string) error {
return nil
},
GetVersionFunc: func(string, string, string, string) (*models.Version, error) {
return &models.Version{
ID: "789",
Links: &models.VersionLinks{
Dataset: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123",
ID: "123",
},
Dimensions: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123/editions/2017/versions/1/dimensions",
},
Edition: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123/editions/2017",
ID: "2017",
},
Self: &models.LinkObject{
HRef: "http://localhost:22000/instances/765",
},
Version: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123/editions/2017/versions/1",
ID: "1",
},
},
ReleaseDate: "2017-12-12",
Downloads: &models.DownloadList{
CSV: &models.DownloadObject{
Private: "s3://csv-exported/myfile.csv",
HRef: "http://localhost:23600/datasets/123/editions/2017/versions/1.csv",
Size: "1234",
},
},
State: models.EditionConfirmedState,
}, nil
},
UpdateVersionFunc: func(string, *models.Version) error {
return nil
},
GetDatasetFunc: func(string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{
ID: "123",
Next: &models.Dataset{Links: &models.DatasetLinks{}},
Current: &models.Dataset{Links: &models.DatasetLinks{}},
}, nil
},
UpsertDatasetFunc: func(string, *models.DatasetUpdate) error {
return nil
},
GetEditionFunc: func(string, string, string) (*models.EditionUpdate, error) {
return &models.EditionUpdate{
ID: "123",
Next: &models.Edition{
State: models.PublishedState,
Links: &models.EditionUpdateLinks{
Self: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123/editions/2017",
},
LatestVersion: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123/editions/2017/versions/1",
ID: "1",
},
},
},
Current: &models.Edition{},
}, nil
},
UpsertEditionFunc: func(string, string, *models.EditionUpdate) error {
return nil
},
SetInstanceIsPublishedFunc: func(ctx context.Context, instanceID string) error {
return nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 2)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpsertEditionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpsertDatasetCalls()), ShouldEqual, 1)
So(len(mockedDataStore.SetInstanceIsPublishedCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpdateDatasetWithAssociationCalls()), ShouldEqual, 0)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 1)
Convey("then the request body has been drained", func() {
_, err = r.Body.Read(make([]byte, 1))
So(err, ShouldEqual, io.EOF)
})
})
Convey("When version is already published and update includes downloads object only", t, func() {
Convey("And downloads object contains only a csv object", func() {
b := `{"downloads": { "csv": { "public": "http://cmd-dev/test-site/cpih01", "size": "12", "href": "http://localhost:8080/cpih01"}}}`
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
So(err, ShouldBeNil)
updateVersionDownloadTest(r)
Convey("then the request body has been drained", func() {
_, err := r.Body.Read(make([]byte, 1))
So(err, ShouldEqual, io.EOF)
})
})
Convey("And downloads object contains only a xls object", func() {
b := `{"downloads": { "xls": { "public": "http://cmd-dev/test-site/cpih01", "size": "12", "href": "http://localhost:8080/cpih01"}}}`
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
So(err, ShouldBeNil)
updateVersionDownloadTest(r)
Convey("then the request body has been drained", func() {
_, err = r.Body.Read(make([]byte, 1))
So(err, ShouldEqual, io.EOF)
})
})
})
}
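// updateVersionDownloadTest asserts that a PUT against an already-published
// version carrying only a downloads object updates the version document
// without republishing the edition or dataset and without regenerating
// downloads.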
func updateVersionDownloadTest(r *http.Request) {
w := httptest.NewRecorder()
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
mockedDataStore := &storetest.StorerMock{
GetDatasetFunc: func(string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{
ID: "123",
Next: &models.Dataset{Links: &models.DatasetLinks{}},
Current: &models.Dataset{Links: &models.DatasetLinks{}},
}, nil
},
CheckEditionExistsFunc: func(string, string, string) error {
return nil
},
GetVersionFunc: func(string, string, string, string) (*models.Version, error) {
return &models.Version{
ID: "789",
Links: &models.VersionLinks{
Dataset: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123",
ID: "123",
},
Dimensions: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123/editions/2017/versions/1/dimensions",
},
Edition: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123/editions/2017",
ID: "2017",
},
Self: &models.LinkObject{
HRef: "http://localhost:22000/instances/765",
},
Version: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123/editions/2017/versions/1",
},
},
ReleaseDate: "2017-12-12",
Downloads: &models.DownloadList{
CSV: &models.DownloadObject{
Private: "s3://csv-exported/myfile.csv",
HRef: "http://localhost:23600/datasets/123/editions/2017/versions/1.csv",
Size: "1234",
},
},
State: models.PublishedState,
}, nil
},
UpdateVersionFunc: func(string, *models.Version) error {
return nil
},
GetEditionFunc: func(string, string, string) (*models.EditionUpdate, error) {
return &models.EditionUpdate{
ID: "123",
Next: &models.Edition{
State: models.PublishedState,
},
Current: &models.Edition{},
}, nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 2)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 1)
// Check updates to edition and dataset resources were not called
So(len(mockedDataStore.UpsertEditionCalls()), ShouldEqual, 0)
So(len(mockedDataStore.UpsertDatasetCalls()), ShouldEqual, 0)
So(len(mockedDataStore.UpdateDatasetWithAssociationCalls()), ShouldEqual, 0)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
}
func TestPutVersionGenerateDownloadsError(t *testing.T) {
Convey("given download generator returns an error", t, func() {
mockedErr := errors.New("spectacular explosion")
var v models.Version
json.Unmarshal([]byte(versionAssociatedPayload), &v)
v.State = models.EditionConfirmedState
mockedDataStore := &storetest.StorerMock{
GetVersionFunc: func(datasetID string, editionID string, version string, state string) (*models.Version, error) {
return &v, nil
},
GetDatasetFunc: func(datasetID string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{}, nil
},
CheckEditionExistsFunc: func(ID string, editionID string, state string) error {
return nil
},
UpdateVersionFunc: func(ID string, version *models.Version) error {
return nil
},
UpdateDatasetWithAssociationFunc: func(ID string, state string, version *models.Version) error {
return nil
},
}
mockDownloadGenerator := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return mockedErr
},
}
Convey("when put version is called with a valid request", func() {
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(versionAssociatedPayload))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
cfg, err := config.Get()
So(err, ShouldBeNil)
cfg.EnablePrivateEndpoints = true
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, mockDownloadGenerator, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
Convey("then an internal server error response is returned", func() {
So(w.Code, ShouldEqual, http.StatusInternalServerError)
})
Convey("and the expected store calls are made with the expected parameters", func() {
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
genCalls := mockDownloadGenerator.GenerateCalls()
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
So(mockedDataStore.GetDatasetCalls()[0].ID, ShouldEqual, "123")
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(mockedDataStore.CheckEditionExistsCalls()[0].ID, ShouldEqual, "123")
So(mockedDataStore.CheckEditionExistsCalls()[0].EditionID, ShouldEqual, "2017")
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 2)
So(mockedDataStore.GetVersionCalls()[0].DatasetID, ShouldEqual, "123")
So(mockedDataStore.GetVersionCalls()[0].EditionID, ShouldEqual, "2017")
So(mockedDataStore.GetVersionCalls()[0].Version, ShouldEqual, "1")
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 1)
So(len(genCalls), ShouldEqual, 1)
So(genCalls[0].DatasetID, ShouldEqual, "123")
So(genCalls[0].Edition, ShouldEqual, "2017")
So(genCalls[0].Version, ShouldEqual, "1")
})
Convey("then the request body has been drained", func() {
_, err = r.Body.Read(make([]byte, 1))
So(err, ShouldEqual, io.EOF)
})
})
})
}
func TestPutEmptyVersion(t *testing.T) {
var v models.Version
json.Unmarshal([]byte(versionAssociatedPayload), &v)
v.State = models.AssociatedState
xlsDownload := &models.DownloadList{XLS: &models.DownloadObject{Size: "1", HRef: "/hello"}}
Convey("given an existing version with empty downloads", t, func() {
mockedDataStore := &storetest.StorerMock{
GetVersionFunc: func(datasetID string, editionID string, version string, state string) (*models.Version, error) {
return &v, nil
},
GetDatasetFunc: func(datasetID string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{}, nil
},
CheckEditionExistsFunc: func(ID string, editionID string, state string) error {
return nil
},
UpdateVersionFunc: func(ID string, version *models.Version) error {
return nil
},
}
Convey("when put version is called with an associated version with empty downloads", func() {
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(versionAssociatedPayload))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
Convey("then a http status ok is returned", func() {
So(w.Code, ShouldEqual, http.StatusOK)
})
Convey("and the updated version is as expected", func() {
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 2)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 1)
So(mockedDataStore.UpdateVersionCalls()[0].Version.Downloads, ShouldBeNil)
})
})
})
Convey("given an existing version with a xls download already exists", t, func() {
mockedDataStore := &storetest.StorerMock{
GetVersionFunc: func(datasetID string, editionID string, version string, state string) (*models.Version, error) {
v.Downloads = xlsDownload
return &v, nil
},
GetDatasetFunc: func(datasetID string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{}, nil
},
CheckEditionExistsFunc: func(ID string, editionID string, state string) error {
return nil
},
UpdateVersionFunc: func(ID string, version *models.Version) error {
return nil
},
}
mockDownloadGenerator := &mocks.DownloadsGeneratorMock{}
Convey("when put version is called with an associated version with empty downloads", func() {
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(versionAssociatedPayload))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, &mocks.DownloadsGeneratorMock{}, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
Convey("then a http status ok is returned", func() {
So(w.Code, ShouldEqual, http.StatusOK)
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
})
Convey("and any existing version downloads are not overwritten", func() {
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 1)
So(mockedDataStore.UpdateVersionCalls()[0].Version.Downloads, ShouldResemble, xlsDownload)
})
Convey("and the expected external calls are made with the correct parameters", func() {
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
So(mockedDataStore.GetDatasetCalls()[0].ID, ShouldEqual, "123")
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(mockedDataStore.CheckEditionExistsCalls()[0].ID, ShouldEqual, "123")
So(mockedDataStore.CheckEditionExistsCalls()[0].EditionID, ShouldEqual, "2017")
So(mockedDataStore.CheckEditionExistsCalls()[0].State, ShouldEqual, "")
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 2)
So(mockedDataStore.GetVersionCalls()[0].DatasetID, ShouldEqual, "123")
So(mockedDataStore.GetVersionCalls()[0].EditionID, ShouldEqual, "2017")
So(mockedDataStore.GetVersionCalls()[0].Version, ShouldEqual, "1")
So(mockedDataStore.GetVersionCalls()[0].State, ShouldEqual, "")
So(len(mockedDataStore.UpsertEditionCalls()), ShouldEqual, 0)
So(len(mockedDataStore.UpdateDatasetWithAssociationCalls()), ShouldEqual, 0)
So(len(mockDownloadGenerator.GenerateCalls()), ShouldEqual, 0)
})
})
})
}
func TestPutVersionReturnsError(t *testing.T) {
t.Parallel()
Convey("When the request contain malformed json a bad request status is returned", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
b := "{"
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetVersionFunc: func(string, string, string, string) (*models.Version, error) {
return &models.Version{State: models.AssociatedState}, nil
},
GetDatasetFunc: func(datasetID string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{}, nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(w.Body.String(), ShouldContainSubstring, errs.ErrUnableToParseJSON.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 0)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
Convey("then the request body has been drained", func() {
_, err = r.Body.Read(make([]byte, 1))
So(err, ShouldEqual, io.EOF)
})
})
Convey("When the api cannot connect to datastore return an internal server error", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
b := versionPayload
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetVersionFunc: func(string, string, string, string) (*models.Version, error) {
return nil, errs.ErrInternalServer
},
GetDatasetFunc: func(datasetID string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{}, nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(w.Body.String(), ShouldContainSubstring, errs.ErrInternalServer.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 0)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
Convey("then the request body has been drained", func() {
_, err = r.Body.Read(make([]byte, 1))
So(err, ShouldEqual, io.EOF)
})
})
Convey("When the dataset document cannot be found for version return status not found", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(ctx context.Context, datasetID string, edition string, versionID string, version string) error {
return nil
},
}
b := versionPayload
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetVersionFunc: func(string, string, string, string) (*models.Version, error) {
return &models.Version{}, errs.ErrVersionNotFound
},
GetDatasetFunc: func(datasetID string) (*models.DatasetUpdate, error) {
return nil, errs.ErrDatasetNotFound
},
CheckEditionExistsFunc: func(string, string, string) error {
return nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(w.Body.String(), ShouldContainSubstring, errs.ErrDatasetNotFound.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 0)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
Convey("then the request body has been drained", func() {
_, err = r.Body.Read(make([]byte, 1))
So(err, ShouldEqual, io.EOF)
})
})
Convey("When the edition document cannot be found for version return status not found", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
b := versionPayload
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetVersionFunc: func(string, string, string, string) (*models.Version, error) {
return &models.Version{}, errs.ErrVersionNotFound
},
GetDatasetFunc: func(datasetID string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{}, nil
},
CheckEditionExistsFunc: func(string, string, string) error {
return errs.ErrEditionNotFound
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(w.Body.String(), ShouldContainSubstring, errs.ErrEditionNotFound.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
Convey("then the request body has been drained", func() {
_, err = r.Body.Read(make([]byte, 1))
So(err, ShouldEqual, io.EOF)
})
})
Convey("When the version document cannot be found return status not found", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
b := versionPayload
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetVersionFunc: func(string, string, string, string) (*models.Version, error) {
return &models.Version{}, errs.ErrVersionNotFound
},
GetDatasetFunc: func(datasetID string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{}, nil
},
CheckEditionExistsFunc: func(string, string, string) error {
return nil
},
UpdateVersionFunc: func(string, *models.Version) error {
return nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(w.Body.String(), ShouldContainSubstring, errs.ErrVersionNotFound.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 2)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 0)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
Convey("then the request body has been drained", func() {
_, err = r.Body.Read(make([]byte, 1))
So(err, ShouldEqual, io.EOF)
})
})
Convey("When the request is not authorised to update version then response returns status not found", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
b := versionPayload
r, err := http.NewRequest("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetVersionFunc: func(string, string, string, string) (*models.Version, error) {
return &models.Version{
State: "associated",
}, nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusUnauthorized)
So(w.Body.String(), ShouldEqual, "unauthenticated request\n")
So(datasetPermissions.Required.Calls, ShouldEqual, 0)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 0)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
Convey("then the request body has been drained", func() {
_, err = r.Body.Read(make([]byte, 1))
So(err, ShouldEqual, io.EOF)
})
})
Convey("When the version document has already been published return status forbidden", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
b := versionPayload
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetVersionFunc: func(string, string, string, string) (*models.Version, error) {
return &models.Version{
State: models.PublishedState,
}, nil
},
GetDatasetFunc: func(datasetID string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{}, nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusForbidden)
So(w.Body.String(), ShouldEqual, "unable to update version as it has been published\n")
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 0)
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
Convey("then the request body has been drained", func() {
_, err = r.Body.Read(make([]byte, 1))
So(err, ShouldEqual, io.EOF)
})
})
Convey("When the request body is invalid return status bad request", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
b := `{"instance_id":"a1b2c3","edition":"2017","license":"ONS","release_date":"2017-04-04","state":"associated"}`
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetVersionFunc: func(string, string, string, string) (*models.Version, error) {
return &models.Version{State: "associated"}, nil
},
GetDatasetFunc: func(datasetID string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{}, nil
},
CheckEditionExistsFunc: func(string, string, string) error {
return nil
},
UpdateVersionFunc: func(string, *models.Version) error {
return nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusBadRequest)
So(w.Body.String(), ShouldEqual, "missing collection_id for association between version and a collection\n")
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 2)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 0)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
Convey("then the request body has been drained", func() {
_, err = r.Body.Read(make([]byte, 1))
So(err, ShouldEqual, io.EOF)
})
})
Convey("When setting the instance node to published fails", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
b := versionPublishedPayload
r, err := createRequestWithAuth("PUT", "http://localhost:22000/datasets/123/editions/2017/versions/1", bytes.NewBufferString(b))
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
CheckEditionExistsFunc: func(string, string, string) error {
return nil
},
GetVersionFunc: func(string, string, string, string) (*models.Version, error) {
return &models.Version{
ID: "789",
Links: &models.VersionLinks{
Dataset: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123",
ID: "123",
},
Dimensions: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123/editions/2017/versions/1/dimensions",
},
Edition: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123/editions/2017",
ID: "2017",
},
Self: &models.LinkObject{
HRef: "http://localhost:22000/instances/765",
},
Version: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123/editions/2017/versions/1",
ID: "1",
},
},
ReleaseDate: "2017-12-12",
Downloads: &models.DownloadList{
CSV: &models.DownloadObject{
Private: "s3://csv-exported/myfile.csv",
HRef: "http://localhost:23600/datasets/123/editions/2017/versions/1.csv",
Size: "1234",
},
},
State: models.EditionConfirmedState,
}, nil
},
UpdateVersionFunc: func(string, *models.Version) error {
return nil
},
GetDatasetFunc: func(string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{
ID: "123",
Next: &models.Dataset{Links: &models.DatasetLinks{}},
Current: &models.Dataset{Links: &models.DatasetLinks{}},
}, nil
},
UpsertDatasetFunc: func(string, *models.DatasetUpdate) error {
return nil
},
GetEditionFunc: func(string, string, string) (*models.EditionUpdate, error) {
return &models.EditionUpdate{
ID: "123",
Next: &models.Edition{
State: models.PublishedState,
Links: &models.EditionUpdateLinks{
Self: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123/editions/2017",
ID: "2017",
},
LatestVersion: &models.LinkObject{
HRef: "http://localhost:22000/datasets/123/editions/2017/versions/1",
ID: "1",
},
},
},
Current: &models.Edition{},
}, nil
},
UpsertEditionFunc: func(string, string, *models.EditionUpdate) error {
return nil
},
SetInstanceIsPublishedFunc: func(ctx context.Context, instanceID string) error {
return errors.New("failed to set is_published on the instance node")
},
}
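// These direct mock invocations happen before the request is served, so the
// call-count assertions below include them on top of the calls made by the
// handler itself.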
mockedDataStore.GetVersion("789", "2017", "1", "")
mockedDataStore.GetEdition("123", "2017", "")
mockedDataStore.UpdateVersion("a1b2c3", &models.Version{})
mockedDataStore.GetDataset("123")
mockedDataStore.UpsertDataset("123", &models.DatasetUpdate{Next: &models.Dataset{}})
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.CheckEditionExistsCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 3)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 2)
So(len(mockedDataStore.UpsertEditionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 2)
So(len(mockedDataStore.SetInstanceIsPublishedCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpsertDatasetCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpdateDatasetWithAssociationCalls()), ShouldEqual, 0)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
Convey("then the request body has been drained", func() {
_, err = r.Body.Read(make([]byte, 1))
So(err, ShouldEqual, io.EOF)
})
})
}
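// populateNewVersionDoc (exercised below) merges an update request into the
// stored version document: values supplied in the request take precedence,
// otherwise the current value is carried over.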
func TestCreateNewVersionDoc(t *testing.T) {
t.Parallel()
Convey("Check the version has the new collection id when request contains a collection_id", t, func() {
currentVersion := &models.Version{}
version := &models.Version{
CollectionID: "4321",
}
populateNewVersionDoc(currentVersion, version)
So(version.CollectionID, ShouldNotBeNil)
So(version.CollectionID, ShouldEqual, "4321")
})
Convey("Check the version collection id does not get replaced by the current collection id when request contains a collection_id", t, func() {
currentVersion := &models.Version{
CollectionID: "1234",
}
version := &models.Version{
CollectionID: "4321",
}
populateNewVersionDoc(currentVersion, version)
So(version.CollectionID, ShouldNotBeNil)
So(version.CollectionID, ShouldEqual, "4321")
})
Convey("Check the version has the old collection id when request is missing a collection_id", t, func() {
currentVersion := &models.Version{
CollectionID: "1234",
}
version := &models.Version{}
populateNewVersionDoc(currentVersion, version)
So(version.CollectionID, ShouldNotBeNil)
So(version.CollectionID, ShouldEqual, "1234")
})
Convey("check the version collection id is not set when both request body and current version document are missing a collection id", t, func() {
currentVersion := &models.Version{}
version := &models.Version{}
populateNewVersionDoc(currentVersion, version)
So(version.CollectionID, ShouldNotBeNil)
So(version.CollectionID, ShouldEqual, "")
})
Convey("Check the version has the new spatial link when request contains a links.spatial.href", t, func() {
currentVersion := &models.Version{}
version := &models.Version{
Links: &models.VersionLinks{
Spatial: &models.LinkObject{
HRef: "http://ons.gov.uk/geographylist",
},
},
}
populateNewVersionDoc(currentVersion, version)
So(version.Links, ShouldNotBeNil)
So(version.Links.Spatial, ShouldNotBeNil)
So(version.Links.Spatial.HRef, ShouldEqual, "http://ons.gov.uk/geographylist")
})
Convey("Check the version links.spatial.href does not get replaced by the current version value", t, func() {
currentVersion := &models.Version{
Links: &models.VersionLinks{
Spatial: &models.LinkObject{
HRef: "http://ons.gov.uk/oldgeographylist",
},
},
}
version := &models.Version{
Links: &models.VersionLinks{
Spatial: &models.LinkObject{
HRef: "http://ons.gov.uk/geographylist",
},
},
}
populateNewVersionDoc(currentVersion, version)
So(version.Links, ShouldNotBeNil)
So(version.Links.Spatial, ShouldNotBeNil)
So(version.Links.Spatial.HRef, ShouldEqual, "http://ons.gov.uk/geographylist")
})
Convey("Check the links.spatial.href has the old value when request does not contain a links.spatial.href", t, func() {
currentVersion := &models.Version{
Links: &models.VersionLinks{
Spatial: &models.LinkObject{
HRef: "http://ons.gov.uk/oldgeographylist",
},
},
}
version := &models.Version{}
populateNewVersionDoc(currentVersion, version)
So(version.Links, ShouldNotBeNil)
So(version.Links.Spatial, ShouldNotBeNil)
So(version.Links.Spatial.HRef, ShouldEqual, "http://ons.gov.uk/oldgeographylist")
})
Convey("check the version links.spatial.href is not set when both request body and current version document do not contain a links.spatial.href", t, func() {
currentVersion := &models.Version{
Links: &models.VersionLinks{
Dataset: &models.LinkObject{
HRef: "http://ons.gov.uk/datasets/123",
},
},
}
version := &models.Version{}
populateNewVersionDoc(currentVersion, version)
So(version.Links, ShouldNotBeNil)
So(version.Links.Spatial, ShouldBeNil)
})
}
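// The detach tests below only run when the ENABLE_DETACH_DATASET feature flag is enabled in the environment.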
func TestDetachVersionReturnOK(t *testing.T) {
	// TODO: conditional test for feature-flagged functionality; will need tidying up eventually.
featureEnvString := os.Getenv("ENABLE_DETACH_DATASET")
featureOn, _ := strconv.ParseBool(featureEnvString)
if !featureOn {
return
}
t.Parallel()
Convey("A successful detach request against a version of a published dataset returns 200 OK response.", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
r, err := createRequestWithAuth("DELETE", "http://localhost:22000/datasets/123/editions/2017/versions/1", nil)
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetEditionFunc: func(datasetID, editionID, state string) (*models.EditionUpdate, error) {
return &models.EditionUpdate{
ID: "test",
Current: &models.Edition{},
Next: &models.Edition{
Edition: "yep",
State: models.EditionConfirmedState,
Links: &models.EditionUpdateLinks{
LatestVersion: &models.LinkObject{
ID: "1"}}}}, nil
},
GetVersionFunc: func(datasetID string, editionID string, version string, state string) (*models.Version, error) {
return &models.Version{}, nil
},
GetDatasetFunc: func(ID string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{Current: &models.Dataset{}}, nil
},
UpdateVersionFunc: func(ID string, version *models.Version) error {
return nil
},
UpsertEditionFunc: func(datasetID string, edition string, editionDoc *models.EditionUpdate) error {
return nil
},
UpsertDatasetFunc: func(ID string, datasetDoc *models.DatasetUpdate) error {
return nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetEditionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpsertEditionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpsertDatasetCalls()), ShouldEqual, 1)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
Convey("A successful detach request against a version of a unpublished dataset returns 200 OK response.", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
r, err := createRequestWithAuth("DELETE", "http://localhost:22000/datasets/123/editions/2017/versions/1", nil)
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetEditionFunc: func(datasetID, editionID, state string) (*models.EditionUpdate, error) {
return &models.EditionUpdate{
ID: "test",
Current: &models.Edition{},
Next: &models.Edition{
Edition: "yep",
State: models.EditionConfirmedState,
Links: &models.EditionUpdateLinks{
LatestVersion: &models.LinkObject{
ID: "1"}}}}, nil
},
GetVersionFunc: func(datasetID string, editionID string, version string, state string) (*models.Version, error) {
return &models.Version{}, nil
},
GetDatasetFunc: func(ID string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{}, nil
},
UpdateVersionFunc: func(ID string, version *models.Version) error {
return nil
},
UpsertEditionFunc: func(datasetID string, edition string, editionDoc *models.EditionUpdate) error {
return nil
},
UpsertDatasetFunc: func(ID string, datasetDoc *models.DatasetUpdate) error {
return nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusOK)
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetEditionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetDatasetCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpsertEditionCalls()), ShouldEqual, 0)
So(len(mockedDataStore.UpsertDatasetCalls()), ShouldEqual, 0)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
}
func TestDetachVersionReturnsError(t *testing.T) {
	// TODO: conditional test for feature-flagged functionality; will need tidying up eventually.
featureEnvString := os.Getenv("ENABLE_DETACH_DATASET")
featureOn, _ := strconv.ParseBool(featureEnvString)
if !featureOn {
return
}
t.Parallel()
Convey("When the api cannot connect to datastore return an internal server error.", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
r, err := createRequestWithAuth("DELETE", "http://localhost:22000/datasets/123/editions/2017/versions/1", nil)
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetEditionFunc: func(datasetID, editionID, state string) (*models.EditionUpdate, error) {
return nil, errs.ErrInternalServer
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(w.Body.String(), ShouldContainSubstring, errs.ErrInternalServer.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 0)
So(len(mockedDataStore.GetEditionCalls()), ShouldEqual, 1)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
Convey("When the provided edition cannot be found, return a 404 not found error.", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
r, err := createRequestWithAuth("DELETE", "http://localhost:22000/datasets/123/editions/2017/versions/1", nil)
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetEditionFunc: func(datasetID, editionID, state string) (*models.EditionUpdate, error) {
return nil, errs.ErrEditionNotFound
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(w.Body.String(), ShouldContainSubstring, errs.ErrEditionNotFound.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 0)
So(len(mockedDataStore.GetEditionCalls()), ShouldEqual, 1)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
Convey("When detached is called against a version other than latest, return an internal server error", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
r, err := createRequestWithAuth("DELETE", "http://localhost:22000/datasets/123/editions/2017/versions/1", nil)
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetEditionFunc: func(datasetID, editionID, state string) (*models.EditionUpdate, error) {
return &models.EditionUpdate{
Next: &models.Edition{
State: models.EditionConfirmedState,
Links: &models.EditionUpdateLinks{LatestVersion: &models.LinkObject{ID: "2"}}}}, nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(w.Body.String(), ShouldContainSubstring, errs.ErrInternalServer.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 0)
So(len(mockedDataStore.GetEditionCalls()), ShouldEqual, 1)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
Convey("When state is neither edition-confirmed or associated, return an internal server error", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
r, err := createRequestWithAuth("DELETE", "http://localhost:22000/datasets/123/editions/2017/versions/1", nil)
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetEditionFunc: func(datasetID, editionID, state string) (*models.EditionUpdate, error) {
return &models.EditionUpdate{
Next: &models.Edition{
State: models.PublishedState,
Links: &models.EditionUpdateLinks{LatestVersion: &models.LinkObject{ID: "1"}}}}, nil
},
GetVersionFunc: func(datasetID, editionID, version, state string) (*models.Version, error) {
return &models.Version{}, nil
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(w.Body.String(), ShouldContainSubstring, errs.ErrInternalServer.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 0)
So(len(mockedDataStore.GetEditionCalls()), ShouldEqual, 1)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
Convey("When the requested version cannot be found, return a not found error", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
r, err := createRequestWithAuth("DELETE", "http://localhost:22000/datasets/123/editions/2017/versions/1", nil)
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetEditionFunc: func(datasetID, editionID, state string) (*models.EditionUpdate, error) {
return &models.EditionUpdate{
Next: &models.Edition{
State: models.EditionConfirmedState,
Links: &models.EditionUpdateLinks{LatestVersion: &models.LinkObject{ID: "1"}}}}, nil
},
GetVersionFunc: func(datasetID, editionID, version, state string) (*models.Version, error) {
return nil, errs.ErrVersionNotFound
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusNotFound)
So(w.Body.String(), ShouldContainSubstring, errs.ErrVersionNotFound.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetEditionCalls()), ShouldEqual, 1)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
Convey("When updating the version fails, return an internal server error", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
r, err := createRequestWithAuth("DELETE", "http://localhost:22000/datasets/123/editions/2017/versions/1", nil)
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetEditionFunc: func(datasetID, editionID, state string) (*models.EditionUpdate, error) {
return &models.EditionUpdate{
Next: &models.Edition{
State: models.EditionConfirmedState,
Links: &models.EditionUpdateLinks{LatestVersion: &models.LinkObject{ID: "1"}}}}, nil
},
GetDatasetFunc: func(ID string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{}, nil
},
GetVersionFunc: func(datasetID, editionID, version, state string) (*models.Version, error) {
return &models.Version{}, nil
},
UpdateVersionFunc: func(ID string, version *models.Version) error {
return errs.ErrInternalServer
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(w.Body.String(), ShouldContainSubstring, errs.ErrInternalServer.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetEditionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 1)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
Convey("When edition update fails whilst rolling back the edition, return an internal server error", t, func() {
generatorMock := &mocks.DownloadsGeneratorMock{
GenerateFunc: func(context.Context, string, string, string, string) error {
return nil
},
}
r, err := createRequestWithAuth("DELETE", "http://localhost:22000/datasets/123/editions/2017/versions/1", nil)
So(err, ShouldBeNil)
w := httptest.NewRecorder()
mockedDataStore := &storetest.StorerMock{
GetEditionFunc: func(datasetID, editionID, state string) (*models.EditionUpdate, error) {
return &models.EditionUpdate{
Next: &models.Edition{
State: models.EditionConfirmedState,
Links: &models.EditionUpdateLinks{LatestVersion: &models.LinkObject{ID: "1"}}}}, nil
},
GetVersionFunc: func(datasetID, editionID, version, state string) (*models.Version, error) {
return &models.Version{}, nil
},
GetDatasetFunc: func(ID string) (*models.DatasetUpdate, error) {
return &models.DatasetUpdate{Current: &models.Dataset{}}, nil
},
UpdateVersionFunc: func(ID string, version *models.Version) error {
return nil
},
UpsertEditionFunc: func(datasetID string, edition string, editionDoc *models.EditionUpdate) error {
return errs.ErrInternalServer
},
}
datasetPermissions := getAuthorisationHandlerMock()
permissions := getAuthorisationHandlerMock()
api := GetAPIWithMocks(mockedDataStore, generatorMock, datasetPermissions, permissions)
api.Router.ServeHTTP(w, r)
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(w.Body.String(), ShouldContainSubstring, errs.ErrInternalServer.Error())
So(datasetPermissions.Required.Calls, ShouldEqual, 1)
So(permissions.Required.Calls, ShouldEqual, 0)
So(len(mockedDataStore.GetVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.GetEditionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpdateVersionCalls()), ShouldEqual, 1)
So(len(mockedDataStore.UpsertEditionCalls()), ShouldEqual, 1)
So(len(generatorMock.GenerateCalls()), ShouldEqual, 0)
})
}
func assertInternalServerErr(w *httptest.ResponseRecorder) {
So(w.Code, ShouldEqual, http.StatusInternalServerError)
So(strings.TrimSpace(w.Body.String()), ShouldEqual, errs.ErrInternalServer.Error())
}
| [
"\"ENABLE_DETACH_DATASET\"",
"\"ENABLE_DETACH_DATASET\""
] | [] | [
"ENABLE_DETACH_DATASET"
] | [] | ["ENABLE_DETACH_DATASET"] | go | 1 | 0 | |
tests/prometheus_tests.py | import json
import os
import random
import requests
import unittest
from locust import HttpLocust, TaskSet, task, events
import util_copy as util
base_url = os.environ.get('base_url', 'http://172.17.0.1:8000')
HERE = os.path.dirname(os.path.abspath(__file__))
def get_credentials():
config = json.load(open(
os.path.join(HERE, 'locust_credentials_local.json')))
config['headers'] = {}
config['url_suffix'] = ''
if config.get('use_jwt'):
config['headers'] = util.get_jwt_headers()
else:
if config.get('access_token'):
config['headers']['Authorization'] = 'Bearer {}'.format(config.get('access_token'))
config['url_suffix'] = '/v2'
else:
print("Must provide either use_jwt or an access_token.")
raise Exception()
return config
credentials = get_credentials()
class BasicAbacoTasks(object):
def __init__(self):
self.actor_ids = []
self.message_count = 0
def get_headers(self):
with open('jwt-abaco_admin', 'r') as f:
jwt_default = f.read()
headers = {'X-Jwt-Assertion-AGAVE-PROD': jwt_default}
return headers
def create_actor(self, max_workers):
headers = self.get_headers()
data = {'image': 'jstubbs/abaco_test',
'name': 'abaco_test_suite_python',
'maxWorkers':max_workers
}
rsp = requests.post('{}/actors'.format(base_url), data=data, headers=headers)
result = util.basic_response_checks(rsp)
aid = result.get('id')
self.actor_ids.append(aid)
print("Created actor: {}".format(aid))
return aid
def send_actor_message(self, aid):
headers = self.get_headers()
url = '{}/actors/{}/messages'.format(base_url, aid)
data = {'message': 'testing execution'}
rsp = requests.post(url, data=data, headers=headers)
result = util.basic_response_checks(rsp)
# rsp = self.client.post('/actors{}/{}/messages'.format(credentials['url_suffix'], aid),
# headers=credentials['headers'],
# data={'message': '{"sleep": 1, "iterations": 3}'})
self.message_count += 1
def get_random_aid(self):
if len(self.actor_ids) <= 0:
return
return self.actor_ids[random.randint(0, len(self.actor_ids)-1)]
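    # NOTE: register_simple_actor/delete_actor assume a Locust-style self.client HTTP session;
    # main() below bypasses them and talks to the API with requests directly.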
def register_simple_actor(self):
rsp = self.client.post('/actors{}'.format(credentials['url_suffix']),
headers=credentials['headers'],
json={'image': 'abacosamples/sleep_loop'})
self.actor_ids.append(rsp.json().get('result').get('id'))
def delete_actor(self):
aid = self.actor_ids.pop()
self.client.delete('/actors{}/{}'.format(credentials['url_suffix'], aid),
headers=credentials['headers'])
def on_start(self):
self.register_simple_actor()
def on_stop(self):
self.delete_actor()
# def send_actor_message(self):
# aid = self.get_random_aid()
# rsp = self.client.post('/actors{}/{}/messages'.format(credentials['url_suffix'], aid),
# headers=credentials['headers'],
# data={'message': '{"sleep": 1, "iterations": 3}'})
# self.message_count += 1
def check_actor_msg_count(self, aid):
msg_count = 0
        # TODO: query Prometheus for the actor's message count.
return msg_count
def main():
a = BasicAbacoTasks()
# a.register_simple_actor()
actor_1 = a.create_actor(2)
actor_2 = a.create_actor(100)
for i in range(100):
a.send_actor_message(actor_1)
a.send_actor_message(actor_2)
if __name__ == '__main__':
main()
| [] | [] | [
"base_url"
] | [] | ["base_url"] | python | 1 | 0 | |
vendor/github.com/percona/percona-backup-mongodb/internal/utils/utils.go | package utils
import (
"io/ioutil"
"os"
"os/user"
"path/filepath"
"reflect"
"strconv"
"strings"
"github.com/alecthomas/kingpin"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
)
func LoadOptionsFromFile(filename string, c *kingpin.ParseContext, opts interface{}) error {
filename = Expand(filename)
buf, err := ioutil.ReadFile(filepath.Clean(filename))
if err != nil {
return errors.Wrap(err, "cannot load configuration from file")
}
if err = yaml.Unmarshal(buf, opts); err != nil {
return errors.Wrapf(err, "cannot unmarshal yaml file %s: %s", filename, err)
}
s := reflect.ValueOf(opts).Elem()
walk(c, s)
return nil
}
func walk(c *kingpin.ParseContext, opts reflect.Value) {
// Overwrite values from the config with the values from the command line (kingpin.ParseContext)
t := opts.Type()
for i := 0; i < opts.NumField(); i++ {
f := opts.Field(i)
flagName := t.Field(i).Tag.Get("kingpin")
if f.Kind() == reflect.Struct {
walk(c, f)
}
if f.CanSet() && flagName != "" {
argFromContext := getArg(c, flagName)
if argFromContext != "" {
switch f.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if iv, err := strconv.Atoi(argFromContext); err == nil {
f.SetInt(int64(iv))
}
case reflect.Bool:
if bv, err := strconv.ParseBool(argFromContext); err == nil {
f.SetBool(bv)
}
case reflect.String:
f.SetString(argFromContext)
}
}
}
}
}
func getArg(c *kingpin.ParseContext, argName string) string {
for _, v := range c.Elements {
if value, ok := v.Clause.(*kingpin.FlagClause); ok {
if value.Model().Name == argName {
return *v.Value
}
}
}
return ""
}
func Expand(path string) string {
path = os.ExpandEnv(path)
dir := os.Getenv("HOME")
usr, err := user.Current()
if err == nil {
dir = usr.HomeDir
}
if path == "~" {
return dir
}
if strings.HasPrefix(path, "~/") {
return filepath.Join(dir, path[2:])
}
return path
}
| [
"\"HOME\""
] | [] | [
"HOME"
] | [] | ["HOME"] | go | 1 | 0 | |
PCN/PyPCN.py | #!/usr/bin/python3
from ctypes import *
import cv2
import numpy as np
import sys
import os
import time
from ipdb import set_trace as dbg
from enum import IntEnum
class CPoint(Structure):
_fields_ = [("x", c_int),
("y", c_int)]
FEAT_POINTS = 14
class CWindow(Structure):
_fields_ = [("x", c_int),
("y", c_int),
("width", c_int),
("angle", c_int),
("score", c_float),
("points",CPoint*FEAT_POINTS)]
class FeatEnam(IntEnum):
CHIN_0 = 0
CHIN_1 = 1
CHIN_2 = 2
CHIN_3 = 3
CHIN_4 = 4
CHIN_5 = 5
CHIN_6 = 6
CHIN_7 = 7
CHIN_8 = 8
NOSE = 9
EYE_LEFT = 10
EYE_RIGHT = 11
MOUTH_LEFT = 12
MOUTH_RIGHT = 13
FEAT_POINTS = 14
lib = CDLL("/usr/local/lib/libPCN.so")
init_detector = lib.init_detector
#void *init_detector(const char *detection_model_path,
# const char *pcn1_proto, const char *pcn2_proto, const char *pcn3_proto,
# const char *tracking_model_path, const char *tracking_proto,
# int min_face_size, float pyramid_scale_factor, float detection_thresh_stage1,
# float detection_thresh_stage2, float detection_thresh_stage3, int tracking_period,
# float tracking_thresh, int do_smooth)
init_detector.argtypes = [
c_char_p, c_char_p, c_char_p,
c_char_p, c_char_p, c_char_p,
c_int,c_float,c_float,c_float,
c_float,c_int,c_float,c_int]
init_detector.restype = c_void_p
#CWindow* detect_faces(void* pcn, unsigned char* raw_img,size_t rows, size_t cols, int *lwin)
detect_faces = lib.detect_faces
detect_faces.argtypes = [c_void_p, POINTER(c_ubyte),c_size_t,c_size_t,POINTER(c_int)]
detect_faces.restype = POINTER(CWindow)
#CWindow* detect_track_faces(void* pcn, unsigned char* raw_img,size_t rows, size_t cols, int *lwin)
detect_track_faces = lib.detect_track_faces
detect_track_faces.argtypes = [c_void_p, POINTER(c_ubyte),c_size_t,c_size_t,POINTER(c_int)]
detect_track_faces.restype = POINTER(CWindow)
#void free_faces(CWindow* wins)
free_faces = lib.free_faces
free_faces.argtypes= [c_void_p]
# void free_detector(void *pcn)
free_detector = lib.free_detector
free_detector.argtypes= [c_void_p]
CYAN=(255,255,0)
BLUE=(255,0,0)
RED=(0,0,255)
GREEN=(0,255,0)
YELLOW=(0,255,255)
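# Draw a rotated face box: rotate the four axis-aligned corners about the box
# centre by the detected angle; the blue edge marks the box's top side.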
def DrawFace(win,img):
width = 2
x1 = win.x
y1 = win.y
x2 = win.width + win.x - 1
y2 = win.width + win.y - 1
centerX = (x1 + x2) / 2
centerY = (y1 + y2) / 2
angle = win.angle
R = cv2.getRotationMatrix2D((centerX,centerY),angle,1)
pts = np.array([[x1,y1,1],[x1,y2,1],[x2,y2,1],[x2,y1,1]], np.int32)
pts = (pts @ R.T).astype(int) #Rotate points
pts = pts.reshape((-1,1,2))
cv2.polylines(img,[pts],True,CYAN,width)
cv2.line(img, (pts[0][0][0],pts[0][0][1]), (pts[3][0][0],pts[3][0][1]), BLUE, width)
def DrawPoints(win,img):
width = 2
f = FeatEnam.NOSE
cv2.circle(img,(win.points[f].x,win.points[f].y),width,GREEN,-1)
f = FeatEnam.EYE_LEFT
cv2.circle(img,(win.points[f].x,win.points[f].y),width,YELLOW,-1)
f = FeatEnam.EYE_RIGHT
cv2.circle(img,(win.points[f].x,win.points[f].y),width,YELLOW,-1)
f = FeatEnam.MOUTH_LEFT
cv2.circle(img,(win.points[f].x,win.points[f].y),width,RED,-1)
f = FeatEnam.MOUTH_RIGHT
cv2.circle(img,(win.points[f].x,win.points[f].y),width,RED,-1)
for i in range(8):
cv2.circle(img,(win.points[i].x,win.points[i].y),width,BLUE,-1)
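# Limit OpenMP threading via OMP_NUM_THREADS (applied before the detector is initialised in __main__).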
def SetThreadCount(threads):
os.environ['OMP_NUM_THREADS'] = str(threads)
def c_str(str_in):
return c_char_p(str_in.encode('utf-8'))
video_flag = 0
if __name__=="__main__":
SetThreadCount(1)
path = '/usr/local/share/pcn/'
detection_model_path = c_str(path + "PCN.caffemodel")
pcn1_proto = c_str(path + "PCN-1.prototxt")
pcn2_proto = c_str(path + "PCN-2.prototxt")
pcn3_proto = c_str(path + "PCN-3.prototxt")
tracking_model_path = c_str(path + "PCN-Tracking.caffemodel")
tracking_proto = c_str(path + "PCN-Tracking.prototxt")
if video_flag:
cap = cv2.VideoCapture(0)
detector = init_detector(detection_model_path,pcn1_proto,pcn2_proto,pcn3_proto,
tracking_model_path,tracking_proto,
40,1.45,0.5,0.5,0.98,30,0.9,1)
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
fps = cap.get(cv2.CAP_PROP_FPS)
while cap.isOpened():
ret, frame = cap.read()
if ret == False:
break
start = time.time()
face_count = c_int(0)
raw_data = frame.ctypes.data_as(POINTER(c_ubyte))
windows = detect_track_faces(detector, raw_data,
int(height), int(width),
pointer(face_count))
end = time.time()
for i in range(face_count.value):
DrawFace(windows[i],frame)
DrawPoints(windows[i],frame)
free_faces(windows)
fps = int(1 / (end - start))
cv2.putText(frame, str(fps) + "fps", (20, 45), 4, 1, (0, 0, 125))
cv2.imshow('PCN', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
detector = init_detector(detection_model_path,pcn1_proto,pcn2_proto,pcn3_proto,
tracking_model_path,tracking_proto,
40,1.45,0.5,0.5,0.98,30,0.9,0)
for i in range(1, 27):
frame = cv2.imread("imgs/" + str(i) + ".jpg")
start = time.time()
face_count = c_int(0)
raw_data = frame.ctypes.data_as(POINTER(c_ubyte))
windows = detect_faces(detector, raw_data,
frame.shape[0], frame.shape[1],
pointer(face_count))
end = time.time()
print(i, end - start, "s")
for i in range(face_count.value):
DrawFace(windows[i],frame)
DrawPoints(windows[i],frame)
free_faces(windows)
cv2.imshow('PCN', frame)
cv2.waitKey()
free_detector(detector)
| [] | [] | [
"OMP_NUM_THREADS"
] | [] | ["OMP_NUM_THREADS"] | python | 1 | 0 | |
train_src/train_teacher_building.py | import os
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
from os import path, makedirs, listdir
import sys
import numpy as np
np.random.seed(1)
import random
random.seed(1)
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch.optim.lr_scheduler as lr_scheduler
from apex import amp
from util.adamw import AdamW
from util.losses import dice_round, ComboLoss
import pandas as pd
from tqdm import tqdm
import timeit
import cv2
from zoo.models import SeResNext50_Unet_Loc
from imgaug import augmenters as iaa
from util.utils import *
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import gc
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
train_dirs = ['../data/train', '../data/tier3']
models_folder = '../weights'
input_shape = (512, 512)
all_files = []
for d in train_dirs:
for f in sorted(listdir(path.join(d, 'images'))):
if '_pre_disaster.png' in f:
all_files.append(path.join(d, 'images', f))
class TrainData(Dataset):
def __init__(self, train_idxs):
super().__init__()
self.train_idxs = train_idxs
self.elastic = iaa.ElasticTransformation(alpha=(0.25, 1.2), sigma=0.2)
def __len__(self):
return len(self.train_idxs)
def __getitem__(self, idx):
_idx = self.train_idxs[idx]
fn = all_files[_idx]
img = cv2.imread(fn, cv2.IMREAD_COLOR)
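        # Rarely (~1.5% of samples) train on the post-disaster image while keeping the pre-disaster building mask.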
if random.random() > 0.985:
img = cv2.imread(fn.replace('_pre_disaster', '_post_disaster'), cv2.IMREAD_COLOR)
msk0 = cv2.imread(fn.replace('/images/', '/masks/'), cv2.IMREAD_UNCHANGED)
if random.random() > 0.5:
img = img[::-1, ...]
msk0 = msk0[::-1, ...]
if random.random() > 0.05:
rot = random.randrange(4)
if rot > 0:
img = np.rot90(img, k=rot)
msk0 = np.rot90(msk0, k=rot)
if random.random() > 0.9:
shift_pnt = (random.randint(-320, 320), random.randint(-320, 320))
img = shift_image(img, shift_pnt)
msk0 = shift_image(msk0, shift_pnt)
if random.random() > 0.9:
rot_pnt = (img.shape[0] // 2 + random.randint(-320, 320), img.shape[1] // 2 + random.randint(-320, 320))
scale = 0.9 + random.random() * 0.2
angle = random.randint(0, 20) - 10
if (angle != 0) or (scale != 1):
img = rotate_image(img, angle, scale, rot_pnt)
msk0 = rotate_image(msk0, angle, scale, rot_pnt)
crop_size = input_shape[0]
if random.random() > 0.3:
crop_size = random.randint(int(input_shape[0] / 1.1), int(input_shape[0] / 0.9))
bst_x0 = random.randint(0, img.shape[1] - crop_size)
bst_y0 = random.randint(0, img.shape[0] - crop_size)
bst_sc = -1
try_cnt = random.randint(1, 5)
for i in range(try_cnt):
x0 = random.randint(0, img.shape[1] - crop_size)
y0 = random.randint(0, img.shape[0] - crop_size)
_sc = msk0[y0:y0+crop_size, x0:x0+crop_size].sum()
if _sc > bst_sc:
bst_sc = _sc
bst_x0 = x0
bst_y0 = y0
x0 = bst_x0
y0 = bst_y0
img = img[y0:y0+crop_size, x0:x0+crop_size, :]
msk0 = msk0[y0:y0+crop_size, x0:x0+crop_size]
if crop_size != input_shape[0]:
img = cv2.resize(img, input_shape, interpolation=cv2.INTER_LINEAR)
msk0 = cv2.resize(msk0, input_shape, interpolation=cv2.INTER_LINEAR)
if random.random() > 0.99:
img = shift_channels(img, random.randint(-5, 5), random.randint(-5, 5), random.randint(-5, 5))
if random.random() > 0.99:
img = change_hsv(img, random.randint(-5, 5), random.randint(-5, 5), random.randint(-5, 5))
if random.random() > 0.99:
if random.random() > 0.99:
img = clahe(img)
elif random.random() > 0.99:
img = gauss_noise(img)
elif random.random() > 0.99:
img = cv2.blur(img, (3, 3))
elif random.random() > 0.99:
if random.random() > 0.99:
img = saturation(img, 0.9 + random.random() * 0.2)
elif random.random() > 0.99:
img = brightness(img, 0.9 + random.random() * 0.2)
elif random.random() > 0.99:
img = contrast(img, 0.9 + random.random() * 0.2)
if random.random() > 0.999:
el_det = self.elastic.to_deterministic()
img = el_det.augment_image(img)
msk = msk0[..., np.newaxis]
msk = (msk > 127) * 1
img = preprocess_inputs(img)
img = torch.from_numpy(img.transpose((2, 0, 1))).float()
msk = torch.from_numpy(msk.transpose((2, 0, 1))).long()
sample = {'img': img, 'msk': msk, 'fn': fn}
return sample
class ValData(Dataset):
def __init__(self, image_idxs):
super().__init__()
self.image_idxs = image_idxs
def __len__(self):
return len(self.image_idxs)
def __getitem__(self, idx):
_idx = self.image_idxs[idx]
fn = all_files[_idx]
img = cv2.imread(fn, cv2.IMREAD_COLOR)
msk0 = cv2.imread(fn.replace('/images/', '/masks/'), cv2.IMREAD_UNCHANGED)
msk = msk0[..., np.newaxis]
msk = (msk > 127) * 1
img = preprocess_inputs(img)
img = torch.from_numpy(img.transpose((2, 0, 1))).float()
msk = torch.from_numpy(msk.transpose((2, 0, 1))).long()
sample = {'img': img, 'msk': msk, 'fn': fn}
return sample
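# Mean Dice score of the network over the validation loader at a fixed 0.5 threshold.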
def validate(net, data_loader):
dices0 = []
_thr = 0.5
with torch.no_grad():
for i, sample in enumerate(tqdm(data_loader)):
msks = sample["msk"].numpy()
imgs = sample["img"].cuda(non_blocking=True)
            out = net(imgs)
msk_pred = torch.sigmoid(out[:, 0, ...]).cpu().numpy()
for j in range(msks.shape[0]):
dices0.append(dice(msks[j, 0], msk_pred[j] > _thr))
d0 = np.mean(dices0)
print("Val Dice: {}".format(d0))
return d0
def evaluate_val(data_val, best_score, model, snapshot_name, current_epoch):
model = model.eval()
d = validate(model, data_loader=data_val)
if d > best_score:
torch.save({
'epoch': current_epoch + 1,
'state_dict': model.state_dict(),
'best_score': d,
}, path.join(models_folder, snapshot_name + '_best'))
best_score = d
print("score: {}\tscore_best: {}".format(d, best_score))
return best_score
def train_epoch(current_epoch, seg_loss, model, optimizer, scheduler, train_data_loader):
losses = AverageMeter()
dices = AverageMeter()
iterator = tqdm(train_data_loader)
model.train()
for i, sample in enumerate(iterator):
imgs = sample["img"].cuda(non_blocking=True)
msks = sample["msk"].cuda(non_blocking=True)
out = model(imgs)
loss = seg_loss(out, msks)
with torch.no_grad():
_probs = torch.sigmoid(out[:, 0, ...])
dice_sc = 1 - dice_round(_probs, msks[:, 0, ...])
losses.update(loss.item(), imgs.size(0))
dices.update(dice_sc, imgs.size(0))
iterator.set_description(
"epoch: {}; lr {:.7f}; Loss {loss.val:.4f} ({loss.avg:.4f}); Dice {dice.val:.4f} ({dice.avg:.4f})".format(
current_epoch, scheduler.get_lr()[-1], loss=losses, dice=dices))
optimizer.zero_grad()
# loss.backward()
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), 1.1)
optimizer.step()
scheduler.step(current_epoch)
print("epoch: {}; lr {:.7f}; Loss {loss.avg:.4f}; Dice {dice.avg:.4f}".format(
current_epoch, scheduler.get_lr()[-1], loss=losses, dice=dices))
if __name__ == '__main__':
t0 = timeit.default_timer()
makedirs(models_folder, exist_ok=True)
seed = int(sys.argv[1])
vis_dev = sys.argv[2]
# os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ["CUDA_VISIBLE_DEVICES"] = vis_dev
cudnn.benchmark = True
batch_size = 15
val_batch_size = 4
snapshot_name = 'res50_loc_{}_0'.format(seed)
train_idxs, val_idxs = train_test_split(np.arange(len(all_files)), test_size=0.1, random_state=seed)
np.random.seed(seed+123)
random.seed(seed+123)
steps_per_epoch = len(train_idxs) // batch_size
validation_steps = len(val_idxs) // val_batch_size
print('steps_per_epoch', steps_per_epoch, 'validation_steps', validation_steps)
data_train = TrainData(train_idxs)
val_train = ValData(val_idxs)
train_data_loader = DataLoader(data_train, batch_size=batch_size, num_workers=5, shuffle=True, pin_memory=False, drop_last=True)
val_data_loader = DataLoader(val_train, batch_size=val_batch_size, num_workers=5, shuffle=False, pin_memory=False)
model = SeResNext50_Unet_Loc().cuda()
params = model.parameters()
optimizer = AdamW(params, lr=0.00015, weight_decay=1e-6)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[15, 29, 43, 53, 65, 80, 90, 100, 110, 130, 150, 170, 180, 190], gamma=0.5)
seg_loss = ComboLoss({'dice': 1.0, 'focal': 10.0}, per_image=False).cuda()
best_score = 0
_cnt = -1
torch.cuda.empty_cache()
for epoch in range(150):
train_epoch(epoch, seg_loss, model, optimizer, scheduler, train_data_loader)
if epoch % 1 == 0:
_cnt += 1
torch.cuda.empty_cache()
best_score = evaluate_val(val_data_loader, best_score, model, snapshot_name, epoch)
elapsed = timeit.default_timer() - t0
print('Time: {:.3f} min'.format(elapsed / 60)) | [] | [] | [
"MKL_NUM_THREADS",
"NUMEXPR_NUM_THREADS",
"CUDA_VISIBLE_DEVICES",
"CUDA_DEVICE_ORDER",
"OMP_NUM_THREADS"
] | [] | ["MKL_NUM_THREADS", "NUMEXPR_NUM_THREADS", "CUDA_VISIBLE_DEVICES", "CUDA_DEVICE_ORDER", "OMP_NUM_THREADS"] | python | 5 | 0 | |
internal/build/env.go | package build
import (
"flag"
"fmt"
"os"
"strings"
)
var (
// These flags override values in build env.
GitCommitFlag = flag.String("git-commit", "", `Overrides git commit hash embedded into executables`)
GitBranchFlag = flag.String("git-branch", "", `Overrides git branch being built`)
GitTagFlag = flag.String("git-tag", "", `Overrides git tag being built`)
BuildnumFlag = flag.String("buildnum", "", `Overrides CI build number`)
PullRequestFlag = flag.Bool("pull-request", false, `Overrides pull request status of the build`)
CronJobFlag = flag.Bool("cron-job", false, `Overrides cron job status of the build`)
)
// Environment contains metadata provided by the build environment.
type Environment struct {
Name string // name of the environment
Repo string // name of GitHub repo
Commit, Branch, Tag string // Git info
Buildnum string
IsPullRequest bool
IsCronJob bool
}
func (env Environment) String() string {
return fmt.Sprintf("%s env (commit:%s branch:%s tag:%s buildnum:%s pr:%t)",
env.Name, env.Commit, env.Branch, env.Tag, env.Buildnum, env.IsPullRequest)
}
// Env returns metadata about the current CI environment, falling back to LocalEnv
// if not running on CI.
func Env() Environment {
switch {
case os.Getenv("CI") == "true" && os.Getenv("TRAVIS") == "true":
return Environment{
Name: "travis",
Repo: os.Getenv("TRAVIS_REPO_SLUG"),
Commit: os.Getenv("TRAVIS_COMMIT"),
Branch: os.Getenv("TRAVIS_BRANCH"),
Tag: os.Getenv("TRAVIS_TAG"),
Buildnum: os.Getenv("TRAVIS_BUILD_NUMBER"),
IsPullRequest: os.Getenv("TRAVIS_PULL_REQUEST") != "false",
IsCronJob: os.Getenv("TRAVIS_EVENT_TYPE") == "cron",
}
case os.Getenv("CI") == "True" && os.Getenv("APPVEYOR") == "True":
return Environment{
Name: "appveyor",
Repo: os.Getenv("APPVEYOR_REPO_NAME"),
Commit: os.Getenv("APPVEYOR_REPO_COMMIT"),
Branch: os.Getenv("APPVEYOR_REPO_BRANCH"),
Tag: os.Getenv("APPVEYOR_REPO_TAG_NAME"),
Buildnum: os.Getenv("APPVEYOR_BUILD_NUMBER"),
IsPullRequest: os.Getenv("APPVEYOR_PULL_REQUEST_NUMBER") != "",
IsCronJob: os.Getenv("APPVEYOR_SCHEDULED_BUILD") == "True",
}
default:
return LocalEnv()
}
}
// LocalEnv returns build environment metadata gathered from git.
func LocalEnv() Environment {
env := applyEnvFlags(Environment{Name: "local", Repo: "ubiq/spectrum-backend"})
head := readGitFile("HEAD")
if splits := strings.Split(head, " "); len(splits) == 2 {
head = splits[1]
} else {
return env
}
if env.Commit == "" {
env.Commit = readGitFile(head)
}
if env.Branch == "" {
if head != "HEAD" {
env.Branch = strings.TrimPrefix(head, "refs/heads/")
}
}
if info, err := os.Stat(".git/objects"); err == nil && info.IsDir() && env.Tag == "" {
env.Tag = firstLine(RunGit("tag", "-l", "--points-at", "HEAD"))
}
return env
}
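// firstLine returns the first line of s.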
func firstLine(s string) string {
return strings.Split(s, "\n")[0]
}
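// applyEnvFlags overlays command-line flag overrides onto env; flag.Parse must have been called first.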
func applyEnvFlags(env Environment) Environment {
if !flag.Parsed() {
panic("you need to call flag.Parse before Env or LocalEnv")
}
if *GitCommitFlag != "" {
env.Commit = *GitCommitFlag
}
if *GitBranchFlag != "" {
env.Branch = *GitBranchFlag
}
if *GitTagFlag != "" {
env.Tag = *GitTagFlag
}
if *BuildnumFlag != "" {
env.Buildnum = *BuildnumFlag
}
if *PullRequestFlag {
env.IsPullRequest = true
}
if *CronJobFlag {
env.IsCronJob = true
}
return env
}
| [
"\"CI\"",
"\"TRAVIS\"",
"\"TRAVIS_REPO_SLUG\"",
"\"TRAVIS_COMMIT\"",
"\"TRAVIS_BRANCH\"",
"\"TRAVIS_TAG\"",
"\"TRAVIS_BUILD_NUMBER\"",
"\"TRAVIS_PULL_REQUEST\"",
"\"TRAVIS_EVENT_TYPE\"",
"\"CI\"",
"\"APPVEYOR\"",
"\"APPVEYOR_REPO_NAME\"",
"\"APPVEYOR_REPO_COMMIT\"",
"\"APPVEYOR_REPO_BRANCH\"",
"\"APPVEYOR_REPO_TAG_NAME\"",
"\"APPVEYOR_BUILD_NUMBER\"",
"\"APPVEYOR_PULL_REQUEST_NUMBER\"",
"\"APPVEYOR_SCHEDULED_BUILD\""
] | [] | [
"APPVEYOR_REPO_TAG_NAME",
"TRAVIS_BUILD_NUMBER",
"TRAVIS_BRANCH",
"TRAVIS_PULL_REQUEST",
"TRAVIS_EVENT_TYPE",
"APPVEYOR_BUILD_NUMBER",
"CI",
"APPVEYOR",
"APPVEYOR_REPO_COMMIT",
"TRAVIS_REPO_SLUG",
"TRAVIS",
"TRAVIS_COMMIT",
"APPVEYOR_SCHEDULED_BUILD",
"APPVEYOR_REPO_BRANCH",
"APPVEYOR_REPO_NAME",
"APPVEYOR_PULL_REQUEST_NUMBER",
"TRAVIS_TAG"
] | [] | ["APPVEYOR_REPO_TAG_NAME", "TRAVIS_BUILD_NUMBER", "TRAVIS_BRANCH", "TRAVIS_PULL_REQUEST", "TRAVIS_EVENT_TYPE", "APPVEYOR_BUILD_NUMBER", "CI", "APPVEYOR", "APPVEYOR_REPO_COMMIT", "TRAVIS_REPO_SLUG", "TRAVIS", "TRAVIS_COMMIT", "APPVEYOR_SCHEDULED_BUILD", "APPVEYOR_REPO_BRANCH", "APPVEYOR_REPO_NAME", "APPVEYOR_PULL_REQUEST_NUMBER", "TRAVIS_TAG"] | go | 17 | 0 | |
cc/config/global.go | // Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"encoding/json"
"encoding/xml"
"fmt"
"io/ioutil"
"os"
//"path"
//"path/filepath"
"strconv"
"strings"
"android/soong/android"
"android/soong/remoteexec"
)
type QiifaAbiLibs struct {
XMLName xml.Name `xml:"abilibs"`
Library []string `xml:"library"`
}
var (
// Flags used by lots of devices. Putting them in package static variables
// will save bytes in build.ninja so they aren't repeated for every file
commonGlobalCflags = []string{
"-DANDROID",
"-fmessage-length=0",
"-W",
"-Wall",
"-Wno-unused",
"-Winit-self",
"-Wpointer-arith",
"-Wunreachable-code-loop-increment",
// Make paths in deps files relative
"-no-canonical-prefixes",
"-fno-canonical-system-headers",
"-DNDEBUG",
"-UDEBUG",
"-fno-exceptions",
"-Wno-multichar",
"-O2",
"-g",
"-fdebug-info-for-profiling",
"-fno-strict-aliasing",
"-Werror=date-time",
"-Werror=pragma-pack",
"-Werror=pragma-pack-suspicious-include",
"-Werror=string-plus-int",
"-Werror=unreachable-code-loop-increment",
}
commonGlobalConlyflags = []string{}
deviceGlobalCflags = []string{
"-fdiagnostics-color",
"-ffunction-sections",
"-fdata-sections",
"-fno-short-enums",
"-funwind-tables",
"-fstack-protector-strong",
"-Wa,--noexecstack",
"-D_FORTIFY_SOURCE=2",
"-Wstrict-aliasing=2",
"-Werror=return-type",
"-Werror=non-virtual-dtor",
"-Werror=address",
"-Werror=sequence-point",
"-Werror=format-security",
}
deviceGlobalCppflags = []string{
"-fvisibility-inlines-hidden",
}
deviceGlobalLdflags = []string{
"-Wl,-z,noexecstack",
"-Wl,-z,relro",
"-Wl,-z,now",
"-Wl,--build-id=md5",
"-Wl,--warn-shared-textrel",
"-Wl,--fatal-warnings",
"-Wl,--no-undefined-version",
// TODO: Eventually we should link against a libunwind.a with hidden symbols, and then these
// --exclude-libs arguments can be removed.
"-Wl,--exclude-libs,libgcc.a",
"-Wl,--exclude-libs,libgcc_stripped.a",
"-Wl,--exclude-libs,libunwind_llvm.a",
"-Wl,--exclude-libs,libunwind.a",
"-Wl,--icf=safe",
}
deviceGlobalLldflags = append(ClangFilterUnknownLldflags(deviceGlobalLdflags),
[]string{
"-fuse-ld=lld",
}...)
hostGlobalCflags = []string{}
hostGlobalCppflags = []string{}
hostGlobalLdflags = []string{}
hostGlobalLldflags = []string{"-fuse-ld=lld"}
commonGlobalCppflags = []string{
"-Wsign-promo",
}
noOverrideGlobalCflags = []string{
"-Werror=bool-operation",
"-Werror=implicit-int-float-conversion",
"-Werror=int-in-bool-context",
"-Werror=int-to-pointer-cast",
"-Werror=pointer-to-int-cast",
"-Werror=string-compare",
"-Werror=xor-used-as-pow",
// http://b/161386391 for -Wno-void-pointer-to-enum-cast
"-Wno-void-pointer-to-enum-cast",
// http://b/161386391 for -Wno-void-pointer-to-int-cast
"-Wno-void-pointer-to-int-cast",
// http://b/161386391 for -Wno-pointer-to-int-cast
"-Wno-pointer-to-int-cast",
// SDClang does not support -Werror=fortify-source.
// TODO: b/142476859
// "-Werror=fortify-source",
}
IllegalFlags = []string{
"-w",
}
CStdVersion = "gnu99"
CppStdVersion = "gnu++17"
ExperimentalCStdVersion = "gnu11"
ExperimentalCppStdVersion = "gnu++2a"
SDClang = false
SDClangPath = ""
ForceSDClangOff = false
// prebuilts/clang default settings.
ClangDefaultBase = "prebuilts/clang/host"
ClangDefaultVersion = "clang-r416183b1"
ClangDefaultShortVersion = "12.0.7"
// Directories with warnings from Android.bp files.
WarningAllowedProjects = []string{
"device/",
"vendor/",
}
// Directories with warnings from Android.mk files.
WarningAllowedOldProjects = []string{}
QiifaAbiLibraryList = []string{}
)
var pctx = android.NewPackageContext("android/soong/cc/config")
func init() {
if android.BuildOs == android.Linux {
commonGlobalCflags = append(commonGlobalCflags, "-fdebug-prefix-map=/proc/self/cwd=")
}
qiifaBuildConfig := os.Getenv("QIIFA_BUILD_CONFIG")
if _, err := os.Stat(qiifaBuildConfig); !os.IsNotExist(err) {
data, _ := ioutil.ReadFile(qiifaBuildConfig)
var qiifalibs QiifaAbiLibs
_ = xml.Unmarshal([]byte(data), &qiifalibs)
for i := 0; i < len(qiifalibs.Library); i++ {
QiifaAbiLibraryList = append(QiifaAbiLibraryList, qiifalibs.Library[i])
}
}
staticVariableExportedToBazel("CommonGlobalConlyflags", commonGlobalConlyflags)
staticVariableExportedToBazel("DeviceGlobalCppflags", deviceGlobalCppflags)
staticVariableExportedToBazel("DeviceGlobalLdflags", deviceGlobalLdflags)
staticVariableExportedToBazel("DeviceGlobalLldflags", deviceGlobalLldflags)
staticVariableExportedToBazel("HostGlobalCppflags", hostGlobalCppflags)
staticVariableExportedToBazel("HostGlobalLdflags", hostGlobalLdflags)
staticVariableExportedToBazel("HostGlobalLldflags", hostGlobalLldflags)
// Export the static default CommonClangGlobalCflags to Bazel.
// TODO(187086342): handle cflags that are set in VariableFuncs.
commonClangGlobalCFlags := append(
ClangFilterUnknownCflags(commonGlobalCflags),
[]string{
"${ClangExtraCflags}",
// Default to zero initialization.
"-ftrivial-auto-var-init=zero",
"-enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang",
}...)
exportedVars.Set("CommonClangGlobalCflags", variableValue(commonClangGlobalCFlags))
pctx.VariableFunc("CommonClangGlobalCflags", func(ctx android.PackageVarContext) string {
flags := ClangFilterUnknownCflags(commonGlobalCflags)
flags = append(flags, "${ClangExtraCflags}")
// http://b/131390872
// Automatically initialize any uninitialized stack variables.
// Prefer zero-init if multiple options are set.
if ctx.Config().IsEnvTrue("AUTO_ZERO_INITIALIZE") {
flags = append(flags, "-ftrivial-auto-var-init=zero -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang")
} else if ctx.Config().IsEnvTrue("AUTO_PATTERN_INITIALIZE") {
flags = append(flags, "-ftrivial-auto-var-init=pattern")
} else if ctx.Config().IsEnvTrue("AUTO_UNINITIALIZE") {
flags = append(flags, "-ftrivial-auto-var-init=uninitialized")
} else {
// Default to zero initialization.
flags = append(flags, "-ftrivial-auto-var-init=zero -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang")
}
return strings.Join(flags, " ")
})
// Export the static default DeviceClangGlobalCflags to Bazel.
// TODO(187086342): handle cflags that are set in VariableFuncs.
deviceClangGlobalCflags := append(ClangFilterUnknownCflags(deviceGlobalCflags), "${ClangExtraTargetCflags}")
exportedVars.Set("DeviceClangGlobalCflags", variableValue(deviceClangGlobalCflags))
pctx.VariableFunc("DeviceClangGlobalCflags", func(ctx android.PackageVarContext) string {
if ctx.Config().Fuchsia() {
return strings.Join(ClangFilterUnknownCflags(deviceGlobalCflags), " ")
} else {
return strings.Join(deviceClangGlobalCflags, " ")
}
})
staticVariableExportedToBazel("HostClangGlobalCflags", ClangFilterUnknownCflags(hostGlobalCflags))
staticVariableExportedToBazel("NoOverrideClangGlobalCflags", append(ClangFilterUnknownCflags(noOverrideGlobalCflags), "${ClangExtraNoOverrideCflags}"))
staticVariableExportedToBazel("CommonClangGlobalCppflags", append(ClangFilterUnknownCflags(commonGlobalCppflags), "${ClangExtraCppflags}"))
staticVariableExportedToBazel("ClangExternalCflags", []string{"${ClangExtraExternalCflags}"})
// Everything in these lists is a crime against abstraction and dependency tracking.
// Do not add anything to this list.
pctx.PrefixedExistentPathsForSourcesVariable("CommonGlobalIncludes", "-I",
[]string{
"system/core/include",
"system/logging/liblog/include",
"system/media/audio/include",
"hardware/libhardware/include",
"hardware/libhardware_legacy/include",
"hardware/ril/include",
"frameworks/native/include",
"frameworks/native/opengl/include",
"frameworks/av/include",
})
setSdclangVars()
pctx.SourcePathVariable("ClangDefaultBase", ClangDefaultBase)
pctx.VariableFunc("ClangBase", func(ctx android.PackageVarContext) string {
if override := ctx.Config().Getenv("LLVM_PREBUILTS_BASE"); override != "" {
return override
}
return "${ClangDefaultBase}"
})
pctx.VariableFunc("ClangVersion", func(ctx android.PackageVarContext) string {
if override := ctx.Config().Getenv("LLVM_PREBUILTS_VERSION"); override != "" {
return override
}
return ClangDefaultVersion
})
pctx.StaticVariable("ClangPath", "${ClangBase}/${HostPrebuiltTag}/${ClangVersion}")
pctx.StaticVariable("ClangBin", "${ClangPath}/bin")
pctx.VariableFunc("ClangShortVersion", func(ctx android.PackageVarContext) string {
if override := ctx.Config().Getenv("LLVM_RELEASE_VERSION"); override != "" {
return override
}
return ClangDefaultShortVersion
})
pctx.StaticVariable("ClangAsanLibDir", "${ClangBase}/linux-x86/${ClangVersion}/lib64/clang/${ClangShortVersion}/lib/linux")
// These are tied to the version of LLVM directly in external/llvm, so they might trail the host prebuilts
// being used for the rest of the build process.
pctx.SourcePathVariable("RSClangBase", "prebuilts/clang/host")
pctx.SourcePathVariable("RSClangVersion", "clang-3289846")
pctx.SourcePathVariable("RSReleaseVersion", "3.8")
pctx.StaticVariable("RSLLVMPrebuiltsPath", "${RSClangBase}/${HostPrebuiltTag}/${RSClangVersion}/bin")
pctx.StaticVariable("RSIncludePath", "${RSLLVMPrebuiltsPath}/../lib64/clang/${RSReleaseVersion}/include")
pctx.PrefixedExistentPathsForSourcesVariable("RsGlobalIncludes", "-I",
[]string{
"external/clang/lib/Headers",
"frameworks/rs/script_api/include",
})
pctx.VariableFunc("CcWrapper", func(ctx android.PackageVarContext) string {
if override := ctx.Config().Getenv("CC_WRAPPER"); override != "" {
return override + " "
}
return ""
})
pctx.StaticVariableWithEnvOverride("RECXXPool", "RBE_CXX_POOL", remoteexec.DefaultPool)
pctx.StaticVariableWithEnvOverride("RECXXLinksPool", "RBE_CXX_LINKS_POOL", remoteexec.DefaultPool)
pctx.StaticVariableWithEnvOverride("REClangTidyPool", "RBE_CLANG_TIDY_POOL", remoteexec.DefaultPool)
pctx.StaticVariableWithEnvOverride("RECXXLinksExecStrategy", "RBE_CXX_LINKS_EXEC_STRATEGY", remoteexec.LocalExecStrategy)
pctx.StaticVariableWithEnvOverride("REClangTidyExecStrategy", "RBE_CLANG_TIDY_EXEC_STRATEGY", remoteexec.LocalExecStrategy)
pctx.StaticVariableWithEnvOverride("REAbiDumperExecStrategy", "RBE_ABI_DUMPER_EXEC_STRATEGY", remoteexec.LocalExecStrategy)
pctx.StaticVariableWithEnvOverride("REAbiLinkerExecStrategy", "RBE_ABI_LINKER_EXEC_STRATEGY", remoteexec.LocalExecStrategy)
}
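// setSdclangVars reads the JSON configs referenced by $SDCLANG_AE_CONFIG and $SDCLANG_CONFIG:
// the "default" block is applied first, then any device-specific block keyed by
// $TARGET_BOARD_PLATFORM, and finally environment-variable overrides.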
func setSdclangVars() {
sdclangPath := ""
sdclangAEFlag := ""
sdclangFlags := ""
product := os.Getenv("TARGET_BOARD_PLATFORM")
aeConfigPath := os.Getenv("SDCLANG_AE_CONFIG")
sdclangConfigPath := os.Getenv("SDCLANG_CONFIG")
sdclangSA := os.Getenv("SDCLANG_SA_ENABLED")
type sdclangAEConfig struct {
SDCLANG_AE_FLAG string
}
// Load AE config file and set AE flag
if file, err := os.Open(aeConfigPath); err == nil {
decoder := json.NewDecoder(file)
aeConfig := sdclangAEConfig{}
if err := decoder.Decode(&aeConfig); err == nil {
sdclangAEFlag = aeConfig.SDCLANG_AE_FLAG
} else {
panic(err)
}
}
// Load SD Clang config file and set SD Clang variables
var sdclangConfig interface{}
if file, err := os.Open(sdclangConfigPath); err == nil {
decoder := json.NewDecoder(file)
// Parse the config file
if err := decoder.Decode(&sdclangConfig); err == nil {
config := sdclangConfig.(map[string]interface{})
// Retrieve the default block
if dev, ok := config["default"]; ok {
devConfig := dev.(map[string]interface{})
// FORCE_SDCLANG_OFF is required in the default block
if _, ok := devConfig["FORCE_SDCLANG_OFF"]; ok {
ForceSDClangOff = devConfig["FORCE_SDCLANG_OFF"].(bool)
}
// SDCLANG is optional in the default block
if _, ok := devConfig["SDCLANG"]; ok {
SDClang = devConfig["SDCLANG"].(bool)
}
// SDCLANG_PATH is required in the default block
if _, ok := devConfig["SDCLANG_PATH"]; ok {
sdclangPath = devConfig["SDCLANG_PATH"].(string)
} else {
panic("SDCLANG_PATH is required in the default block")
}
// SDCLANG_FLAGS is optional in the default block
if _, ok := devConfig["SDCLANG_FLAGS"]; ok {
sdclangFlags = devConfig["SDCLANG_FLAGS"].(string)
}
} else {
panic("Default block is required in the SD Clang config file")
}
// Retrieve the device specific block if it exists in the config file
if dev, ok := config[product]; ok {
devConfig := dev.(map[string]interface{})
// SDCLANG is optional in the device specific block
if _, ok := devConfig["SDCLANG"]; ok {
SDClang = devConfig["SDCLANG"].(bool)
}
// SDCLANG_PATH is optional in the device specific block
if _, ok := devConfig["SDCLANG_PATH"]; ok {
sdclangPath = devConfig["SDCLANG_PATH"].(string)
}
// SDCLANG_FLAGS is optional in the device specific block
if _, ok := devConfig["SDCLANG_FLAGS"]; ok {
sdclangFlags = devConfig["SDCLANG_FLAGS"].(string)
}
}
b, _ := strconv.ParseBool(sdclangSA)
if b {
llvmsa_loc := "llvmsa"
s := []string{sdclangFlags, "--compile-and-analyze", llvmsa_loc}
sdclangFlags = strings.Join(s, " ")
fmt.Println("Clang SA is enabled: ", sdclangFlags)
} else {
fmt.Println("Clang SA is not enabled")
}
} else {
panic(err)
}
} else {
fmt.Println(err)
}
	// Override SDCLANG if the variable is set in the environment
if sdclang := os.Getenv("SDCLANG"); sdclang != "" {
if override, err := strconv.ParseBool(sdclang); err == nil {
SDClang = override
}
}
// Sanity check SDCLANG_PATH
if envPath := os.Getenv("SDCLANG_PATH"); sdclangPath == "" && envPath == "" {
panic("SDCLANG_PATH can not be empty")
}
// Override SDCLANG_PATH if the variable is set in the environment
pctx.VariableFunc("SDClangBin", func(ctx android.PackageVarContext) string {
if override := ctx.Config().Getenv("SDCLANG_PATH"); override != "" {
return override
}
return sdclangPath
})
// Override SDCLANG_COMMON_FLAGS if the variable is set in the environment
pctx.VariableFunc("SDClangFlags", func(ctx android.PackageVarContext) string {
if override := ctx.Config().Getenv("SDCLANG_COMMON_FLAGS"); override != "" {
return override
}
return sdclangAEFlag + " " + sdclangFlags
})
SDClangPath = sdclangPath
// Find the path to SDLLVM's ASan libraries
// TODO (b/117846004): Disable setting SDClangAsanLibDir due to unit test path issues
//absPath := sdclangPath
//if envPath := android.SdclangEnv["SDCLANG_PATH"]; envPath != "" {
// absPath = envPath
//}
//if !filepath.IsAbs(absPath) {
// absPath = path.Join(androidRoot, absPath)
//}
//
//libDirPrefix := "../lib/clang"
//libDir, err := ioutil.ReadDir(path.Join(absPath, libDirPrefix))
//if err != nil {
// libDirPrefix = "../lib64/clang"
// libDir, err = ioutil.ReadDir(path.Join(absPath, libDirPrefix))
//}
//if err != nil {
// panic(err)
//}
//if len(libDir) != 1 || !libDir[0].IsDir() {
// panic("Failed to find sanitizer libraries")
//}
//
//pctx.StaticVariable("SDClangAsanLibDir", path.Join(absPath, libDirPrefix, libDir[0].Name(), "lib/linux"))
}
var HostPrebuiltTag = pctx.VariableConfigMethod("HostPrebuiltTag", android.Config.PrebuiltOS)
func envOverrideFunc(envVar, defaultVal string) func(ctx android.PackageVarContext) string {
return func(ctx android.PackageVarContext) string {
if override := ctx.Config().Getenv(envVar); override != "" {
return override
}
return defaultVal
}
}
| [
"\"QIIFA_BUILD_CONFIG\"",
"\"TARGET_BOARD_PLATFORM\"",
"\"SDCLANG_AE_CONFIG\"",
"\"SDCLANG_CONFIG\"",
"\"SDCLANG_SA_ENABLED\"",
"\"SDCLANG\"",
"\"SDCLANG_PATH\""
] | [] | [
"SDCLANG_PATH",
"TARGET_BOARD_PLATFORM",
"SDCLANG_AE_CONFIG",
"QIIFA_BUILD_CONFIG",
"SDCLANG",
"SDCLANG_CONFIG",
"SDCLANG_SA_ENABLED"
] | [] | ["SDCLANG_PATH", "TARGET_BOARD_PLATFORM", "SDCLANG_AE_CONFIG", "QIIFA_BUILD_CONFIG", "SDCLANG", "SDCLANG_CONFIG", "SDCLANG_SA_ENABLED"] | go | 7 | 0 | |
uai_pharma/asgi.py | """
ASGI config for uai_pharma project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'uai_pharma.settings')
application = get_asgi_application()
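# Example of serving this module with an ASGI server (uvicorn assumed to be
# installed; the command is illustrative):
#   uvicorn uai_pharma.asgi:application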
| [] | [] | [] | [] | [] | python | 0 | 0 | |
app/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'LightningLunch.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
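# Typical invocations (standard Django management commands, shown for
# illustration):
#   python manage.py migrate
#   python manage.py runserver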
| [] | [] | [] | [] | [] | python | 0 | 0 | |
vendor/github.com/jenkins-x/go-scm/scm/factory/factory.go | package factory
import (
"context"
"fmt"
"net/http"
"net/url"
"os"
"strings"
"github.com/jenkins-x/go-scm/scm"
"github.com/jenkins-x/go-scm/scm/driver/bitbucket"
"github.com/jenkins-x/go-scm/scm/driver/fake"
"github.com/jenkins-x/go-scm/scm/driver/gitea"
"github.com/jenkins-x/go-scm/scm/driver/github"
"github.com/jenkins-x/go-scm/scm/driver/gitlab"
"github.com/jenkins-x/go-scm/scm/driver/gogs"
"github.com/jenkins-x/go-scm/scm/driver/stash"
"github.com/jenkins-x/go-scm/scm/transport"
"golang.org/x/oauth2"
)
// MissingGitServerURL the error returned if you use a git driver that needs a git server URL
var MissingGitServerURL = fmt.Errorf("No git serverURL was specified")
// DefaultIdentifier is the default driver identifier used by FromRepoURL.
var DefaultIdentifier = NewDriverIdentifier()
type clientOptionFunc func(*scm.Client)
// NewClient creates a new client for a given driver, serverURL and OAuth token
func NewClient(driver, serverURL, oauthToken string, opts ...clientOptionFunc) (*scm.Client, error) {
if driver == "" {
driver = "github"
}
var client *scm.Client
var err error
switch driver {
case "bitbucket", "bitbucketcloud":
if serverURL != "" {
client, err = bitbucket.New(ensureBBCEndpoint(serverURL))
} else {
client = bitbucket.NewDefault()
}
case "fake", "fakegit":
client, _ = fake.NewDefault()
case "gitea":
if serverURL == "" {
return nil, MissingGitServerURL
}
client, err = gitea.New(serverURL)
case "github":
if serverURL != "" {
client, err = github.New(ensureGHEEndpoint(serverURL))
} else {
client = github.NewDefault()
}
case "gitlab":
if serverURL != "" {
client, err = gitlab.New(serverURL)
} else {
client = gitlab.NewDefault()
}
case "gogs":
if serverURL == "" {
return nil, MissingGitServerURL
}
client, err = gogs.New(serverURL)
case "stash", "bitbucketserver":
if serverURL == "" {
return nil, MissingGitServerURL
}
client, err = stash.New(serverURL)
default:
return nil, fmt.Errorf("Unsupported $GIT_KIND value: %s", driver)
}
if err != nil {
return client, err
}
if oauthToken != "" {
if driver == "gitlab" || driver == "bitbucketcloud" {
client.Client = &http.Client{
Transport: &transport.PrivateToken{
Token: oauthToken,
},
}
} else {
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: oauthToken},
)
client.Client = oauth2.NewClient(context.Background(), ts)
}
}
for _, o := range opts {
o(client)
}
return client, err
}
// NewClientFromEnvironment creates a new client using environment variables $GIT_KIND, $GIT_SERVER, $GIT_TOKEN
// defaulting to github if no $GIT_KIND or $GIT_SERVER
func NewClientFromEnvironment() (*scm.Client, error) {
if repoURL := os.Getenv("GIT_REPO_URL"); repoURL != "" {
return FromRepoURL(repoURL)
}
driver := os.Getenv("GIT_KIND")
serverURL := os.Getenv("GIT_SERVER")
oauthToken := os.Getenv("GIT_TOKEN")
if oauthToken == "" {
return nil, fmt.Errorf("No Git OAuth token specified for $GIT_TOKEN")
}
client, err := NewClient(driver, serverURL, oauthToken)
if driver == "" {
driver = client.Driver.String()
}
fmt.Printf("using driver: %s and serverURL: %s\n", driver, serverURL)
return client, err
}
// FromRepoURL parses a URL of the form https://:authtoken@host/ and attempts to
// determine the driver and creates a client to authenticate to the endpoint.
func FromRepoURL(repoURL string) (*scm.Client, error) {
u, err := url.Parse(repoURL)
if err != nil {
return nil, err
}
auth := ""
if password, ok := u.User.Password(); ok {
auth = password
}
driver, err := DefaultIdentifier.Identify(u.Host)
if err != nil {
return nil, err
}
u.Path = "/"
u.User = nil
return NewClient(driver, u.String(), auth)
}
// ensureGHEEndpoint ensures the URL has the /api/v3 suffix needed by GitHub Enterprise
func ensureGHEEndpoint(u string) string {
if strings.HasPrefix(u, "https://github.com") || strings.HasPrefix(u, "http://github.com") {
return "https://api.github.com"
}
// ensure we use the API endpoint to log in
if !strings.Contains(u, "/api/") {
u = scm.UrlJoin(u, "/api/v3")
}
return u
}
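// For example (illustrative values): "https://github.com" maps to
// "https://api.github.com", while "https://ghe.example.com" becomes
// "https://ghe.example.com/api/v3".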
// ensureBBCEndpoint maps bitbucket.org URLs to the Bitbucket Cloud API endpoint
func ensureBBCEndpoint(u string) string {
if strings.HasPrefix(u, "https://bitbucket.org") || strings.HasPrefix(u, "http://bitbucket.org") {
return "https://api.bitbucket.org"
}
return u
}
func Client(httpClient *http.Client) clientOptionFunc {
return func(c *scm.Client) {
c.Client = httpClient
}
}
func NewWebHookService(driver string) (scm.WebhookService, error) {
if driver == "" {
driver = "github"
}
var service scm.WebhookService
switch driver {
case "bitbucket", "bitbucketcloud":
service = bitbucket.NewWebHookService()
case "fake", "fakegit":
// TODO: support fake
case "gitea":
service = gitea.NewWebHookService()
case "github":
service = github.NewWebHookService()
case "gitlab":
service = gitlab.NewWebHookService()
case "gogs":
service = gogs.NewWebHookService()
case "stash", "bitbucketserver":
service = stash.NewWebHookService()
default:
return nil, fmt.Errorf("Unsupported GIT_KIND value: %s", driver)
}
return service, nil
}
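// Hypothetical usage sketch (assumes GIT_KIND, GIT_SERVER and GIT_TOKEN are
// exported in the environment, and that log/fmt are imported by the caller):
//
//	client, err := factory.NewClientFromEnvironment()
//	if err != nil {
//	    log.Fatal(err)
//	}
//	fmt.Println("driver:", client.Driver.String())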
| [
"\"GIT_REPO_URL\"",
"\"GIT_KIND\"",
"\"GIT_SERVER\"",
"\"GIT_TOKEN\""
] | [] | [
"GIT_TOKEN",
"GIT_KIND",
"GIT_REPO_URL",
"GIT_SERVER"
] | [] | ["GIT_TOKEN", "GIT_KIND", "GIT_REPO_URL", "GIT_SERVER"] | go | 4 | 0 | |
main.go | package main
import (
"bytes"
"compress/gzip"
"log"
"net/http"
"os"
"strings"
"time"
"github.com/TwinProduction/gatus/config"
"github.com/TwinProduction/gatus/discovery"
"github.com/TwinProduction/gatus/k8s"
"github.com/TwinProduction/gatus/security"
"github.com/TwinProduction/gatus/watchdog"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
const cacheTTL = 10 * time.Second
var (
cachedServiceResults []byte
cachedServiceResultsGzipped []byte
cachedServiceResultsTimestamp time.Time
)
func main() {
cfg := loadConfiguration()
cfg.Client = k8s.NewClient(cfg.K8sClusterMode)
if cfg.AutoDiscoverK8SServices {
discoveredServices := discovery.GetServices(cfg)
cfg.Services = append(cfg.Services, discoveredServices...)
}
resultsHandler := serviceResultsHandler
if cfg.Security != nil && cfg.Security.IsValid() {
resultsHandler = security.Handler(serviceResultsHandler, cfg.Security)
}
http.HandleFunc("/api/v1/results", resultsHandler)
http.HandleFunc("/health", healthHandler)
http.Handle("/", GzipHandler(http.FileServer(http.Dir("./static"))))
if cfg.Metrics {
http.Handle("/metrics", promhttp.Handler())
}
log.Println("[main][main] Listening on port 8080")
go watchdog.Monitor(cfg)
log.Fatal(http.ListenAndServe(":8080", nil))
}
func loadConfiguration() *config.Config {
var err error
customConfigFile := os.Getenv("GATUS_CONFIG_FILE")
if len(customConfigFile) > 0 {
err = config.Load(customConfigFile)
} else {
err = config.LoadDefaultConfiguration()
}
if err != nil {
panic(err)
}
return config.Get()
}
func serviceResultsHandler(writer http.ResponseWriter, r *http.Request) {
if isExpired := cachedServiceResultsTimestamp.IsZero() || time.Since(cachedServiceResultsTimestamp) > cacheTTL; isExpired {
buffer := &bytes.Buffer{}
gzipWriter := gzip.NewWriter(buffer)
data, err := watchdog.GetJSONEncodedServiceResults()
if err != nil {
log.Printf("[main][serviceResultsHandler] Unable to marshal object to JSON: %s", err.Error())
writer.WriteHeader(http.StatusInternalServerError)
_, _ = writer.Write([]byte("Unable to marshal object to JSON"))
return
}
_, _ = gzipWriter.Write(data)
_ = gzipWriter.Close()
cachedServiceResults = data
cachedServiceResultsGzipped = buffer.Bytes()
cachedServiceResultsTimestamp = time.Now()
}
var data []byte
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
writer.Header().Set("Content-Encoding", "gzip")
data = cachedServiceResultsGzipped
} else {
data = cachedServiceResults
}
writer.Header().Add("Content-type", "application/json")
writer.WriteHeader(http.StatusOK)
_, _ = writer.Write(data)
}
func healthHandler(writer http.ResponseWriter, _ *http.Request) {
writer.Header().Add("Content-type", "application/json")
writer.WriteHeader(http.StatusOK)
_, _ = writer.Write([]byte("{\"status\":\"UP\"}"))
}
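// Hypothetical smoke test once the server is running (illustrative):
//
//	curl http://localhost:8080/health          -> {"status":"UP"}
//	curl http://localhost:8080/api/v1/results  -> JSON service results
//	                                              (gzipped when Accept-Encoding allows)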
| [
"\"GATUS_CONFIG_FILE\""
] | [] | [
"GATUS_CONFIG_FILE"
] | [] | ["GATUS_CONFIG_FILE"] | go | 1 | 0 | |
tesla/poll.py | import os
import time
from datetime import datetime
from typing import Dict, Tuple
import psycopg2
from tesla_api import ApiError, TeslaApiClient
# 230 volt, 3 phases, 16 amps
CHARGING_SPEED_PER_MINUTE = ((230 * 16 * 3) / 1000) / 60
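# Worked out: 230 V * 16 A * 3 phases = 11,040 W = 11.04 kW, i.e. roughly
# 11.04 / 60 ≈ 0.184 kWh of charge delivered per minute.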
def is_connected(charge_state: Dict) -> bool:
if charge_state["charging_state"] in {"Disconnected", "Stopped"}:
return False
return True
def est_charge_for_this_hour() -> Tuple[float, bool]:
conn = None
est_charge = 0.0
enabled = False
try:
# connect to the PostgreSQL database (connection settings are hardcoded here)
conn = psycopg2.connect(host="postgres", database="fokko", user="fokko", password="fokko")
# create a new cursor
cur = conn.cursor()
cur.execute(
"""
SELECT
est_charge,
enabled
FROM tesla_charge_schema
WHERE slot_start BETWEEN current_timestamp AND current_timestamp + interval '1h'
ORDER BY enabled DESC, created_at DESC, slot_start ASC
LIMIT 1
"""
)
for row in cur.fetchall():
est_charge = row[0]
enabled = row[1]
# commit the changes to the database
conn.commit()
# close communication with the database
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return est_charge, enabled
def store_tesla_state(charge_state: Dict):
sql = """
INSERT INTO tesla_readings (
battery_level,
charger_actual_current,
charger_power,
charger_voltage
)
VALUES(
%s,
%s,
%s,
%s
);"""
conn = None
try:
# connect to the PostgreSQL database (connection settings are hardcoded here)
conn = psycopg2.connect(host="postgres", database="fokko", user="fokko", password="fokko")
# create a new cursor
cur = conn.cursor()
# execute the INSERT statement
cur.execute(
sql,
(
charge_state["battery_level"],
charge_state["charger_actual_current"],
charge_state["charger_power"],
charge_state["charger_voltage"],
),
)
# commit the changes to the database
conn.commit()
# close communication with the database
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
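# Assumed shape of the two tables used above, inferred from the queries
# (column types are guesses, not taken from the original schema):
#
#   CREATE TABLE tesla_charge_schema (
#       slot_start  timestamptz,
#       est_charge  double precision,
#       enabled     boolean,
#       created_at  timestamptz
#   );
#
#   CREATE TABLE tesla_readings (
#       battery_level          integer,
#       charger_actual_current integer,
#       charger_power          integer,
#       charger_voltage        integer
#   );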
if __name__ == "__main__":
client = TeslaApiClient(os.getenv("TESLA_EMAIL"), os.getenv("TESLA_PASSWORD"))
# There is just one car...
oto = client.list_vehicles()[0]
print("Optimize charging for: " + oto.display_name)
oto.wake_up()
inc = 0
charge_state = {}
# Retry fetching the charge state (up to 22 attempts) while the car wakes up
while inc < 22:
try:
charge_state = oto.charge.get_state()
break
except ApiError as e:
inc += 1
print(str(e))
time.sleep(1)
# Summer saving
dt = datetime.now()
store_tesla_state(charge_state)
est_charge, enabled = est_charge_for_this_hour()
# If there is no schema active, we don't want to do anything
if enabled:
should_charge = False
# Check if we still need to charge for this hour
charged_until_now = dt.minute * CHARGING_SPEED_PER_MINUTE
print("Charged until now: " + str(charged_until_now))
print("Expected charge: " + str(est_charge))
print("Remaining kWh for this hour: " + str(charged_until_now - est_charge))
if est_charge > charged_until_now:
should_charge = True
print("Current state: " + charge_state["charging_state"])
if charge_state["charging_state"] not in {"Disconnected"}:
if should_charge and charge_state["charging_state"] == "Stopped":
print("Start charging")
oto.charge.start_charging()
elif not should_charge and charge_state["charging_state"] != "Stopped":
print("Stop charging")
oto.charge.stop_charging()
else:
print("Nothing to do here...")
else:
print("No schedule active")
| [] | [] | [
"TESLA_PASSWORD",
"TESLA_EMAIL"
] | [] | ["TESLA_PASSWORD", "TESLA_EMAIL"] | python | 2 | 0 | |
api/standup/config/common.py | import os
from os.path import join
from distutils.util import strtobool
import dj_database_url
from configurations import Configuration
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class Common(Configuration):
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third party apps
'rest_framework', # utilities for rest apis
'rest_framework.authtoken', # token authentication
'django_filters', # for filtering rest endpoints
'corsheaders',
# Your apps
'standup.users',
'standup.teams',
'standup.goals',
)
# https://docs.djangoproject.com/en/2.0/topics/http/middleware/
MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ALLOWED_HOSTS = ["*"]
ROOT_URLCONF = 'standup.urls'
ADMIN_URL = 'admin/'
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY')
WSGI_APPLICATION = 'standup.wsgi.application'
# Email
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
ADMINS = (
('Author', '[email protected]'),
)
# Postgres
DATABASES = {
'default': dj_database_url.config(
default=os.getenv('DATABASE_URL'),
conn_max_age=int(os.getenv('POSTGRES_CONN_MAX_AGE', 600))
)
}
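# Example DATABASE_URL understood by dj_database_url (illustrative value):
#   postgres://standup:secret@localhost:5432/standup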
# General
APPEND_SLASH = False
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
USE_L10N = True
USE_TZ = True
LOGIN_REDIRECT_URL = '/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_ROOT = os.path.normpath(join(os.path.dirname(BASE_DIR), 'static'))
STATICFILES_DIRS = []
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Media files
MEDIA_ROOT = join(os.path.dirname(BASE_DIR), 'media')
MEDIA_URL = '/media/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': STATICFILES_DIRS + [
join(os.path.dirname(BASE_DIR), 'teams/templates/')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Set DEBUG to False as a default for safety
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = strtobool(os.getenv('DJANGO_DEBUG', 'no'))
# Password Validation
# https://docs.djangoproject.com/en/2.0/topics/auth/passwords/#module-django.contrib.auth.password_validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'django.server': {
'()': 'django.utils.log.ServerFormatter',
'format': '[%(server_time)s] %(message)s',
},
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'django.server': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'django.server',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console'],
'propagate': True,
},
'django.server': {
'handlers': ['django.server'],
'level': 'INFO',
'propagate': False,
},
'django.request': {
'handlers': ['mail_admins', 'console'],
'level': 'ERROR',
'propagate': False,
},
'django.db.backends': {
'handlers': ['console'],
'level': 'INFO'
},
}
}
# Custom user app
AUTH_USER_MODEL = 'users.User'
# Django Rest Framework
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': int(os.getenv('DJANGO_PAGINATION_LIMIT', 10)),
'DATETIME_FORMAT': '%Y-%m-%dT%H:%M:%S%z',
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
)
}
| [] | [] | [
"DJANGO_SECRET_KEY",
"POSTGRES_CONN_MAX_AGE",
"DATABASE_URL",
"DJANGO_DEBUG",
"DJANGO_PAGINATION_LIMIT"
] | [] | ["DJANGO_SECRET_KEY", "POSTGRES_CONN_MAX_AGE", "DATABASE_URL", "DJANGO_DEBUG", "DJANGO_PAGINATION_LIMIT"] | python | 5 | 0 |