filename (string, length 4-198) | content (string, length 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, length 2-3.9k) | lang (string, 3 classes) | constargcount (float64, 0-129, nullable) | variableargcount (float64, 0, nullable) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
fully_connected.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 25 16:43:33 2017
@author: aditya
"""
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and Evaluates the MNIST network using a feed dictionary."""
# pylint: disable=missing-docstring
import argparse
import os
import sys
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist
# Basic model parameters as external flags.
FLAGS = None
def placeholder_inputs(batch_size):
"""Generate placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
code and will be fed from the downloaded data in the .run() loop, below.
Args:
batch_size: The batch size will be baked into both placeholders.
Returns:
images_placeholder: Images placeholder.
labels_placeholder: Labels placeholder.
"""
# Note that the shapes of the placeholders match the shapes of the full
# image and label tensors, except the first dimension is now batch_size
# rather than the full size of the train or test data sets.
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
mnist.IMAGE_PIXELS))
labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
return images_placeholder, labels_placeholder
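# Illustrative shapes (assuming the default --batch_size 100 and mnist.IMAGE_PIXELS == 28*28):
# images_placeholder has shape (100, 784) and labels_placeholder has shape (100,).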
def fill_feed_dict(data_set, images_pl, labels_pl):
"""Fills the feed_dict for training the given step.
A feed_dict takes the form of:
feed_dict = {
<placeholder>: <tensor of values to be passed for placeholder>,
....
}
Args:
data_set: The set of images and labels, from input_data.read_data_sets()
images_pl: The images placeholder, from placeholder_inputs().
labels_pl: The labels placeholder, from placeholder_inputs().
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
# Create the feed_dict for the placeholders filled with the next
# `batch size` examples.
images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,
FLAGS.fake_data)
feed_dict = {
images_pl: images_feed,
labels_pl: labels_feed,
}
return feed_dict
def do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_set):
"""Runs one evaluation against the full epoch of data.
Args:
sess: The session in which the model has been trained.
eval_correct: The Tensor that returns the number of correct predictions.
images_placeholder: The images placeholder.
labels_placeholder: The labels placeholder.
data_set: The set of images and labels to evaluate, from
input_data.read_data_sets().
"""
# And run one epoch of eval.
true_count = 0 # Counts the number of correct predictions.
steps_per_epoch = data_set.num_examples // FLAGS.batch_size
num_examples = steps_per_epoch * FLAGS.batch_size
for step in xrange(steps_per_epoch):
feed_dict = fill_feed_dict(data_set,
images_placeholder,
labels_placeholder)
true_count += sess.run(eval_correct, feed_dict=feed_dict)
precision = float(true_count) / num_examples
print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
(num_examples, true_count, precision))
def run_training():
"""Train MNIST for a number of steps."""
# Get the sets of images and labels for training, validation, and
# test on MNIST.
data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
# Generate placeholders for the images and labels.
images_placeholder, labels_placeholder = placeholder_inputs(
FLAGS.batch_size)
# Build a Graph that computes predictions from the inference model.
logits = mnist.inference(images_placeholder,
FLAGS.hidden1,
FLAGS.hidden2)
# Add to the Graph the Ops for loss calculation.
loss = mnist.loss(logits, labels_placeholder)
# Add to the Graph the Ops that calculate and apply gradients.
train_op = mnist.training(loss, FLAGS.learning_rate)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = mnist.evaluation(logits, labels_placeholder)
# Build the summary Tensor based on the TF collection of Summaries.
summary = tf.summary.merge_all()
# Add the variable initializer Op.
init = tf.global_variables_initializer()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver()
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Instantiate a SummaryWriter to output summaries and the Graph.
summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
# And then after everything is built:
# Run the Op to initialize the variables.
sess.run(init)
# Start the training loop.
for step in xrange(FLAGS.max_steps):
start_time = time.time()
# Fill a feed dictionary with the actual set of images and labels
# for this particular training step.
feed_dict = fill_feed_dict(data_sets.train,
images_placeholder,
labels_placeholder)
# Run one step of the model. The return values are the activations
# from the `train_op` (which is discarded) and the `loss` Op. To
# inspect the values of your Ops or variables, you may include them
# in the list passed to sess.run() and the value tensors will be
# returned in the tuple from the call.
_, loss_value = sess.run([train_op, loss],
feed_dict=feed_dict)
duration = time.time() - start_time
# Write the summaries and print an overview fairly often.
if step % 100 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
# Update the events file.
summary_str = sess.run(summary, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
# Save a checkpoint and evaluate the model periodically.
if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
saver.save(sess, checkpoint_file, global_step=step)
# Evaluate against the training set.
print('Training Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.train)
# Evaluate against the validation set.
print('Validation Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.validation)
# Evaluate against the test set.
print('Test Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.test)
def main(_):
if tf.gfile.Exists(FLAGS.log_dir):
tf.gfile.DeleteRecursively(FLAGS.log_dir)
tf.gfile.MakeDirs(FLAGS.log_dir)
run_training()
print("reached here")
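# Example invocation (illustrative; the flags and their defaults are defined in the
# argparse section below):
#   python fully_connected.py --learning_rate 0.01 --max_steps 2000 \
#       --hidden1 128 --hidden2 32 --batch_size 100 \
#       --input_data_dir /tmp/tensorflow/mnist/input_data \
#       --log_dir /tmp/tensorflow/mnist/logs/fully_connected_feed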
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='Initial learning rate.'
)
parser.add_argument(
'--max_steps',
type=int,
default=2000,
help='Number of steps to run trainer.'
)
parser.add_argument(
'--hidden1',
type=int,
default=128,
help='Number of units in hidden layer 1.'
)
parser.add_argument(
'--hidden2',
type=int,
default=32,
help='Number of units in hidden layer 2.'
)
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='Batch size. Must divide evenly into the dataset sizes.'
)
parser.add_argument(
'--input_data_dir',
type=str,
default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),
'tensorflow/mnist/input_data'),
help='Directory to put the input data.'
)
parser.add_argument(
'--log_dir',
type=str,
default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),
'tensorflow/mnist/logs/fully_connected_feed'),
help='Directory to put the log data.'
)
parser.add_argument(
'--fake_data',
default=False,
help='If true, uses fake data for unit testing.',
action='store_true'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) | []
| []
| [
"TEST_TMPDIR"
]
| [] | ["TEST_TMPDIR"] | python | 1 | 0 | |
go/subtle/hybrid/elliptic_curves_test.go | // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
package hybrid
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/x509"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"math/big"
"os"
"reflect"
"strings"
"testing"
)
type testEC1 struct {
elliptic.Curve
pubX, pubY string
}
type testEC2 struct {
elliptic.Curve
pointFormat string
encoded string
X, Y string
}
type testData struct {
Algorithm string
GeneratorVersion string
NumberOfTests uint32
Schema string
TestGroups []*testGroup
}
type testGroup struct {
Curve string
Encoding string
Type string
Tests []*testcase
}
type testcase struct {
Comment string
Public string
Private string
Shared string
Result string
Flags []string
TcID uint32
}
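// Illustrative shape of one Wycheproof ECDH test entry that these structs decode
// (field values here are hypothetical, not taken from the real vector files):
//   {"tcId": 1, "comment": "normal case", "public": "3059...", "private": "0612...",
//    "shared": "53020d...", "result": "valid", "flags": []}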
// Test cases same as the java tests from
// //third_party/tink/java/src/test/java/com/google/crypto/tink/subtle/EllipticCurvesTest.java
var (
testVectorsDir = os.Getenv("TEST_SRCDIR") + "/tink/third_party/wycheproof/testvectors/"
testVectors = []string{testVectorsDir + "ecdh_test.json"}
tEC1 = []testEC1{
{
elliptic.P256(),
"700c48f77f56584c5cc632ca65640db91b6bacce3a4df6b42ce7cc838833d287",
"db71e509e3fd9b060ddb20ba5c51dcc5948d46fbf640dfe0441782cab85fa4ac",
},
{
elliptic.P256(),
"809f04289c64348c01515eb03d5ce7ac1a8cb9498f5caa50197e58d43a86a7ae",
"b29d84e811197f25eba8f5194092cb6ff440e26d4421011372461f579271cda3",
},
{
elliptic.P256(),
"df3989b9fa55495719b3cf46dccd28b5153f7808191dd518eff0c3cff2b705ed",
"422294ff46003429d739a33206c8752552c8ba54a270defc06e221e0feaf6ac4",
},
{
elliptic.P256(),
"356c5a444c049a52fee0adeb7e5d82ae5aa83030bfff31bbf8ce2096cf161c4b",
"57d128de8b2a57a094d1a001e572173f96e8866ae352bf29cddaf92fc85b2f92",
},
{
elliptic.P384(),
"a7c76b970c3b5fe8b05d2838ae04ab47697b9eaf52e764592efda27fe7513272" +
"734466b400091adbf2d68c58e0c50066",
"ac68f19f2e1cb879aed43a9969b91a0839c4c38a49749b661efedf243451915e" +
"d0905a32b060992b468c64766fc8437a",
},
{
elliptic.P384(),
"30f43fcf2b6b00de53f624f1543090681839717d53c7c955d1d69efaf0349b736" +
"3acb447240101cbb3af6641ce4b88e0",
"25e46c0c54f0162a77efcc27b6ea792002ae2ba82714299c860857a68153ab62e" +
"525ec0530d81b5aa15897981e858757",
},
{
elliptic.P521(),
"000000685a48e86c79f0f0875f7bc18d25eb5fc8c0b07e5da4f4370f3a9490340" +
"854334b1e1b87fa395464c60626124a4e70d0f785601d37c09870ebf176666877a2" +
"046d",
"000001ba52c56fc8776d9e8f5db4f0cc27636d0b741bbe05400697942e80b7398" +
"84a83bde99e0f6716939e632bc8986fa18dccd443a348b6c3e522497955a4f3c302" +
"f676",
},
{
elliptic.P521(),
"000001df277c152108349bc34d539ee0cf06b24f5d3500677b4445453ccc21409" +
"453aafb8a72a0be9ebe54d12270aa51b3ab7f316aa5e74a951c5e53f74cd95fc29a" +
"ee7a",
"0000013d52f33a9f3c14384d1587fa8abe7aed74bc33749ad9c570b471776422c" +
"7d4505d9b0a96b3bfac041e4c6a6990ae7f700e5b4a6640229112deafa0cd8bb0d0" +
"89b0",
},
{
elliptic.P521(),
"00000092db3142564d27a5f0006f819908fba1b85038a5bc2509906a497daac67" +
"fd7aee0fc2daba4e4334eeaef0e0019204b471cd88024f82115d8149cc0cf4f7ce1" +
"a4d5",
"0000016bad0623f517b158d9881841d2571efbad63f85cbe2e581960c5d670601" +
"a6760272675a548996217e4ab2b8ebce31d71fca63fcc3c08e91c1d8edd91cf6fe8" +
"45f8",
},
{
elliptic.P521(),
"0000004f38816681771289ce0cb83a5e29a1ab06fc91f786994b23708ff08a08a" +
"0f675b809ae99e9f9967eb1a49f196057d69e50d6dedb4dd2d9a81c02bdcc8f7f51" +
"8460",
"0000009efb244c8b91087de1eed766500f0e81530752d469256ef79f6b965d8a2" +
"232a0c2dbc4e8e1d09214bab38485be6e357c4200d073b52f04e4a16fc6f5247187" +
"aecb",
},
{
elliptic.P521(),
"000001a32099b02c0bd85371f60b0dd20890e6c7af048c8179890fda308b359db" +
"bc2b7a832bb8c6526c4af99a7ea3f0b3cb96ae1eb7684132795c478ad6f962e4a6f" +
"446d",
"0000017627357b39e9d7632a1370b3e93c1afb5c851b910eb4ead0c9d387df67c" +
"de85003e0e427552f1cd09059aad0262e235cce5fba8cedc4fdc1463da76dcd4b6d" +
"1a46",
},
}
tEC2 = []testEC2{
// NIST_P256
{
elliptic.P256(),
"UNCOMPRESSED",
"04" +
"b0cfc7bc02fc980d858077552947ffb449b10df8949dee4e56fe21e016dcb25a" +
"1886ccdca5487a6772f9401888203f90587cc00a730e2b83d5c6f89b3b568df7",
"79974177209371530366349631093481213364328002500948308276357601809416549347930",
"11093679777528052772423074391650378811758820120351664471899251711300542565879",
},
{
elliptic.P256(),
"DO_NOT_USE_CRUNCHY_UNCOMPRESSED",
"b0cfc7bc02fc980d858077552947ffb449b10df8949dee4e56fe21e016dcb25a" +
"1886ccdca5487a6772f9401888203f90587cc00a730e2b83d5c6f89b3b568df7",
"79974177209371530366349631093481213364328002500948308276357601809416549347930",
"11093679777528052772423074391650378811758820120351664471899251711300542565879",
},
{
elliptic.P256(),
"COMPRESSED",
"03b0cfc7bc02fc980d858077552947ffb449b10df8949dee4e56fe21e016dcb25a",
"79974177209371530366349631093481213364328002500948308276357601809416549347930",
"11093679777528052772423074391650378811758820120351664471899251711300542565879",
},
// Exceptional point: x==0
{
elliptic.P256(),
"UNCOMPRESSED",
"04" +
"0000000000000000000000000000000000000000000000000000000000000000" +
"66485c780e2f83d72433bd5d84a06bb6541c2af31dae871728bf856a174f93f4",
"0",
"46263761741508638697010950048709651021688891777877937875096931459006746039284",
},
{
elliptic.P256(),
"DO_NOT_USE_CRUNCHY_UNCOMPRESSED",
"0000000000000000000000000000000000000000000000000000000000000000" +
"66485c780e2f83d72433bd5d84a06bb6541c2af31dae871728bf856a174f93f4",
"0",
"46263761741508638697010950048709651021688891777877937875096931459006746039284",
},
{
elliptic.P256(),
"COMPRESSED",
"020000000000000000000000000000000000000000000000000000000000000000",
"0",
"46263761741508638697010950048709651021688891777877937875096931459006746039284",
},
// Exceptional point: x==-3
{
elliptic.P256(),
"UNCOMPRESSED",
"04" +
"ffffffff00000001000000000000000000000000fffffffffffffffffffffffc" +
"19719bebf6aea13f25c96dfd7c71f5225d4c8fc09eb5a0ab9f39e9178e55c121",
"115792089210356248762697446949407573530086143415290314195533631308867097853948",
"11508551065151498768481026661199445482476508121209842448718573150489103679777",
},
{
elliptic.P256(),
"DO_NOT_USE_CRUNCHY_UNCOMPRESSED",
"ffffffff00000001000000000000000000000000fffffffffffffffffffffffc" +
"19719bebf6aea13f25c96dfd7c71f5225d4c8fc09eb5a0ab9f39e9178e55c121",
"115792089210356248762697446949407573530086143415290314195533631308867097853948",
"11508551065151498768481026661199445482476508121209842448718573150489103679777",
},
{
elliptic.P256(),
"COMPRESSED",
"03ffffffff00000001000000000000000000000000fffffffffffffffffffffffc",
"115792089210356248762697446949407573530086143415290314195533631308867097853948",
"11508551065151498768481026661199445482476508121209842448718573150489103679777",
},
// NIST_P384
{
elliptic.P384(),
"UNCOMPRESSED",
"04aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a" +
"385502f25dbf55296c3a545e3872760ab73617de4a96262c6f5d9e98bf9292dc" +
"29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e" +
"5f",
"2624703509579968926862315674456698189185292349110921338781561590" +
"0925518854738050089022388053975719786650872476732087",
"8325710961489029985546751289520108179287853048861315594709205902" +
"480503199884419224438643760392947333078086511627871",
},
{
elliptic.P384(),
"COMPRESSED",
"03aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a" +
"385502f25dbf55296c3a545e3872760ab7",
"2624703509579968926862315674456698189185292349110921338781561590" +
"0925518854738050089022388053975719786650872476732087",
"8325710961489029985546751289520108179287853048861315594709205902" +
"480503199884419224438643760392947333078086511627871",
},
// x = 0
{
elliptic.P384(),
"UNCOMPRESSED",
"0400000000000000000000000000000000000000000000000000000000000000" +
"00000000000000000000000000000000003cf99ef04f51a5ea630ba3f9f960dd" +
"593a14c9be39fd2bd215d3b4b08aaaf86bbf927f2c46e52ab06fb742b8850e52" +
"1e",
"0",
"9384923975005507693384933751151973636103286582194273515051780595" +
"652610803541482195894618304099771370981414591681054",
},
{
elliptic.P384(),
"COMPRESSED",
"0200000000000000000000000000000000000000000000000000000000000000" +
"0000000000000000000000000000000000",
"0",
"9384923975005507693384933751151973636103286582194273515051780595" +
"652610803541482195894618304099771370981414591681054",
},
// x = 2
{
elliptic.P384(),
"UNCOMPRESSED",
"0400000000000000000000000000000000000000000000000000000000000000" +
"0000000000000000000000000000000002732152442fb6ee5c3e6ce1d920c059" +
"bc623563814d79042b903ce60f1d4487fccd450a86da03f3e6ed525d02017bfd" +
"b3",
"2",
"1772015366480916228638409476801818679957736647795608728422858375" +
"4887974043472116432532980617621641492831213601947059",
},
{
elliptic.P384(),
"COMPRESSED",
"0300000000000000000000000000000000000000000000000000000000000000" +
"0000000000000000000000000000000002",
"2",
"1772015366480916228638409476801818679957736647795608728422858375" +
"4887974043472116432532980617621641492831213601947059",
},
// x = -3
{
elliptic.P384(),
"UNCOMPRESSED",
"04ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" +
"feffffffff0000000000000000fffffffc2de9de09a95b74e6b2c430363e1afb" +
"8dff7164987a8cfe0a0d5139250ac02f797f81092a9bdc0e09b574a8f43bf80c" +
"17",
"3940200619639447921227904010014361380507973927046544666794829340" +
"4245721771496870329047266088258938001861606973112316",
"7066741234775658874139271223692271325950306561732202191471600407" +
"582071247913794644254895122656050391930754095909911",
},
{
elliptic.P384(),
"COMPRESSED",
"03ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" +
"feffffffff0000000000000000fffffffc",
"3940200619639447921227904010014361380507973927046544666794829340" +
"4245721771496870329047266088258938001861606973112316",
"7066741234775658874139271223692271325950306561732202191471600407" +
"582071247913794644254895122656050391930754095909911",
},
// NIST_P521
{
elliptic.P521(),
"UNCOMPRESSED",
"0400c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b" +
"4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2" +
"e5bd66011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd" +
"17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94" +
"769fd16650",
"2661740802050217063228768716723360960729859168756973147706671368" +
"4188029449964278084915450806277719023520942412250655586621571135" +
"45570916814161637315895999846",
"3757180025770020463545507224491183603594455134769762486694567779" +
"6155444774405563166912344050129455395621444445372894285225856667" +
"29196580810124344277578376784",
},
{
elliptic.P521(),
"COMPRESSED",
"0200c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b" +
"4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2" +
"e5bd66",
"2661740802050217063228768716723360960729859168756973147706671368" +
"4188029449964278084915450806277719023520942412250655586621571135" +
"45570916814161637315895999846",
"3757180025770020463545507224491183603594455134769762486694567779" +
"6155444774405563166912344050129455395621444445372894285225856667" +
"29196580810124344277578376784",
},
// x = 0
{
elliptic.P521(),
"UNCOMPRESSED",
"0400000000000000000000000000000000000000000000000000000000000000" +
"0000000000000000000000000000000000000000000000000000000000000000" +
"00000000d20ec9fea6b577c10d26ca1bb446f40b299e648b1ad508aad068896f" +
"ee3f8e614bc63054d5772bf01a65d412e0bcaa8e965d2f5d332d7f39f846d440" +
"ae001f4f87",
"0",
"2816414230262626695230339754503506208598534788872316917808418392" +
"0894686826982898181454171638541149642517061885689521392260532032" +
"30035588176689756661142736775",
},
{
elliptic.P521(),
"COMPRESSED",
"0300000000000000000000000000000000000000000000000000000000000000" +
"0000000000000000000000000000000000000000000000000000000000000000" +
"000000",
"0",
"2816414230262626695230339754503506208598534788872316917808418392" +
"0894686826982898181454171638541149642517061885689521392260532032" +
"30035588176689756661142736775",
},
// x = 1
{
elliptic.P521(),
"UNCOMPRESSED",
"0400000000000000000000000000000000000000000000000000000000000000" +
"0000000000000000000000000000000000000000000000000000000000000000" +
"0000010010e59be93c4f269c0269c79e2afd65d6aeaa9b701eacc194fb3ee03d" +
"f47849bf550ec636ebee0ddd4a16f1cd9406605af38f584567770e3f272d688c" +
"832e843564",
"1",
"2265505274322546447629271557184988697103589068170534253193208655" +
"0778100463909972583865730916407864371153050622267306901033104806" +
"9570407113457901669103973732",
},
{
elliptic.P521(),
"COMPRESSED",
"0200000000000000000000000000000000000000000000000000000000000000" +
"0000000000000000000000000000000000000000000000000000000000000000" +
"000001",
"1",
"2265505274322546447629271557184988697103589068170534253193208655" +
"0778100463909972583865730916407864371153050622267306901033104806" +
"9570407113457901669103973732",
},
// x = 2
{
elliptic.P521(),
"UNCOMPRESSED",
"0400000000000000000000000000000000000000000000000000000000000000" +
"0000000000000000000000000000000000000000000000000000000000000000" +
"00000200d9254fdf800496acb33790b103c5ee9fac12832fe546c632225b0f7f" +
"ce3da4574b1a879b623d722fa8fc34d5fc2a8731aad691a9a8bb8b554c95a051" +
"d6aa505acf",
"2",
"2911448509017565583245824537994174021964465504209366849707937264" +
"0417919148200722009442607963590225526059407040161685364728526719" +
"10134103604091376779754756815",
},
{
elliptic.P521(),
"COMPRESSED",
"0300000000000000000000000000000000000000000000000000000000000000" +
"0000000000000000000000000000000000000000000000000000000000000000" +
"000002",
"2",
"2911448509017565583245824537994174021964465504209366849707937264" +
"0417919148200722009442607963590225526059407040161685364728526719" +
"10134103604091376779754756815",
},
// x = -2
{
elliptic.P521(),
"UNCOMPRESSED",
"0401ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" +
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" +
"fffffd0010e59be93c4f269c0269c79e2afd65d6aeaa9b701eacc194fb3ee03d" +
"f47849bf550ec636ebee0ddd4a16f1cd9406605af38f584567770e3f272d688c" +
"832e843564",
"6864797660130609714981900799081393217269435300143305409394463459" +
"1855431833976560521225596406614545549772963113914808580371219879" +
"99716643812574028291115057149",
"2265505274322546447629271557184988697103589068170534253193208655" +
"0778100463909972583865730916407864371153050622267306901033104806" +
"9570407113457901669103973732",
},
{
elliptic.P521(),
"COMPRESSED",
"0201ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" +
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" +
"fffffd",
"6864797660130609714981900799081393217269435300143305409394463459" +
"1855431833976560521225596406614545549772963113914808580371219879" +
"99716643812574028291115057149",
"2265505274322546447629271557184988697103589068170534253193208655" +
"0778100463909972583865730916407864371153050622267306901033104806" +
"9570407113457901669103973732",
},
}
)
func TestPointOnCurve(t *testing.T) {
for i := 0; i < len(tEC1); i++ {
x, y, ye := new(big.Int), new(big.Int), new(big.Int)
x.SetString(tEC1[i].pubX, 16)
y.SetString(tEC1[i].pubY, 16)
ye.Sub(y, big.NewInt(1))
if !tEC1[i].Curve.IsOnCurve(x, y) {
t.Fatalf("valid points not on curve for test case :%d", i)
}
if tEC1[i].Curve.IsOnCurve(x, ye) {
t.Fatalf("invalid points is on curve for test case :%d", i)
}
}
}
func TestPointEncode(t *testing.T) {
for i := 0; i < len(tEC2); i++ {
x, y := new(big.Int), new(big.Int)
x.SetString(tEC2[i].X, 10)
y.SetString(tEC2[i].Y, 10)
p := ECPoint{
X: x,
Y: y,
}
encodedpoint, err := pointEncode(tEC2[i].Curve, tEC2[i].pointFormat, p)
if err != nil {
t.Errorf("error in point encoding in test case %d : %v", i, err)
}
want, err := hex.DecodeString(tEC2[i].encoded)
if err != nil {
t.Errorf("error reading encoded point in test case %d", i)
}
if !bytes.Equal(encodedpoint, want) {
t.Errorf("mismatch point encoding in test case %d", i)
}
}
}
func TestPointDecode(t *testing.T) {
for i := 0; i < len(tEC2); i++ {
x, y := new(big.Int), new(big.Int)
x.SetString(tEC2[i].X, 10)
y.SetString(tEC2[i].Y, 10)
e, err := hex.DecodeString(tEC2[i].encoded)
if err != nil {
t.Errorf("error reading encoded point in test case %d", i)
}
pt, err := pointDecode(tEC2[i].Curve, tEC2[i].pointFormat, e)
if err != nil {
t.Errorf("error in point decoding in test case %d: %v", i, err)
}
spt := ECPoint{
X: x,
Y: y,
}
if pt.X.Cmp(spt.X) != 0 || pt.Y.Cmp(spt.Y) != 0 {
t.Errorf("mismatch point decoding in test case %d", i)
}
}
}
func checkFlag(t *testing.T, flags []string, check []string) bool {
t.Helper()
for _, f := range flags {
for _, c := range check {
if strings.Compare(f, c) == 0 {
return true
}
}
}
return false
}
// getX509PublicKey converts a stored public key to ECPublicKey.
func getX509PublicKey(t *testing.T, b []byte) (*ECPublicKey, error) {
t.Helper()
pkey, err := x509.ParsePKIXPublicKey(b)
if err != nil {
return nil, err
}
ecdsaP, ok := pkey.(*ecdsa.PublicKey)
if !ok {
return nil, errors.New("invalid elliptic curve key")
}
return &ECPublicKey{
Curve: ecdsaP.Curve,
Point: ECPoint{
X: ecdsaP.X,
Y: ecdsaP.Y,
},
}, nil
}
func TestVectors(t *testing.T) {
for _, i := range testVectors {
f, err := os.Open(i)
if err != nil {
t.Fatalf("cannot open file: %s, make sure that github.com/google/wycheproof is in your gopath", err)
}
parser := json.NewDecoder(f)
data := new(testData)
if err := parser.Decode(data); err != nil {
t.Fatalf("cannot decode test data: %s", err)
}
for _, g := range data.TestGroups {
curve, err := GetCurve(g.Curve)
if err != nil {
t.Logf("unsupported curve: %s", g.Curve)
continue
}
for _, test := range g.Tests {
tcID := fmt.Sprintf("testcase %d (%s)", test.TcID, test.Comment)
pvtHex := test.Private
if len(test.Private)%2 == 1 {
pvtHex = fmt.Sprintf("0%s", test.Private)
}
pvt, err := hex.DecodeString(pvtHex)
if err != nil {
t.Errorf("error decoding from hex private key in test case %s: %v", tcID, err)
}
pvtKey := GetECPrivateKey(curve, pvt)
p, err := hex.DecodeString(test.Public)
if err != nil {
t.Errorf("error decoding from hex public key in test case %s: %v", tcID, err)
}
pubKey := &ECPublicKey{}
var errPub error
switch data.Schema {
case "ecdh_test_schema.json":
pubKey, errPub = getX509PublicKey(t, p)
case "ecdh_ecpoint_test_schema.json":
ptFormat := "UNCOMPRESSED"
pt := &ECPoint{}
if checkFlag(t, test.Flags, []string{"CompressedPoint"}) {
ptFormat = "COMPRESSED"
}
pt, errPub = pointDecode(curve, ptFormat, p)
pubKey = &ECPublicKey{
Curve: curve,
Point: *pt,
}
default:
errPub = errors.New("invalid schema")
}
if errPub != nil && test.Result != "valid" {
t.Logf("test case %s failing as expected for invalid result: %v", tcID, errPub)
continue
}
if reflect.DeepEqual(&ECPublicKey{}, pubKey) {
t.Logf("error decoding public key in test case %s: %v", tcID, errPub)
// Some test vectors have incorrect public key encoding which
// leads to runtime errors. For more details please see the
// java test file referenced above.
continue
}
cShared, err := ComputeSharedSecret(&pubKey.Point, pvtKey)
got := hex.EncodeToString(cShared)
want := test.Shared
if test.Result == "invalid" {
if err != nil { // shared secret was not computed
continue
}
if strings.Compare(got, want) == 0 && checkFlag(t, test.Flags, []string{"WrongOrder", "WeakPublicKey", "UnnamedCurve"}) {
fmt.Printf("test case %s accepted invalid parameters but shared secret is correct\n", tcID)
continue
}
t.Errorf("test case %s accepted invalid parameters, shared secret: %s", tcID, want)
} else if strings.Compare(got, want) != 0 {
t.Errorf("test case %s incorrect shared secret, want: %s, got: %s", tcID, want, got)
}
fmt.Printf("test :%s done\n", tcID)
}
fmt.Printf("curve :%s done\n", g.Curve)
}
}
}
| [
"\"TEST_SRCDIR\""
]
| []
| [
"TEST_SRCDIR"
]
| [] | ["TEST_SRCDIR"] | go | 1 | 0 | |
setup.py | #!/usr/bin/env python
import os
import sys
import subprocess
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
# ZSTD version
VERSION = (1, 4, 4,)
VERSION_STR = ".".join([str(x) for x in VERSION])
# Package version
PKG_VERSION = VERSION
# Minor versions
PKG_VERSION += ("0",)
PKG_VERSION_STR = ".".join([str(x) for x in PKG_VERSION])
###
# Ugly hacks, I know
#
SUP_LEGACY=0
if "--legacy" in sys.argv:
# Support legacy output format functions
SUP_LEGACY=1
sys.argv.remove("--legacy")
SUP_PYZSTD_LEGACY=0
if "--pyzstd-legacy" in sys.argv:
# Support ZSTD legacy format
SUP_PYZSTD_LEGACY=1
sys.argv.remove("--pyzstd-legacy")
SUP_EXTERNAL=0
ext_libraries=[]
if "--external" in sys.argv:
# Do you want to use an external Zstd library?
SUP_EXTERNAL=1
sys.argv.remove("--external")
# You should add the external library with the option: --libraries zstd
# And probably include paths with the option: --include-dirs /usr/include/zstd
# And probably library paths with the option: --library-dirs /usr/lib/i386-linux-gnu
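# For example (illustrative paths; adjust for your system):
#   python setup.py build_ext --external --libraries zstd \
#       --include-dirs /usr/include --library-dirs /usr/lib/x86_64-linux-gnu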
# We need pkg-config here!
pkgconf = "/usr/bin/pkg-config"
if os.path.exists(pkgconf):
cmd = [pkgconf, "libzstd", "--modversion"]
if sys.hexversion >= 0x02070000:
VERSION_STR = subprocess.check_output(cmd)
else:
# Pure Python 2.6
VERSION_STR = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
if sys.hexversion >= 0x03000000:
# It's bytes in PY3
VERSION_STR = VERSION_STR.decode()
VERSION = tuple(int(v) for v in VERSION_STR.split("."))
if "--libraries" not in sys.argv:
# Add something default
ext_libraries=["zstd"]
COPT = {
'msvc': [ '/Ox', '/DVERSION=\"\\\"%s\\\"\"' % PKG_VERSION_STR, ],
'mingw32': [ '-O2', '-DVERSION="%s"' % PKG_VERSION_STR, ],
'unix': [ '-O2', '-DVERSION="%s"' % PKG_VERSION_STR, ],
'clang': [ '-O2', '-DVERSION="%s"' % PKG_VERSION_STR, ],
'gcc': [ '-O2', '-DVERSION="%s"' % PKG_VERSION_STR, ]
}
if not SUP_EXTERNAL:
for comp in COPT:
if comp == 'msvc':
COPT[comp].extend([ '/DZSTD_MULTITHREAD=1',
'/Izstd\\lib', '/Izstd\\lib\\common', '/Izstd\\lib\\compress', '/Izstd\\lib\\decompress',
])
else:
COPT[comp].extend([ '-DZSTD_MULTITHREAD=1',
'-Izstd/lib', '-Izstd/lib/common', '-Izstd/lib/compress', '-Izstd/lib/decompress',
])
if SUP_LEGACY:
for comp in COPT:
if comp == 'msvc':
COPT[comp].extend(['/Izstd\\lib\\legacy', '/DZSTD_LEGACY_SUPPORT=1'])
else:
COPT[comp].extend(['-Izstd/lib/legacy', '-DZSTD_LEGACY_SUPPORT=1'])
if SUP_PYZSTD_LEGACY:
for comp in COPT:
if comp == 'msvc':
COPT[comp].extend(['/DPYZSTD_LEGACY=1'])
else:
COPT[comp].extend(['-DPYZSTD_LEGACY=1'])
class ZstdBuildExt( build_ext ):
def build_extensions(self):
c = self.compiler.compiler_type
if c in COPT:
for e in self.extensions:
e.extra_compile_args = COPT[c]
build_ext.build_extensions(self)
zstdFiles = []
if not SUP_EXTERNAL:
for f in [
'compress/zstd_compress.c',
'compress/zstd_compress_literals.c',
'compress/zstd_compress_sequences.c',
'compress/zstdmt_compress.c',
'compress/zstd_fast.c', 'compress/zstd_double_fast.c', 'compress/zstd_lazy.c', 'compress/zstd_opt.c', 'compress/zstd_ldm.c',
'compress/fse_compress.c', 'compress/huf_compress.c',
'compress/hist.c',
'common/fse_decompress.c',
'decompress/zstd_decompress.c',
'decompress/zstd_decompress_block.c',
'decompress/zstd_ddict.c',
'decompress/huf_decompress.c',
'common/entropy_common.c', 'common/zstd_common.c', 'common/xxhash.c', 'common/error_private.c',
'common/pool.c',
'common/threading.c',
]:
zstdFiles.append('zstd/lib/'+f)
if SUP_LEGACY:
for f in [
'legacy/zstd_v01.c', 'legacy/zstd_v02.c', 'legacy/zstd_v03.c', 'legacy/zstd_v04.c', 'legacy/zstd_v05.c', 'legacy/zstd_v06.c', 'legacy/zstd_v07.c'
]:
zstdFiles.append('zstd/lib/'+f)
zstdFiles.append('src/python-zstd.c')
# Python 2.6 compat
os.environ["VERSION"] = VERSION_STR
os.environ["PKG_VERSION"] = PKG_VERSION_STR
if SUP_LEGACY:
os.environ["LEGACY"] = "1"
if SUP_EXTERNAL:
os.environ["ZSTD_EXTERNAL"] = "1"
if SUP_PYZSTD_LEGACY:
os.environ["PYZSTD_LEGACY"] = "1"
# Another dirty hack
def my_test_suite():
import unittest
test_suite = unittest.TestSuite()
for test in os.listdir('tests'):
if test.startswith("test_") and test.endswith(".py"):
test_suite.addTest(unittest.defaultTestLoader.loadTestsFromName("tests."+test.replace(".py","")))
return test_suite
setup(
name='zstd',
version=PKG_VERSION_STR,
description="ZSTD Bindings for Python",
long_description=open('README.rst', 'r').read(),
author='Sergey Dryabzhinsky, Anton Stuk',
author_email='[email protected]',
maintainer='Sergey Dryabzhinsky',
maintainer_email='[email protected]',
url='https://github.com/sergey-dryabzhinsky/python-zstd',
keywords=['zstd', 'zstandard', 'compression'],
license='BSD',
packages=find_packages('src'),
package_dir={'': 'src'},
ext_modules=[
Extension('zstd', zstdFiles, libraries=ext_libraries)
],
cmdclass = {'build_ext': ZstdBuildExt },
test_suite='setup.my_test_suite',
classifiers=[
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'Operating System :: POSIX',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
]
)
| []
| []
| [
"LEGACY",
"VERSION",
"ZSTD_EXTERNAL",
"PKG_VERSION",
"PYZSTD_LEGACY"
]
| [] | ["LEGACY", "VERSION", "ZSTD_EXTERNAL", "PKG_VERSION", "PYZSTD_LEGACY"] | python | 5 | 0 | |
helper.py | import numpy as np, sys, os, random, pdb, json, uuid, time, argparse
from pprint import pprint
import logging, logging.config
from collections import defaultdict as ddict
from ordered_set import OrderedSet
# PyTorch related imports
import torch
from torch.nn import functional as F
from torch.nn.init import xavier_normal_
from torch.utils.data import DataLoader
from torch.nn import Parameter
from torch_scatter import scatter_add
np.set_printoptions(precision=4)
def set_gpu(gpus):
"""
Sets the GPU to be used for the run
Parameters
----------
gpus: List of GPUs to be used for the run
Returns
-------
"""
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = gpus
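# Illustrative call: set_gpu('0,1') restricts this process to the first two GPUs.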
def get_logger(name, log_dir, config_dir):
"""
Creates a logger object
Parameters
----------
name: Name of the logger file
log_dir: Directory where logger file needs to be stored
config_dir: Directory from where log_config.json needs to be read
Returns
-------
A logger object which writes to both file and stdout
"""
config_dict = json.load(open( config_dir + 'log_config.json'))
config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')
logging.config.dictConfig(config_dict)
logger = logging.getLogger(name)
std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logging.Formatter(std_out_format))
logger.addHandler(consoleHandler)
return logger
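# Illustrative use (paths are hypothetical): logger = get_logger('run_1', './log/', './config/');
# it expects a '<config_dir>log_config.json' file containing a 'file_handler' entry.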
def get_combined_results(left_results, right_results):
results = {}
count = float(left_results['count'])
results['left_mr'] = round(left_results ['mr'] /count, 5)
results['left_mrr'] = round(left_results ['mrr']/count, 5)
results['right_mr'] = round(right_results['mr'] /count, 5)
results['right_mrr'] = round(right_results['mrr']/count, 5)
results['mr'] = round((left_results['mr'] + right_results['mr']) /(2*count), 5)
results['mrr'] = round((left_results['mrr'] + right_results['mrr'])/(2*count), 5)
for k in range(10):
results['left_hits@{}'.format(k+1)] = round(left_results ['hits@{}'.format(k+1)]/count, 5)
results['right_hits@{}'.format(k+1)] = round(right_results['hits@{}'.format(k+1)]/count, 5)
results['hits@{}'.format(k+1)] = round((left_results['hits@{}'.format(k+1)] + right_results['hits@{}'.format(k+1)])/(2*count), 5)
return results
def get_param(shape):
param = Parameter(torch.Tensor(*shape));
xavier_normal_(param.data)
return param
def com_mult(a, b):
r1, i1 = a[..., 0], a[..., 1]
r2, i2 = b[..., 0], b[..., 1]
return torch.stack([r1 * r2 - i1 * i2, r1 * i2 + i1 * r2], dim = -1)
def conj(a):
a[..., 1] = -a[..., 1]
return a
def cconv(a, b):
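# Circular convolution via the convolution theorem: inverse FFT of FFT(a) * FFT(b)
# (elementwise complex product); uses the pre-1.8 torch.rfft/torch.irfft API.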
return torch.irfft(com_mult(torch.rfft(a, 1), torch.rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))
def ccorr(a, b):
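# Circular correlation: inverse FFT of conj(FFT(a)) * FFT(b); note that conj() above
# negates the imaginary part of its argument in place.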
return torch.irfft(com_mult(conj(torch.rfft(a, 1)), torch.rfft(b, 1)), 1, signal_sizes=(a.shape[-1],)) | []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
backend/entities/login.go | package entities
import "time"
type Login struct {
NoIdModel
UserName string `gorm:"column:user_name;primaryKey"`
LoginResponse string `gorm:"column:login_response"`
PasswordHash uint64 `gorm:"column:password_hash"`
LastLogin time.Time `gorm:"column:last_login"`
MappaUserId int `gorm:"column:mappa_user_id"`
MappaAuth string `gorm:"column:mappa_auth"`
MappaValidUntil time.Time `gorm:"column:mappa_valid_until"`
}
// CREATE TABLE IF NOT EXISTS logins (
// id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
// username TEXT NOT NULL,
// login_response TEXT NOT NULL,
// password_hash INT NOT NULL,
// last_login TEXT NOT NULL,
// mappa_userId INT NOT NULL,
// mappa_auth TEXT NOT NULL,
// mappa_validUntil INT NOT NULL,
// UNIQUE(username) ON CONFLICT REPLACE
// );
// CREATE INDEX IF NOT EXISTS idx_last_login ON logins (last_login);
| []
| []
| []
| [] | [] | go | null | null | null |
pkg/utils/utils.go | /*
MIT License
Copyright (c) 2019 sthlm.io
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package utils
import (
"os"
"strconv"
"github.com/sirupsen/logrus"
types "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
func StringToInt(s string) int {
i, err := strconv.Atoi(s)
if err != nil {
logrus.Fatalf("Can not convert string to int: %v", err)
}
return i
}
func IsNodeReady(nodeStatus types.NodeStatus) bool {
for _, condition := range nodeStatus.Conditions {
if condition.Type == types.NodeReady && condition.Status == types.ConditionTrue {
return true
}
}
return false
}
// GetClient returns a k8s clientset to the request from inside of cluster
func GetClient() kubernetes.Interface {
config, err := rest.InClusterConfig()
if err != nil {
logrus.Fatalf("Can not get kubernetes config: %v", err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
logrus.Fatalf("Can not create kubernetes client: %v", err)
}
return clientset
}
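// Example use (a sketch; the exact List signature varies across client-go versions):
//   clientset := GetClient()
//   pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})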
func buildOutOfClusterConfig() (*rest.Config, error) {
kubeconfigPath := os.Getenv("KUBECONFIG")
if kubeconfigPath == "" {
kubeconfigPath = os.Getenv("HOME") + "/.kube/config"
}
return clientcmd.BuildConfigFromFlags("", kubeconfigPath)
}
// GetClientOutOfCluster returns a k8s clientset to the request from outside of cluster
func GetClientOutOfCluster() kubernetes.Interface {
config, err := buildOutOfClusterConfig()
if err != nil {
logrus.Fatalf("Can not get kubernetes config: %v", err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
logrus.Fatalf("Can not create kubernetes client: %v", err)
}
return clientset
}
| [
"\"KUBECONFIG\"",
"\"HOME\""
]
| []
| [
"HOME",
"KUBECONFIG"
]
| [] | ["HOME", "KUBECONFIG"] | go | 2 | 0 | |
src/test/tinc/tincrepo/mpp/gpdb/tests/package/mapreduce/test_mapreduce.py | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os, string, sys, subprocess, signal, re, getpass
import tinctest
import unittest2 as unittest
from tinctest.lib import Gpdiff
from tinctest.lib import local_path, run_shell_command
from mpp.models import MPPTestCase
from mpp.lib.PSQL import PSQL
from mpp.lib.MAPREDUCE import mapreduce
from mpp.lib.config import GPDBConfig
from mpp.lib.gppkg.gppkg import Gppkg
gpdbconfig = GPDBConfig()
localpath = local_path('')
mapr = mapreduce
cmd = 'gpssh --version'
res = {'rc':0, 'stderr':'', 'stdout':''}
run_shell_command (cmd, 'check product version', res)
product_version = res['stdout'].split('gpssh version ')[1].split(' build ')[0]
class MapreduceMPPTestCase(MPPTestCase):
def __init__(self, methodName):
self.dbname = os.environ.get('PGDATABASE')
super(MapreduceMPPTestCase, self).__init__(methodName)
@classmethod
def setUpClass(self):
super(MapreduceMPPTestCase, self).setUpClass()
gppkg = Gppkg()
gppkg.gppkg_install(product_version, 'plperl')
setup_command = "create language plperl;"
PSQL.run_sql_command(setup_command, dbname = os.environ.get('PGDATABASE'))
"compile functions.c and build functions.so"
makeLog = local_path('testBuildSOLog.out')
cmdMake = 'cd '+local_path('c_functions') + ' && make clean && make'
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command(cmdMake, 'compile functions.c', res)
file = open(makeLog, 'w')
file.write(res['stdout'])
file.close()
if res['rc']:
raise Exception('a problem occurred while creating the so files ')
so_dir = local_path('c_functions')
sharedObj = local_path('c_functions/functions.so')
# if not os.path.isfile(sharedObj):
#raise gptest.GPTestError('so files does not exist')
# For a multinode cluster, we need to copy the shared object functions.so to all primary segments
if gpdbconfig.is_multinode():
res = {'rc':0, 'stderr':'', 'stdout':''}
hosts = gpdbconfig.get_hosts(segments=True)
scp_cmd = 'gpscp -h ' +' -h '.join(map(str,hosts)) +' '+ sharedObj + ' =:%s' % so_dir
run_shell_command(scp_cmd, 'copy shared object to primary segments', res)
if res['rc']:
raise Exception('Could not copy shared object to primary segment')
def check_orca(self):
cmd = 'gpconfig -s optimizer'
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command(cmd, 'check if orca enabled', res)
for line in res['stdout']:
if 'Master value: off' in line or 'Segment value: off' in line:
return False
return True
def doTest(self, fileName):
# get file path to queryXX.sql
file = local_path(fileName)
# run psql on file, and check result
mapr.runYml(file)
mapr.checkResult(file)
def doQuery(self, sqlfile, default=''):
sql_file = local_path(sqlfile)
filename_prefix = sqlfile.split('.sql')[0]
out_file = local_path(filename_prefix + '.out')
ans_file = local_path(filename_prefix + '.ans')
PSQL.run_sql_file(sql_file = sql_file, out_file = out_file)
self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))
def run_gpmapreduce_cmd(self, gpmaprcmd = None, expected_ret=0):
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command(gpmaprcmd, 'compile functions.c', res)
self.failUnless(res['rc'] == expected_ret)
def runFunctionTest(self, functiontype, file):
filepath = os.path.join(local_path("c_functions"),functiontype, file)
#mapr.replaceTemplate(local_path("%s.sql.in" % (filepath)), localpath)
so_loc = local_path('c_functions/functions.so')
if not os.path.isfile(so_loc):
self.skipTest()
input = open(filepath+'.sql.in')
output = open(filepath+'.sql','w')
for s in input.xreadlines():
if string.find(s,'%funclib_path%') >= 0:
if string.find(sys.platform,"OSX")==0:
output.write(s.replace('%funclib_path%', local_path("c_functions/functions.NOTSUREEXTNAME")))
else:
output.write(s.replace('%funclib_path%', local_path("c_functions/functions.so")))
else:
output.write(s)
output.close()
input.close()
sqlfile = "%s.sql" % filepath
PSQL.run_sql_file(sqlfile)
self.doTest("%s.yml" % filepath)
def test_MapReduceDemo001(self):
"MapReduce: BFS Demo Init"
mapr.replaceTemplate(local_path("demo/BFS/*.in"), local_path(''))
self.doTest("demo/BFS/bfs-init.yml")
def test_MapReduceDemo002(self):
"MapReduce: BFS Demo Iter"
self.doTest("demo/BFS/bfs-iter.yml")
def test_MapReduceDemo003(self):
"MapReduce: PageRank Demo Init"
mapr.replaceTemplate(local_path("demo/PageRank/*.in"), local_path(''))
self.doTest("demo/PageRank/pagerank-init.yml")
def test_MapReduceDemo004(self):
"MapReduce: PageRank Demo Iter"
self.doTest("demo/PageRank/pagerank-iter.yml")
def test_MapReduceDemo005(self):
"MapReduce: PageRank Demo Final"
self.doTest("demo/PageRank/pagerank-final.yml")
def test_MapReduceDemo006(self):
"MapReduce: PageRank Using pagerank.pl"
cmd = local_path("demo/PageRank/pagerank.pl")
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command(cmd, '', res)
def test_MapReduceError001(self):
"MapReduce: Test Error Handling 1: MPP-4808"
self.doTest("mpp4808.yml")
def test_MapReduceError002(self):
"MapReduce: Test Error Handling 2: MPP-4807"
self.skipTest("Fails always on RHEL62")
self.doTest("mpp4807a.yml")
def test_MapReduceError003(self):
"MapReduce: Test Error Handling 3: MPP-4807"
f1 = open(local_path('mpp4807b.yml.in'), 'r')
f2 = open(local_path('mpp4807b.yml'), 'w')
for line in f1:
if '@db_user@' in line:
line = line.replace('@db_user@', os.environ.get('PGUSER', getpass.getuser()))
f2.write(line)
f1.close()
f2.close()
self.doTest("mpp4807b.yml")
def test_MapReduceError004(self):
"MapReduce: Test Error Handling 4: MPP-5550"
f1 = open(local_path('mpp5550.yml.in'), 'r')
f2 = open(local_path('mpp5550.yml'), 'w')
for line in f1:
if '@db_user@' in line:
line = line.replace('@db_user@', os.environ.get('PGUSER', getpass.getuser()))
f2.write(line)
f1.close()
f2.close()
self.doTest("mpp5550.yml")
def test_MapReduceError005(self):
"MapReduce: Test Error Handling 4: MPP-4863"
PSQL.run_sql_file(local_path('mpp4863-create.sql'))
p = subprocess.Popen(["gpfdist","-d",local_path(''),"-p","8090"])
f1 = open(local_path('mpp4863.yml.in'), 'r')
f2 = open(local_path('mpp4863.yml'), 'w')
for line in f1:
if '@db_user@' in line:
line = line.replace('@db_user@', os.environ.get('PGUSER', getpass.getuser()))
elif '@hostname@' in line:
line = line.replace('@hostname@', 'localhost')
f2.write(line)
f1.close()
f2.close()
self.doTest("mpp4863.yml")
os.kill(p.pid,signal.SIGKILL)
def test_MapReduceError005(self):
"MapReduce: MPP-5551: gpmapreduce - assertion trying to set output file format"
mapr.replaceTemplate(local_path("mpp5551.yml.in"), local_path(''))
self.doTest("mpp5551.yml")
def test_MapReduceCASE0001(self):
"MapReduce: Regression MPP-3478"
PSQL.run_sql_file(local_path('mpp3478-create.sql'))
f1 = open(local_path('mpp3478.yml.in'), 'r')
f2 = open(local_path('mpp3478.yml'), 'w')
for line in f1:
if '@db_user@' in line:
line = line.replace('@db_user@', os.environ.get('PGUSER', getpass.getuser()))
f2.write(line)
f1.close()
f2.close()
self.doTest("mpp3478.yml")
self.doQuery("mpp3478-check.sql")
def test_MapReduceDemo007(self):
"MapReduce: Demos Init"
# mapr.replaceTemplate(local_path("regression/*.in"), local_path(''))
# source is not included in the build regression so we cannot build gpmrdemo.o
def DONTtestMapReduceError006(self):
"MapReduce: MPP-11061: mapreduce crash c.yaml"
os.system("cd %s/mpp11061; make" % (local_path('')))
mapr.replaceTemplate(local_path("mpp11061/c.yaml.in"), local_path(''))
p = subprocess.Popen(["gpfdist","-d",local_path(''),"-p","8090"])
self.doTest("mpp11061/c.yaml")
os.kill(p.pid,signal.SIGKILL)
# source is not included in the build regression so we cannot build gpmrdemo.o
def DONTtestMapReduceError007(self):
"MapReduce: MPP-11061: mapreduce crash c_working.yaml"
os.system("cd %s/mpp11061; make" % (local_path('')))
mapr.replaceTemplate(local_path("mpp11061/c_working.yaml.in"), local_path(''))
p = subprocess.Popen(["gpfdist","-d",local_path(''),"-p","8090"])
self.doTest("mpp11061/c_working.yaml")
os.kill(p.pid,signal.SIGKILL)
def test_Noargument(self):
"not giving any command line arguement"
cmd = "gpmapreduce"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
# this is a no-op; no exit code was returned
def test_InvalidOption(self):
"use invalid option"
cmd = "gpmapreduce -d"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_ambigous_function(self):
"ambigous_functions"
cmd = "gpmapreduce -f neg_Ambiguous_function.yml template1"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_fileDir(self):
"file path is a directory"
cmd = "gpmapreduce -f neg_fileDir.yml.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_fileDOE(self):
"file does not exist"
cmd = "gpmapreduce -f neg_fileDOE.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_mapAsSource(self):
"Map fucntion used as Source"
cmd = "gpmapreduce -f neg_mapAsSource.yml template1"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_nonScalar(self):
"non scalar used"
cmd = "gpmapreduce -f neg_nonScalar.yaml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_returnVoid(self):
"return Void functions"
cmd = "gpmapreduce -f neg_returnVoid.yml template1"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_wrongReturnValue(self):
"wrong return value used"
cmd = "gpmapreduce -f neg_wrongReturnValue.yml template1"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_InvalidHost(self):
"Invalid Host"
cmd = "gpmapreduce -f working.yml -h rh55-qavm01"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_InvalidPort(self):
"Invalid Port"
cmd = "gpmapreduce -f working.yml -p 0000"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_InvalidPassword(self):
"invalid password"
cmd = "gpmapreduce -f working.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_InvalidUser(self):
"invalid User"
cmd = "gpmapreduce -f working.yml -U wrongUser"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_InvalidDB(self):
"invalid DB"
cmd = "gpmapreduce -f working.yml template100"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_gpfdistDOE(self):
"gpfdist not up"
cmd = "gpmapreduce -f neg_gpfdistDOE.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_DataBaseDOE_DBspecifiedINdb(self):
"Invalid DB, where DB specified in yml"
cmd = "gpmapreduce -f neg_InvalidDB.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_mpp12767_emptyYML(self):
"empty YML"
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command("touch empty.yml;echo $?", '', res)
cmd = "gpmapreduce -f empty.yml;echo $?"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 0)
def test_mpp12767_notproper(self):
"notproper.yaml from Symantec"
cmd = "gpmapreduce -f mpp12767.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_mpp12767_badyaml(self):
"badyaml.yaml from Symantec"
cmd = "gpmapreduce -f mpp12767b.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_InvalidTable(self):
"Invalid Table"
cmd = "gpmapreduce -f mpp5551.yml;echo $?"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_InvalidTable2(self):
"Invalid Table"
cmd = "gpmapreduce -f mpp5550.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_binaryFile(self):
"binary file was used as yml"
GPHOME = os.environ.get('GPHOME')
binaryPath = '%s/bin/gpmapreduce' % GPHOME
cmd = 'gpmapreduce -f %s' % binaryPath
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_missing_colon(self):
"missing : in yml"
cmd = "gpmapreduce -f neg_missing_colon.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_MissingYMLversion(self):
"missing YML version"
cmd = "gpmapreduce -f neg_missingYMLversion.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_MissingYMLversion2(self):
"missing YML version"
cmd = "gpmapreduce -f neg_invalidYMLversion.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_MultipleDefine(self):
"Multiple Define block"
cmd = "gpmapreduce -f neg_multipleDefine.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_invalidName(self):
"Invalid Name value"
cmd = "gpmapreduce -f neg_invalidName.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_MissingRequiredAttribute(self):
"Missing Required Attribuite: Name"
cmd = "gpmapreduce -f neg_missingName.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_2inputs(self):
"2 inputs"
cmd = "gpmapreduce -f neg_2inputs.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_Ambigous_function_byCaleb(self):
"Ambigous function"
cmd = "gpmapreduce -f neg_Ambiguous_function2.yml template1"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_undefinedFunction(self):
"Undefined Function"
cmd = "gpmapreduce -f neg_undefinedFunction.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_undefinedReduce(self):
"Undefined Reduce function"
cmd = "gpmapreduce -f neg_undefinedReduce.yml template1"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Neg_undefinedTable(self):
"undefined table target"
cmd = "gpmapreduce -f neg_undefinedTable.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_OutputFileAlreadyExist(self):
"Output File Already Exist"
cmd = "gpmapreduce -f neg_outputFileAlreadyExist.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_Noperm(self):
"no permission to write output file"
PSQL.run_sql_command('create table mytest (a text);', dbname='template1')
os.system("mkdir ./noperm")
os.system("chmod 000 ./noperm")
cmd = "gpmapreduce -f neg_nopermtowrite.yml"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
os.system("chmod 777 ./noperm")
os.system("rm -rf ./noperm")
def test_Invalidparameter(self):
"invalid parameter for option -k"
cmd = "gpmapreduce -k"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_EmptyArguement(self):
"empty argument"
cmd = "gpmapreduce -f"
self.run_gpmapreduce_cmd(gpmaprcmd=cmd, expected_ret = 1)
def test_builtinfunction(self):
"""
builtin functions as reducer
"""
self.runFunctionTest("","builtinfunction")
def test_aggFunction(self):
"""
use custom aggregation functions as reducer
"""
self.runFunctionTest("","aggFunction")
#### test_scalar_transistion ######
def test_scalar_transition_namedInYml_namedInDB(self):
"""
scalar Transition "param specified in yaml, DB defined with named param"
"""
self.runFunctionTest("scalar_transition", "namedInYml_namedInDB")
def test_scalar_transition_namedInYml_unnamedInDB(self):
"""
scalar Transition "param specified in yaml, DB defined with unnamed param"
"""
self.runFunctionTest("scalar_transition", "namedInYml_unnamedInDB")
def test_scalar_transition_unnamedInYml_namedInDB(self):
"""
scalar Transition "param not specified in yaml, DB defined with named param"
"""
self.runFunctionTest("scalar_transition","unnamedInYml_namedInDB")
def test_scalar_transition_unnamedInYml_unnamedInDB(self):
"""
scalar Transition "parameters not specified in yaml, DB defined with named param"
"""
self.runFunctionTest("scalar_transition","unnamedInYml_unnamedInDB")
def test_scalar_transition_retValInYml(self):
"""
scalar Transition return val specified in yaml, DB defined with named return val
"""
self.runFunctionTest("scalar_transition","retValInYml")
def test_scalar_transition_retValNotInYml(self):
"""
scalar Transition return value not specified in yaml
"""
self.runFunctionTest("scalar_transition","retValNotInYml")
def test_scalar_transition_1outParam_namedInYml_namedInDB(self):
"""
scalar Transition use 1 named out parameter, specified in yaml
"""
self.runFunctionTest("scalar_transition","1outParam_namedInYml_namedInDB")
def test_scalar_transition_1outParam_unnamedInYml_namedInDB(self):
"""
        scalar Transition use 1 named out parameter, not specified in yaml
"""
self.runFunctionTest("scalar_transition","1outParam_unnamedInYml_namedInDB")
def test_scalar_transition_1outParam_unnamedInYml_unnamedInDB(self):
"""
scalar Transition use 1 unnamed out parameter, not specified in yaml
"""
self.runFunctionTest("scalar_transition","1outParam_unnamedInYml_unnamedInDB")
def test_scalar_transition_1outParam_namedInYml_unnamedInDB(self):
"""
        scalar Transition use 1 unnamed out parameter, specified in yaml
"""
self.runFunctionTest("scalar_transition","1outParam_namedInYml_unnamedInDB")
def test_scalar_transition_1outParam_paramOverride(self):
"""
scalar Transition use 1 named out parameter, specified with a different value in yaml
"""
self.runFunctionTest("scalar_transition","1outParam_paramOverride")
def test_scalar_transition_mismatchingReturnVal(self):
"""
scalar Transition override return value with what's defined in db vs library
"""
self.runFunctionTest("scalar_transition","mismatchingReturnVal")
def test_scalar_transition_ambiguousFunction(self):
"""
scalar Transition NEG specify more than 1 function in db with the same name
"""
self.runFunctionTest("scalar_transition","ambiguousFunction")
def test_scalar_transition_NEG_2outParam(self):
"""
scalar Transition NEG use 2 out parameters
"""
self.runFunctionTest("scalar_transition","NEG_2outParam")
def test_scalar_transition_NEG_1param(self):
"""
        scalar Transition NEG use 1 parameter
"""
self.runFunctionTest("scalar_transition","NEG_1param")
##### test_scalar_consolidation ######
def test_scalar_consolidation_namedInYml_namedInDB(self):
"""
scalar Consolidation "param specified in yaml, DB defined with named param"
"""
self.runFunctionTest("scalar_consolidation", "namedInYml_namedInDB")
def test_scalar_consolidation_namedInYml_unnamedInDB(self):
"""
scalar Consolidation "param specified in yaml, DB defined with unnamed param"
"""
self.runFunctionTest("scalar_consolidation", "namedInYml_unnamedInDB")
def test_scalar_consolidation_unnamedInYml_namedInDB(self):
"""
scalar Consolidation "param specified in yaml, DB defined with named param"
"""
self.runFunctionTest("scalar_consolidation","unnamedInYml_namedInDB")
def test_scalar_consolidation_unnamedInYml_unnamedInDB(self):
"""
scalar Consolidation "parameters not specified in yaml, DB defined with unnamed parameters "
"""
self.runFunctionTest("scalar_consolidation","unnamedInYml_unnamedInDB")
def test_scalar_consolidation_retValInYml(self):
"""
scalar Consolidation return val specified in yaml, DB defined with named return val
"""
self.runFunctionTest("scalar_consolidation","retValInYml")
def test_scalar_consolidation_retValNotInYml(self):
"""
scalar Consolidation return value not specified in yaml
"""
self.runFunctionTest("scalar_consolidation","retValNotInYml")
def test_scalar_consolidation_1outParam_namedInYml_namedInDB(self):
"""
scalar Consolidation use 1 named out parameter, specified in yaml
"""
self.runFunctionTest("scalar_consolidation","1outParam_namedInYml_namedInDB")
def test_scalar_consolidation_1outParam_unnamedInYml_namedInDB(self):
"""
        scalar Consolidation use 1 named out parameter, not specified in yaml
"""
self.runFunctionTest("scalar_consolidation","1outParam_unnamedInYml_namedInDB")
def test_scalar_consolidation_1outParam_unnamedInYml_unnamed(self):
"""
scalar Consolidation use 1 unnamed out parameter, not specified in yaml
"""
self.runFunctionTest("scalar_consolidation","1outParam_unnamedInYml_unnamed")
def test_scalar_consolidation_1outParam_unnamedInYml_namedInDB2(self):
"""
scalar Consolidation use 1 named out parameter, not specified in yaml
"""
self.runFunctionTest("scalar_consolidation","1outParam_unnamedInYml_namedInDB")
def test_scalar_consolidation_1outParam_paramOverride(self):
"""
scalar Consolidation use 1 named out parameter, specified with a different value in yaml
"""
self.runFunctionTest("scalar_consolidation","1outParam_paramOverride")
def test_scalar_consolidation_mismatchingReturnVal(self):
"""
scalar Consolidation override return value with what's defined in db vs library
"""
self.runFunctionTest("scalar_consolidation","mismatchingReturnVal")
def test_scalar_consolidation_ambiguousFunction(self):
"""
scalar Consolidation NEG specify more than 1 function in db with the same name
"""
self.runFunctionTest("scalar_consolidation","ambiguousFunction")
def test_scalar_consolidation_NEG_1param(self):
"""
scalar Consolidation NEG specify only 1 parameter
"""
self.runFunctionTest("scalar_consolidation","NEG_1param")
def test_scalar_consolidation_NEG_paramDiffType(self):
"""
scalar Consolidation NEG two parameters with different type
"""
self.runFunctionTest("scalar_consolidation","NEG_paramDiffType")
filename = local_path("c_functions/scalar_consolidation/NEG_paramDiffType_cleanup.sql")
PSQL.run_sql_file(filename)
###### test_scalar_finalize #######
def test_scalar_finalize_namedInYml_namedInDB(self):
"""
scalar Finalize "param specified in yaml, DB defined with named param"
"""
self.runFunctionTest("scalar_finalize", "namedInYml_namedInDB")
def test_scalar_finalize_namedInYml_unnamedInDB(self):
"""
scalar Finalize "param specified in yaml, DB defined with unnamed param"
"""
self.runFunctionTest("scalar_finalize", "namedInYml_unnamedInDB")
def test_scalar_finalize_unnamedInYml_namedInDB(self):
"""
scalar Finalize "param specified in yaml, DB defined with named param"
"""
self.runFunctionTest("scalar_finalize","unnamedInYml_namedInDB")
def test_scalar_finalize_unnamedInYml_unnamedInDB(self):
"""
scalar Finalize "parameters not specified in yaml, DB defined with unnamed parameters "
"""
self.runFunctionTest("scalar_finalize","unnamedInYml_unnamedInDB")
def test_scalar_finalize_retValInYml(self):
"""
scalar Finalize return val specified in yaml, DB defined with named return val
"""
self.runFunctionTest("scalar_finalize","retValInYml")
def test_scalar_finalize_retValNotInYml(self):
"""
scalar Finalize return value not specified in yaml
"""
self.runFunctionTest("scalar_finalize","retValNotInYml")
def test_scalar_finalize_1outParam_namedInYml_namedInDB(self):
"""
scalar Finalize use 1 named out parameter, specified in yaml
"""
self.runFunctionTest("scalar_finalize","1outParam_namedInYml_namedInDB")
def test_scalar_finalize_1outParam_namedInYml_unnamedInDB(self):
"""
scalar Finalize use 1 unnamed out parameter, specified in yaml
"""
self.runFunctionTest("scalar_finalize","1outParam_namedInYml_unnamedInDB")
def test_scalar_finalize_1outParam_unnamedInYml_unnamed(self):
"""
scalar Finalize use 1 unnamed out parameter, not specified in yaml
"""
self.runFunctionTest("scalar_finalize","1outParam_unnamedInYml_unnamed")
def test_scalar_finalize_1outParam_unnamedInYml_namedInDB(self):
"""
scalar Finalize use 1 named out parameter, not specified in yaml
"""
self.runFunctionTest("scalar_finalize","1outParam_unnamedInYml_namedInDB")
def test_scalar_finalize_1outParam_paramOverride(self):
"""
scalar Finalize use 1 named out parameter, specified with a different value in yaml
"""
self.runFunctionTest("scalar_finalize","1outParam_paramOverride")
def test_scalar_finalize_mismatchingReturnVal(self):
"""
scalar Finalize override return value with what's defined in db vs library
"""
self.runFunctionTest("scalar_finalize","mismatchingReturnVal")
def test_scalar_finalize_ambiguousFunction(self):
"""
scalar Finalize NEG specify more than 1 function in db with the same name
"""
self.runFunctionTest("scalar_finalize","ambiguousFunction")
def test_scalar_finalize_NEG_2param(self):
"""
scalar Finalize NEG specify more than 1 parameter
"""
self.runFunctionTest("scalar_finalize","NEG_2param")
###### test_scalar_map #######
def test_scalar_map_namedInYml_namedInDB(self):
"""
scalar Map "param specified in yaml, DB defined with named param"
"""
self.runFunctionTest("scalar_map", "namedInYml_namedInDB")
def test_scalar_map_namedInYml_unnamedInDB(self):
"""
scalar Map "param specified in yaml, DB defined with unnamed param"
"""
self.runFunctionTest("scalar_map", "namedInYml_unnamedInDB")
def test_scalar_map_unnamedInYml_namedInDB(self):
"""
scalar Map "param not specified in yaml, DB defined with named param"
"""
if self.check_orca():
self.skipTest("Skipping due to MPP-23877")
self.runFunctionTest("scalar_map","unnamedInYml_namedInDB")
def test_scalar_map_unnamedInYml_unnamedInDB(self):
"""
scalar Map "parameters not specified in yaml, DB defined with unnamed parameters "
"""
if self.check_orca():
self.skipTest("Skipping due to MPP-23877")
self.runFunctionTest("scalar_map","unnamedInYml_unnamedInDB")
def test_scalar_map_retValInYml(self):
"""
scalar Map return val specified in yaml, DB defined with named return val
"""
self.runFunctionTest("scalar_map","retValInYml")
def test_scalar_map_retValNotInYml(self):
"""
scalar Map return value not specified in yaml
"""
self.runFunctionTest("scalar_map","retValNotInYml")
def test_scalar_map_1outParam_namedInYml_namedInDB(self):
"""
scalar Map use 1 named out parameter, specified in yaml
"""
self.runFunctionTest("scalar_map","1outParam_namedInYml_namedInDB")
def test_scalar_map_1outParam_namedInYml_unnamedInDB(self):
"""
scalar Map use 1 unnamed out parameter, specified in yaml
"""
self.runFunctionTest("scalar_map","1outParam_namedInYml_unnamedInDB")
def test_scalar_map_1outParam_unnamedInYml_unnamed(self):
"""
scalar Map use 1 unnamed out parameter, not specified in yaml
"""
self.runFunctionTest("scalar_map","1outParam_unnamedInYml_unnamed")
def test_scalar_map_1outParam_unnamedInYml_namedInDB(self):
"""
scalar Map use 1 named out parameter, not specified in yaml
"""
self.runFunctionTest("scalar_map","1outParam_unnamedInYml_namedInDB")
def test_scalar_map_1outParam_paramOverride(self):
"""
scalar Map use 1 named out parameter, specified with a different value in yaml
"""
self.runFunctionTest("scalar_map","1outParam_paramOverride")
def test_scalar_map_mismatchingReturnVal(self):
"""
scalar Map override return value with what's defined in db vs library
"""
self.runFunctionTest("scalar_map","mismatchingReturnVal")
def test_scalar_map_ambiguousFunction(self):
"""
scalar Map NEG specify more than 1 function in db with the same name
"""
self.runFunctionTest("scalar_map","ambiguousFunction")
##### test_composite_finalize ####
def test_composite_finalize_namedInYml_namedInDB(self):
"""
composite: finalize: "param specified in yaml, DB defined with named param"
"""
self.runFunctionTest("composite_finalize", "namedInYml_namedInDB")
def test_composite_finalize_namedInYml_unnamedInDB(self):
"""
composite: finalize: "param specified in yaml, DB defined with unnamed param"
"""
self.runFunctionTest("composite_finalize", "namedInYml_unnamedInDB")
def test_composite_finalize_unnamedInYml_namedInDB(self):
"""
composite: finalize: "param specified in yaml, DB defined with named param"
"""
self.runFunctionTest("composite_finalize","unnamedInYml_namedInDB")
def test_composite_finalize_unnamedInYml_unnamedInDB(self):
"""
composite: finalize: "parameters not specified in yaml, DB defined with unnamed parameters "
"""
self.runFunctionTest("composite_finalize","unnamedInYml_unnamedInDB")
def test_composite_finalize_retValInYml(self):
"""
composite: finalize: return val specified in yaml, DB defined with named return val
"""
self.runFunctionTest("composite_finalize","retValInYml")
def test_composite_finalize_retValNotInYml(self):
"""
composite: finalize: return value not specified in yaml
"""
self.runFunctionTest("composite_finalize","retValNotInYml")
def test_composite_finalize_outParam_namedInYml_namedInDB(self):
"""
composite: finalize: use named out parameter, specified in yaml
"""
self.runFunctionTest("composite_finalize","outParam_namedInYml_namedInDB")
def test_composite_finalize_outParam_namedInYml_unnamedInDB(self):
"""
composite: finalize: use unnamed out parameter, specified in yaml
"""
self.runFunctionTest("composite_finalize","outParam_namedInYml_unnamedInDB")
def test_composite_finalize_outParam_unnamedInYml_unnamed(self):
"""
composite: finalize: use unnamed out parameter, not specified in yaml
"""
self.runFunctionTest("composite_finalize","outParam_unnamedInYml_unnamed")
def test_composite_finalize_outParam_unnamedInYml_namedInDB(self):
"""
composite: finalize: use named out parameter, not specified in yaml
"""
self.runFunctionTest("composite_finalize","outParam_unnamedInYml_namedInDB")
def test_composite_finalize_outParam_paramOverride(self):
"""
composite: finalize: use named out parameter, specified with a different value in yaml
"""
self.runFunctionTest("composite_finalize","outParam_paramOverride")
def test_composite_finalize_mismatchingReturnVal(self):
"""
composite: finalize: override return value with what's defined in db vs library
"""
self.runFunctionTest("composite_finalize","mismatchingReturnVal")
def test_composite_finalize_ambiguousFunction(self):
"""
composite: finalize: NEG specify more than 1 function in db with the same name
"""
self.runFunctionTest("composite_finalize","ambiguousFunction")
def test_composite_finalize_outTableDeclaration_namedInYml(self):
"""
composite: finalize: returns Table declaration, named return Val in Yml
"""
self.runFunctionTest("composite_finalize","outTableDeclaration_namedInYml")
def test_composite_finalize_outTableDeclaration_unnamedInYml(self):
"""
composite: finalize: returns Table declaration, named return Val unnamed in Yml
"""
self.runFunctionTest("composite_finalize","outTableDeclaration_unnamedInYml")
def test_composite_finalize_outToTable_namedInYml(self):
"""
composite: finalize: returns To DB table, named return Val in Yml
"""
self.runFunctionTest("composite_finalize","outToTable_namedInYml")
def test_composite_finalize_outToTable_unnamedInYml(self):
"""
composite: finalize: returns To DB table, named return Val not in Yml
"""
self.runFunctionTest("composite_finalize","outToTable_unnamedInYml")
def test_composite_finalize_NEG_2param(self):
"""
composite: finalize: specify more than 1 parameter
"""
self.runFunctionTest("composite_finalize","NEG_2param")
##### test_composite_map #####
def test_composite_map_namedInYml_namedInDB(self):
"""
composite: map: "param specified in yaml, DB defined with named param"
"""
self.runFunctionTest("composite_map", "namedInYml_namedInDB")
def test_composite_map_namedInYml_unnamedInDB(self):
"""
composite: map: "param specified in yaml, DB defined with unnamed param"
"""
self.runFunctionTest("composite_map", "namedInYml_unnamedInDB")
def test_composite_map_unnamedInYml_namedInDB(self):
"""
composite: map: "param specified in yaml, DB defined with named param"
"""
self.runFunctionTest("composite_map","unnamedInYml_namedInDB")
def test_composite_map_unnamedInYml_unnamedInDB(self):
"""
composite: map: "parameters not specified in yaml, DB defined with unnamed parameters "
"""
self.runFunctionTest("composite_map","unnamedInYml_unnamedInDB")
def test_composite_map_retValInYml(self):
"""
composite: map: return val specified in yaml, DB defined with named return val
"""
self.runFunctionTest("composite_map","retValInYml")
def test_composite_map_retValNotInYml(self):
"""
composite: map: return value not specified in yaml
"""
self.runFunctionTest("composite_map","retValNotInYml")
def test_composite_map_outParam_namedInYml_namedInDB(self):
"""
composite: map: use named out parameter, specified in yaml
"""
self.runFunctionTest("composite_map","outParam_namedInYml_namedInDB")
def test_composite_map_outParam_namedInYml_unnamedInDB(self):
"""
composite: map: use unnamed out parameter, specified in yaml
"""
self.runFunctionTest("composite_map","outParam_namedInYml_unnamedInDB")
def test_composite_map_outParam_unnamedInYml_unnamed(self):
"""
composite: map: use unnamed out parameter, not specified in yaml
"""
self.runFunctionTest("composite_map","outParam_unnamedInYml_unnamed")
def test_composite_map_outParam_unnamedInYml_namedInDB(self):
"""
composite: map: use named out parameter, not specified in yaml
"""
self.runFunctionTest("composite_map","outParam_unnamedInYml_namedInDB")
def test_composite_map_outParam_paramOverride(self):
"""
composite: map: use named out parameter, specified with a different value in yaml
"""
self.runFunctionTest("composite_map","outParam_paramOverride")
def test_composite_map_mismatchingReturnVal(self):
"""
composite: map: override return value with what's defined in db vs library
"""
self.runFunctionTest("composite_map","mismatchingReturnVal")
def test_composite_map_ambiguousFunction(self):
"""
composite: map: NEG specify more than 1 function in db with the same name
"""
self.runFunctionTest("composite_map","ambiguousFunction")
def test_composite_map_outTableDeclaration_namedInYml(self):
"""
composite: map: returns Table declaration, named return Val in Yml
"""
self.runFunctionTest("composite_map","outTableDeclaration_namedInYml")
def test_composite_map_outTableDeclaration_unnamedInYml(self):
"""
composite: map: returns Table declaration, named return Val unnamed in Yml
"""
self.runFunctionTest("composite_map","outTableDeclaration_unnamedInYml")
def test_composite_map_outToTable_composite_map_namedInYml(self):
"""
composite: map: returns To DB table, named return Val in Yml
"""
self.runFunctionTest("composite_map","outToTable_composite_map_namedInYml")
def test_composite_map_outToTable_composite_map_unnamedInYml(self):
"""
composite: map: returns To DB table, named return Val not in Yml
"""
self.runFunctionTest("composite_map","outToTable_composite_map_unnamedInYml")
| [] | [] | ["PGUSER", "GPHOME", "PGDATABASE"] | [] | ["PGUSER", "GPHOME", "PGDATABASE"] | python | 3 | 0 | |
example_test.go | package bitmexgo
import (
"fmt"
"os"
"testing"
)
func Test(t *testing.T) {
// Get your API key/secret pair at https://www.bitmex.com/app/apiKeys
testApiKey := os.Getenv("TEST_API_KEY")
testApiSecret := os.Getenv("TEST_API_SECRET_KEY")
// Create an authentication context
auth := NewAPIKeyContext(testApiKey, testApiSecret)
// Create a shareable API client instance
// apiClient := bitmexgo.NewAPIClient(bitmexgo.NewConfiguration())
// Create a testnet API client instance
testnetClient := NewAPIClient(NewTestnetConfiguration())
// Call APIs without parameters by passing the auth context.
// e.g. getting exchange-wide turnover and volume statistics:
// stats, res, err := apiClient.StatsApi.StatsGet(auth)
// Call APIs with default parameters by passing the auth context and a nil.
// e.g. getting all open positions:
// pos, res, err := apiClient.PositionApi.PositionGet(auth, nil)
// Call APIs with additional parameters by constructing a corresponding XXXOpts struct.
	// e.g. submitting a limit order to buy 100 contracts of XBTUSD at $1000.0 and to sell 50 contracts of XBTUSD at $19700.5:
var params []*OrderNewOpts
var paramBuy OrderNewOpts
paramBuy.OrdType.Set("Limit")
paramBuy.Side.Set("Buy")
paramBuy.OrderQty.Set(100)
paramBuy.Price.Set(1000.0)
params = append(params, ¶mBuy)
var paramSell OrderNewOpts
paramSell.OrdType.Set("Limit")
paramSell.Side.Set("Sell")
paramSell.OrderQty.Set(50)
paramSell.Price.Set(19700.5)
params = append(params, ¶mSell)
orders, res, err := testnetClient.OrderApi.OrderNewBulk(auth, "XBTUSD", params)
	fmt.Println(orders) // if sending the orders fails, 'orders' will be an empty list.
fmt.Println(res)
fmt.Println(err)
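	// A stricter variant (sketch) would fail the test instead of only printing:
	//   if err != nil {
	//       t.Fatalf("OrderNewBulk failed: %v", err)
	//   }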
}
| ["\"TEST_API_KEY\"", "\"TEST_API_SECRET_KEY\""] | [] | ["TEST_API_KEY", "TEST_API_SECRET_KEY"] | [] | ["TEST_API_KEY", "TEST_API_SECRET_KEY"] | go | 2 | 0 | |
FCN8s_wVGG_mk2/FCN_VGG.py | """
Architecture definition for Fully Convolutional Neural Networks (FCN8s)
Initialised with pretrained VGG weights
Weights initialised by bilinear upsampling for convolutional transpose layers
"""
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import math
import os
import time
#os.environ["CUDA_VISIBLE_DEVICES"]= "4"
use_gpu = torch.cuda.is_available()
num_gpu = list(range(torch.cuda.device_count()))
def get_upsampling_weight(in_channels,out_channels,kernel_size):
""" Make a 2D bilinear kernel suitable for upsampling"""
factor = (kernel_size+1)//2
    if kernel_size % 2 == 1:
        center = factor - 1
    else:
        center = factor - 0.5
og = np.ogrid[:kernel_size, :kernel_size]
filt = (1 - abs(og[0] - center)/factor) * (1 - abs(og[1] - center)/factor)
weight = np.zeros((in_channels,out_channels,kernel_size,kernel_size), dtype = np.float64)
    weight[range(in_channels), range(out_channels), :, :] = filt
return torch.from_numpy(weight).float()
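# Quick sanity check (illustrative only): with the bilinear formula above,
# get_upsampling_weight(1, 1, 4) has shape (1, 1, 4, 4), its 1-D factors are
# [0.25, 0.75, 0.75, 0.25], and the first row of the filter is
# [0.0625, 0.1875, 0.1875, 0.0625] (the outer product of the 1-D factors with itself).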
class FCN8s(nn.Module):
def __init__(self,n_class,vgg):
super(FCN8s,self).__init__()
self.vgg = vgg #VGG architecture definition
#conv1
self.conv1_1 = nn.Conv2d(3,64,3,padding=100)
self.relu1_1 = nn.ReLU(inplace=True)
self.conv1_2 = nn.Conv2d(64,64,3,padding = 1)
self.relu1_2 = nn.ReLU(inplace=True)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) #1/2 dimension reduction
#conv2
self.conv2_1 = nn.Conv2d(64,128,3,padding = 1)
self.relu2_1 = nn.ReLU(inplace=True)
self.conv2_2 = nn.Conv2d(128,128,3,padding=1)
self.relu2_2 = nn.ReLU(inplace=True)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/4 dimension reduction
#conv3
self.conv3_1 = nn.Conv2d(128,256,3,padding=1)
self.relu3_1 = nn.ReLU(inplace = True)
self.conv3_2 = nn.Conv2d(256,256,3,padding=1)
self.relu3_2 = nn.ReLU(inplace = True)
self.conv3_3 = nn.Conv2d(256,256,3,padding=1)
self.relu3_3 = nn.ReLU(inplace = True)
self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/8 dimension reduction
#conv4
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.relu4_1 = nn.ReLU(inplace=True)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_2 = nn.ReLU(inplace=True)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_3 = nn.ReLU(inplace=True)
self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/16 dimension reduction
#conv5
self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_1 = nn.ReLU(inplace=True)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_2 = nn.ReLU(inplace=True)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_3 = nn.ReLU(inplace=True)
self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/32 dimension reduction
#fc6
self.fc6 = nn.Conv2d(512,4096,7)
self.relu6 = nn.ReLU(inplace=True)
self.drop6 = nn.Dropout2d()
#fc7
self.fc7 = nn.Conv2d(4096,4096,1)
self.relu7 = nn.ReLU(inplace = True)
self.drop7 = nn.Dropout2d()
self.score_fr = nn.Conv2d(4096,n_class,1) #Skip Layer defintions
self.score_pool3 = nn.Conv2d(256,n_class,1)
self.score_pool4 = nn.Conv2d(512,n_class,1)
self.upscore2 = nn.ConvTranspose2d(n_class,n_class,4,stride=2,bias=False) #Upsampling layer defintions
self.upscore8 = nn.ConvTranspose2d(n_class,n_class,16,stride=8,bias=False)
self.upscore_pool4 = nn.ConvTranspose2d(n_class,n_class,4,stride=2,bias=False)
self._copy_params_from_vgg16()
def forward(self,x):
h = x
h = self.relu1_1(self.conv1_1(h))
h = self.relu1_2(self.conv1_2(h))
h = self.pool1(h)
h = self.relu2_1(self.conv2_1(h))
h = self.relu2_2(self.conv2_2(h))
h = self.pool2(h)
h = self.relu3_1(self.conv3_1(h))
h = self.relu3_2(self.conv3_2(h))
h = self.relu3_3(self.conv3_3(h))
h = self.pool3(h)
pool3 = h # 1/8
h = self.relu4_1(self.conv4_1(h))
h = self.relu4_2(self.conv4_2(h))
h = self.relu4_3(self.conv4_3(h))
h = self.pool4(h)
pool4 = h # 1/16
h = self.relu5_1(self.conv5_1(h))
h = self.relu5_2(self.conv5_2(h))
h = self.relu5_3(self.conv5_3(h))
h = self.pool5(h)
h = self.relu6(self.fc6(h))
h = self.drop6(h)
h = self.relu7(self.fc7(h))
h = self.drop7(h)
h = self.score_fr(h)
h = self.upscore2(h)
upscore2 = h # 1/16
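        # The fixed crop offsets used below (5, 9, and finally 31) compensate for the
        # extra border introduced by padding=100 in conv1_1, following the reference
        # FCN8s implementation; they align the skip-connection score maps before summing.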
h = self.score_pool4(pool4)
h = h[:, :, 5:5 + upscore2.size()[2], 5:5 + upscore2.size()[3]]
score_pool4c = h
h = upscore2 + score_pool4c # 1/16
h = self.upscore_pool4(h)
upscore_pool4 = h # 1/8
h = self.score_pool3(pool3)
h = h[:,:,9:9+upscore_pool4.size()[2],9:9+upscore_pool4.size()[3]]
score_pool3c = h
h = upscore_pool4 + score_pool3c
h = self.upscore8(h)
h = h[:, :, 31:31 + x.size()[2], 31:31 + x.size()[3]].contiguous()
return h
def _copy_params_from_vgg16(self):
#Copy VGG parameters from a pretrained VGG16 net available on Pytorch
        #Generate weights for all layers not part of VGG16: He (Kaiming) normal initialisation for convolutions, bilinear upsampling for transposed convolutions
        features = [
            self.conv1_1, self.relu1_1, self.conv1_2, self.relu1_2, self.pool1,
            self.conv2_1, self.relu2_1, self.conv2_2, self.relu2_2, self.pool2,
            self.conv3_1, self.relu3_1, self.conv3_2, self.relu3_2, self.conv3_3, self.relu3_3, self.pool3,
            self.conv4_1, self.relu4_1, self.conv4_2, self.relu4_2, self.conv4_3, self.relu4_3, self.pool4,
            self.conv5_1, self.relu5_1, self.conv5_2, self.relu5_2, self.conv5_3, self.relu5_3, self.pool5,
        ]
for l1,l2 in zip(self.vgg.features,features):
if isinstance(l1,nn.Conv2d) and isinstance(l2,nn.Conv2d):
assert l1.weight.size() == l2.weight.size()
assert l1.bias.size() == l2.bias.size()
l2.weight.data.copy_(l1.weight.data).double()
                l2.bias.data.copy_(l1.bias.data).double()
classifier = [self.fc6,self.relu6,self.drop6,self.fc7,self.relu7,self.drop7,self.score_fr,self.score_pool3,self.score_pool4,self.upscore2,self.upscore8,self.upscore_pool4]
for i in classifier:
if isinstance(i,nn.Conv2d):
n = i.kernel_size[0] * i.kernel_size[1] *i.out_channels
i.weight.data.normal_(0,math.sqrt(2./n))
if i.bias is not None:
i.bias.data.zero_()
if isinstance(i,nn.ConvTranspose2d):
assert i.kernel_size[0] == i.kernel_size[1]
initial_weight = get_upsampling_weight(i.in_channels,i.out_channels,i.kernel_size[0])
i.weight.data.copy_(initial_weight)
"""
#Test Code
if __name__ == "__main__":
batch_size, n_class, h,w = 5,11,224,224
#test the output size
fcn_model = FCN8s(n_class)
if use_gpu:
ts = time.time()
fcn_model = fcn_model.cuda()
print ("Finsished loading CUDA, time elapsed {}".format(time.time()-ts))
input = torch.autograd.Variable(torch.randn(batch_size,3,h,w).cuda())
print("hello")
output = fcn_model(input)
print(output.size())
#To check whether training properly
y = torch.autograd.Variable(torch.randn(batch_size,n_class,h,w).cuda())
criterion = nn.BCEWithLogitsLoss()
optimiser = optim.SGD(fcn_model.parameters(),lr=1e-3,momentum=0.9)
for iter in range(10):
optimiser.zero_grad()
output = fcn_model(input)
loss = criterion(output,y)
loss.backward()
print("iter {}, loss {}".format(iter, loss.data[0]))
optimiser.step()
""" | []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
pkg/testing/integration/program.go | // Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"context"
cryptorand "crypto/rand"
"encoding/hex"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"testing"
"time"
user "github.com/tweekmonster/luser"
multierror "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/pulumi/pulumi/pkg/v2/backend/filestate"
"github.com/pulumi/pulumi/pkg/v2/engine"
"github.com/pulumi/pulumi/pkg/v2/operations"
"github.com/pulumi/pulumi/pkg/v2/resource/stack"
"github.com/pulumi/pulumi/sdk/v2/go/common/apitype"
"github.com/pulumi/pulumi/sdk/v2/go/common/resource"
"github.com/pulumi/pulumi/sdk/v2/go/common/resource/config"
pulumi_testing "github.com/pulumi/pulumi/sdk/v2/go/common/testing"
"github.com/pulumi/pulumi/sdk/v2/go/common/tokens"
"github.com/pulumi/pulumi/sdk/v2/go/common/tools"
"github.com/pulumi/pulumi/sdk/v2/go/common/util/ciutil"
"github.com/pulumi/pulumi/sdk/v2/go/common/util/contract"
"github.com/pulumi/pulumi/sdk/v2/go/common/util/fsutil"
"github.com/pulumi/pulumi/sdk/v2/go/common/util/retry"
"github.com/pulumi/pulumi/sdk/v2/go/common/workspace"
)
const PythonRuntime = "python"
const NodeJSRuntime = "nodejs"
const GoRuntime = "go"
const DotNetRuntime = "dotnet"
const windowsOS = "windows"
// RuntimeValidationStackInfo contains details related to the stack that runtime validation logic may want to use.
type RuntimeValidationStackInfo struct {
StackName tokens.QName
Deployment *apitype.DeploymentV3
RootResource apitype.ResourceV3
Outputs map[string]interface{}
Events []apitype.EngineEvent
}
// EditDir is an optional edit to apply to the example, as subsequent deployments.
type EditDir struct {
Dir string
ExtraRuntimeValidation func(t *testing.T, stack RuntimeValidationStackInfo)
// Additive is true if Dir should be copied *on top* of the test directory.
// Otherwise Dir *replaces* the test directory, except we keep .pulumi/ and Pulumi.yaml and Pulumi.<stack>.yaml.
Additive bool
// ExpectFailure is true if we expect this test to fail. This is very coarse grained, and will essentially
// tolerate *any* failure in the program (IDEA: in the future, offer a way to narrow this down more).
ExpectFailure bool
// ExpectNoChanges is true if the edit is expected to not propose any changes.
ExpectNoChanges bool
// Stdout is the writer to use for all stdout messages.
Stdout io.Writer
// Stderr is the writer to use for all stderr messages.
Stderr io.Writer
// Verbose may be set to true to print messages as they occur, rather than buffering and showing upon failure.
Verbose bool
// Run program directory in query mode.
QueryMode bool
}
// TestCommandStats is a collection of data related to running a single command during a test.
type TestCommandStats struct {
// StartTime is the time at which the command was started
StartTime string `json:"startTime"`
// EndTime is the time at which the command exited
EndTime string `json:"endTime"`
// ElapsedSeconds is the time at which the command exited
ElapsedSeconds float64 `json:"elapsedSeconds"`
// StackName is the name of the stack
StackName string `json:"stackName"`
// TestId is the unique ID of the test run
TestID string `json:"testId"`
	// StepName is the name of the step which was invoked
StepName string `json:"stepName"`
// CommandLine is the command line which was invoked
CommandLine string `json:"commandLine"`
// TestName is the name of the directory in which the test was executed
TestName string `json:"testName"`
// IsError is true if the command failed
IsError bool `json:"isError"`
// The Cloud that the test was run against, or empty for local deployments
CloudURL string `json:"cloudURL"`
}
// TestStatsReporter reports results and metadata from a test run.
type TestStatsReporter interface {
ReportCommand(stats TestCommandStats)
}
// ConfigValue is used to provide config values to a test program.
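//
// An illustrative OrderedConfig entry (a sketch, not taken from a real test), setting a
// nested value with --path and a secret value:
//
//   OrderedConfig: []ConfigValue{
//       {Key: "myproj:data.nested", Value: "42", Path: true},
//       {Key: "myproj:token", Value: "shhh", Secret: true},
//   }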
type ConfigValue struct {
// The config key to pass to `pulumi config`.
Key string
// The config value to pass to `pulumi config`.
Value string
// Secret indicates that the `--secret` flag should be specified when calling `pulumi config`.
Secret bool
// Path indicates that the `--path` flag should be specified when calling `pulumi config`.
Path bool
}
// ProgramTestOptions provides options for ProgramTest
type ProgramTestOptions struct {
// Dir is the program directory to test.
Dir string
// Array of NPM packages which must be `yarn linked` (e.g. {"pulumi", "@pulumi/aws"})
Dependencies []string
// Map of package names to versions. The test will use the specified versions of these packages instead of what
// is declared in `package.json`.
Overrides map[string]string
// Map of config keys and values to set (e.g. {"aws:region": "us-east-2"}).
Config map[string]string
// Map of secure config keys and values to set (e.g. {"aws:region": "us-east-2"}).
Secrets map[string]string
// List of config keys and values to set in order, including Secret and Path options.
OrderedConfig []ConfigValue
// SecretsProvider is the optional custom secrets provider to use instead of the default.
SecretsProvider string
// EditDirs is an optional list of edits to apply to the example, as subsequent deployments.
EditDirs []EditDir
// ExtraRuntimeValidation is an optional callback for additional validation, called before applying edits.
ExtraRuntimeValidation func(t *testing.T, stack RuntimeValidationStackInfo)
// RelativeWorkDir is an optional path relative to `Dir` which should be used as working directory during tests.
RelativeWorkDir string
// AllowEmptyPreviewChanges is true if we expect that this test's no-op preview may propose changes (e.g.
// because the test is sensitive to the exact contents of its working directory and those contents change
// incidentally between the initial update and the empty update).
AllowEmptyPreviewChanges bool
// AllowEmptyUpdateChanges is true if we expect that this test's no-op update may perform changes (e.g.
// because the test is sensitive to the exact contents of its working directory and those contents change
// incidentally between the initial update and the empty update).
AllowEmptyUpdateChanges bool
// ExpectFailure is true if we expect this test to fail. This is very coarse grained, and will essentially
// tolerate *any* failure in the program (IDEA: in the future, offer a way to narrow this down more).
ExpectFailure bool
// ExpectRefreshChanges may be set to true if a test is expected to have changes yielded by an immediate refresh.
	// This could occur, for example, if a resource's state is constantly changing outside of Pulumi (e.g., timestamps).
ExpectRefreshChanges bool
// RetryFailedSteps indicates that failed updates, refreshes, and destroys should be retried after a brief
// intermission. A maximum of 3 retries will be attempted.
RetryFailedSteps bool
// SkipRefresh indicates that the refresh step should be skipped entirely.
SkipRefresh bool
// SkipPreview indicates that the preview step should be skipped entirely.
SkipPreview bool
// SkipUpdate indicates that the update step should be skipped entirely.
SkipUpdate bool
// SkipExportImport skips testing that exporting and importing the stack works properly.
SkipExportImport bool
// SkipEmptyPreviewUpdate skips the no-change preview/update that is performed that validates
// that no changes happen.
SkipEmptyPreviewUpdate bool
// SkipStackRemoval indicates that the stack should not be removed. (And so the test's results could be inspected
// in the Pulumi Service after the test has completed.)
SkipStackRemoval bool
// Quick implies SkipPreview, SkipExportImport and SkipEmptyPreviewUpdate
Quick bool
// PreviewCommandlineFlags specifies flags to add to the `pulumi preview` command line (e.g. "--color=raw")
PreviewCommandlineFlags []string
// UpdateCommandlineFlags specifies flags to add to the `pulumi up` command line (e.g. "--color=raw")
UpdateCommandlineFlags []string
// QueryCommandlineFlags specifies flags to add to the `pulumi query` command line (e.g. "--color=raw")
QueryCommandlineFlags []string
// RunBuild indicates that the build step should be run (e.g. run `yarn build` for `nodejs` programs)
RunBuild bool
// RunUpdateTest will ensure that updates to the package version can test for spurious diffs
RunUpdateTest bool
// CloudURL is an optional URL to override the default Pulumi Service API (https://api.pulumi-staging.io). The
// PULUMI_ACCESS_TOKEN environment variable must also be set to a valid access token for the target cloud.
CloudURL string
// StackName allows the stack name to be explicitly provided instead of computed from the
// environment during tests.
StackName string
// Tracing specifies the Zipkin endpoint if any to use for tracing Pulumi invocations.
Tracing string
// NoParallel will opt the test out of being ran in parallel.
NoParallel bool
// PrePulumiCommand specifies a callback that will be executed before each `pulumi` invocation. This callback may
// optionally return another callback to be invoked after the `pulumi` invocation completes.
PrePulumiCommand func(verb string) (func(err error) error, error)
// ReportStats optionally specifies how to report results from the test for external collection.
ReportStats TestStatsReporter
// Stdout is the writer to use for all stdout messages.
Stdout io.Writer
// Stderr is the writer to use for all stderr messages.
Stderr io.Writer
// Verbose may be set to true to print messages as they occur, rather than buffering and showing upon failure.
Verbose bool
// DebugLogging may be set to anything >0 to enable excessively verbose debug logging from `pulumi`. This is
// equivalent to `--logtostderr -v=N`, where N is the value of DebugLogLevel. This may also be enabled by setting
// the environment variable PULUMI_TEST_DEBUG_LOG_LEVEL.
DebugLogLevel int
// DebugUpdates may be set to true to enable debug logging from `pulumi preview`, `pulumi up`, and
// `pulumi destroy`. This may also be enabled by setting the environment variable PULUMI_TEST_DEBUG_UPDATES.
DebugUpdates bool
// Bin is a location of a `pulumi` executable to be run. Taken from the $PATH if missing.
Bin string
// YarnBin is a location of a `yarn` executable to be run. Taken from the $PATH if missing.
YarnBin string
// GoBin is a location of a `go` executable to be run. Taken from the $PATH if missing.
GoBin string
// PythonBin is a location of a `python` executable to be run. Taken from the $PATH if missing.
PythonBin string
// PipenvBin is a location of a `pipenv` executable to run. Taken from the $PATH if missing.
PipenvBin string
// DotNetBin is a location of a `dotnet` executable to be run. Taken from the $PATH if missing.
DotNetBin string
// Additional environment variables to pass for each command we run.
Env []string
// Automatically create and use a virtual environment, rather than using the Pipenv tool.
UseAutomaticVirtualEnv bool
}
func (opts *ProgramTestOptions) GetDebugLogLevel() int {
if opts.DebugLogLevel > 0 {
return opts.DebugLogLevel
}
if du := os.Getenv("PULUMI_TEST_DEBUG_LOG_LEVEL"); du != "" {
if n, e := strconv.Atoi(du); e != nil {
panic(e)
} else if n > 0 {
return n
}
}
return 0
}
func (opts *ProgramTestOptions) GetDebugUpdates() bool {
return opts.DebugUpdates || os.Getenv("PULUMI_TEST_DEBUG_UPDATES") != ""
}
// GetStackName returns a stack name to use for this test.
func (opts *ProgramTestOptions) GetStackName() tokens.QName {
if opts.StackName == "" {
		// Fetch the host and test dir names, cleaned so as to contain just [a-zA-Z0-9-_] chars.
hostname, err := os.Hostname()
contract.AssertNoErrorf(err, "failure to fetch hostname for stack prefix")
var host string
for _, c := range hostname {
if len(host) >= 10 {
break
}
if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
(c >= '0' && c <= '9') || c == '-' || c == '_' {
host += string(c)
}
}
var test string
for _, c := range filepath.Base(opts.Dir) {
if len(test) >= 10 {
break
}
if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
(c >= '0' && c <= '9') || c == '-' || c == '_' {
test += string(c)
}
}
b := make([]byte, 4)
_, err = cryptorand.Read(b)
contract.AssertNoError(err)
opts.StackName = strings.ToLower("p-it-" + host + "-" + test + "-" + hex.EncodeToString(b))
}
return tokens.QName(opts.StackName)
}
// GetStackNameWithOwner gets the name of the stack prepended with an owner, if PULUMI_TEST_OWNER is set.
// We use this in CI to create test stacks in an organization that all developers have access to, for debugging.
func (opts *ProgramTestOptions) GetStackNameWithOwner() tokens.QName {
if owner := os.Getenv("PULUMI_TEST_OWNER"); owner != "" {
return tokens.QName(fmt.Sprintf("%s/%s", owner, opts.GetStackName()))
}
return opts.GetStackName()
}
// With combines a source set of options with a set of overrides.
func (opts ProgramTestOptions) With(overrides ProgramTestOptions) ProgramTestOptions {
if overrides.Dir != "" {
opts.Dir = overrides.Dir
}
if overrides.Dependencies != nil {
opts.Dependencies = overrides.Dependencies
}
if overrides.Overrides != nil {
opts.Overrides = overrides.Overrides
}
for k, v := range overrides.Config {
if opts.Config == nil {
opts.Config = make(map[string]string)
}
opts.Config[k] = v
}
for k, v := range overrides.Secrets {
if opts.Secrets == nil {
opts.Secrets = make(map[string]string)
}
opts.Secrets[k] = v
}
if overrides.SecretsProvider != "" {
opts.SecretsProvider = overrides.SecretsProvider
}
if overrides.EditDirs != nil {
opts.EditDirs = overrides.EditDirs
}
if overrides.ExtraRuntimeValidation != nil {
opts.ExtraRuntimeValidation = overrides.ExtraRuntimeValidation
}
if overrides.RelativeWorkDir != "" {
opts.RelativeWorkDir = overrides.RelativeWorkDir
}
if overrides.AllowEmptyPreviewChanges {
opts.AllowEmptyPreviewChanges = overrides.AllowEmptyPreviewChanges
}
if overrides.AllowEmptyUpdateChanges {
opts.AllowEmptyUpdateChanges = overrides.AllowEmptyUpdateChanges
}
if overrides.ExpectFailure {
opts.ExpectFailure = overrides.ExpectFailure
}
if overrides.ExpectRefreshChanges {
opts.ExpectRefreshChanges = overrides.ExpectRefreshChanges
}
if overrides.RetryFailedSteps {
opts.RetryFailedSteps = overrides.RetryFailedSteps
}
if overrides.SkipRefresh {
opts.SkipRefresh = overrides.SkipRefresh
}
if overrides.SkipPreview {
opts.SkipPreview = overrides.SkipPreview
}
if overrides.SkipUpdate {
opts.SkipUpdate = overrides.SkipUpdate
}
if overrides.SkipExportImport {
opts.SkipExportImport = overrides.SkipExportImport
}
if overrides.SkipEmptyPreviewUpdate {
opts.SkipEmptyPreviewUpdate = overrides.SkipEmptyPreviewUpdate
}
if overrides.SkipStackRemoval {
opts.SkipStackRemoval = overrides.SkipStackRemoval
}
if overrides.Quick {
opts.Quick = overrides.Quick
}
if overrides.PreviewCommandlineFlags != nil {
opts.PreviewCommandlineFlags = append(opts.PreviewCommandlineFlags, overrides.PreviewCommandlineFlags...)
}
if overrides.UpdateCommandlineFlags != nil {
opts.UpdateCommandlineFlags = append(opts.UpdateCommandlineFlags, overrides.UpdateCommandlineFlags...)
}
if overrides.QueryCommandlineFlags != nil {
opts.QueryCommandlineFlags = append(opts.QueryCommandlineFlags, overrides.QueryCommandlineFlags...)
}
if overrides.RunBuild {
opts.RunBuild = overrides.RunBuild
}
if overrides.RunUpdateTest {
opts.RunUpdateTest = overrides.RunUpdateTest
}
if overrides.CloudURL != "" {
opts.CloudURL = overrides.CloudURL
}
if overrides.StackName != "" {
opts.StackName = overrides.StackName
}
if overrides.Tracing != "" {
opts.Tracing = overrides.Tracing
}
if overrides.NoParallel {
opts.NoParallel = overrides.NoParallel
}
if overrides.PrePulumiCommand != nil {
opts.PrePulumiCommand = overrides.PrePulumiCommand
}
if overrides.ReportStats != nil {
opts.ReportStats = overrides.ReportStats
}
if overrides.Stdout != nil {
opts.Stdout = overrides.Stdout
}
if overrides.Stderr != nil {
opts.Stderr = overrides.Stderr
}
if overrides.Verbose {
opts.Verbose = overrides.Verbose
}
if overrides.DebugLogLevel != 0 {
opts.DebugLogLevel = overrides.DebugLogLevel
}
if overrides.DebugUpdates {
opts.DebugUpdates = overrides.DebugUpdates
}
if overrides.Bin != "" {
opts.Bin = overrides.Bin
}
if overrides.YarnBin != "" {
opts.YarnBin = overrides.YarnBin
}
if overrides.GoBin != "" {
opts.GoBin = overrides.GoBin
}
if overrides.PipenvBin != "" {
opts.PipenvBin = overrides.PipenvBin
}
if overrides.Env != nil {
opts.Env = append(opts.Env, overrides.Env...)
}
return opts
}
type regexFlag struct {
re *regexp.Regexp
}
func (rf *regexFlag) String() string {
if rf.re == nil {
return ""
}
return rf.re.String()
}
func (rf *regexFlag) Set(v string) error {
r, err := regexp.Compile(v)
if err != nil {
return err
}
rf.re = r
return nil
}
var directoryMatcher regexFlag
var listDirs bool
var pipMutex *fsutil.FileMutex
func init() {
flag.Var(&directoryMatcher, "dirs", "optional list of regexes to use to select integration tests to run")
flag.BoolVar(&listDirs, "list-dirs", false, "list available integration tests without running them")
mutexPath := filepath.Join(os.TempDir(), "pip-mutex.lock")
pipMutex = fsutil.NewFileMutex(mutexPath)
}
// GetLogs retrieves the logs for a given stack in a particular region making the query provided.
//
// [provider] should be one of "aws" or "azure"
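//
// A hypothetical call from an ExtraRuntimeValidation callback (sketch):
//
//   logs := GetLogs(t, "aws", "us-west-2", stackInfo, operations.LogQuery{})
//   assert.NotNil(t, logs)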
func GetLogs(
t *testing.T,
provider, region string,
stackInfo RuntimeValidationStackInfo,
query operations.LogQuery) *[]operations.LogEntry {
snap, err := stack.DeserializeDeploymentV3(*stackInfo.Deployment, stack.DefaultSecretsProvider)
assert.NoError(t, err)
tree := operations.NewResourceTree(snap.Resources)
if !assert.NotNil(t, tree) {
return nil
}
cfg := map[config.Key]string{
config.MustMakeKey(provider, "region"): region,
}
ops := tree.OperationsProvider(cfg)
// Validate logs from example
logs, err := ops.GetLogs(query)
if !assert.NoError(t, err) {
return nil
}
return logs
}
func prepareProgram(t *testing.T, opts *ProgramTestOptions) {
// If we're just listing tests, simply print this test's directory.
if listDirs {
fmt.Printf("%s\n", opts.Dir)
}
// If we have a matcher, ensure that this test matches its pattern.
if directoryMatcher.re != nil && !directoryMatcher.re.Match([]byte(opts.Dir)) {
t.Skip(fmt.Sprintf("Skipping: '%v' does not match '%v'", opts.Dir, directoryMatcher.re))
}
// Disable stack backups for tests to avoid filling up ~/.pulumi/backups with unnecessary
// backups of test stacks.
if err := os.Setenv(filestate.DisableCheckpointBackupsEnvVar, "1"); err != nil {
t.Errorf("error setting env var '%s': %v", filestate.DisableCheckpointBackupsEnvVar, err)
}
// We want tests to default into being ran in parallel, hence the odd double negative.
if !opts.NoParallel {
t.Parallel()
}
if ciutil.IsCI() && os.Getenv("PULUMI_ACCESS_TOKEN") == "" {
t.Skip("Skipping: PULUMI_ACCESS_TOKEN is not set")
}
// If the test panics, recover and log instead of letting the panic escape the test. Even though *this* test will
// have run deferred functions and cleaned up, if the panic reaches toplevel it will kill the process and prevent
// other tests running in parallel from cleaning up.
defer func() {
if failure := recover(); failure != nil {
t.Errorf("panic testing %v: %v", opts.Dir, failure)
}
}()
	// Set up some default values for sending test reports and tracing data. We use environment variables to
// control these globally and set reasonable values for our own use in CI.
if opts.ReportStats == nil {
if v := os.Getenv("PULUMI_TEST_REPORT_CONFIG"); v != "" {
splits := strings.Split(v, ":")
if len(splits) != 3 {
t.Errorf("report config should be set to a value of the form: <aws-region>:<bucket-name>:<keyPrefix>")
}
opts.ReportStats = NewS3Reporter(splits[0], splits[1], splits[2])
}
}
if opts.Tracing == "" {
opts.Tracing = os.Getenv("PULUMI_TEST_TRACE_ENDPOINT")
}
}
// ProgramTest runs a lifecycle of Pulumi commands in a program working directory, using the `pulumi` and `yarn`
// binaries available on PATH. It essentially executes the following workflow:
//
// yarn install
// yarn link <each opts.Dependencies>
// (+) yarn run build
// pulumi init
// (*) pulumi login
// pulumi stack init integrationtesting
// pulumi config set <each opts.Config>
// pulumi config set --secret <each opts.Secrets>
// pulumi preview
// pulumi up
// pulumi stack export --file stack.json
// pulumi stack import --file stack.json
// pulumi preview (expected to be empty)
// pulumi up (expected to be empty)
// pulumi destroy --yes
// pulumi stack rm --yes integrationtesting
//
// (*) Only if PULUMI_ACCESS_TOKEN is set.
// (+) Only if `opts.RunBuild` is true.
//
// All commands must return success return codes for the test to succeed, unless ExpectFailure is true.
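//
// A minimal usage sketch (directory name and config values are hypothetical):
//
//   func TestMinimal(t *testing.T) {
//       integration.ProgramTest(t, &integration.ProgramTestOptions{
//           Dir:          filepath.Join("testdata", "minimal"),
//           Dependencies: []string{"@pulumi/pulumi"},
//           Config:       map[string]string{"aws:region": "us-west-2"},
//           Quick:        true,
//       })
//   }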
func ProgramTest(t *testing.T, opts *ProgramTestOptions) {
prepareProgram(t, opts)
pt := newProgramTester(t, opts)
err := pt.TestLifeCycleInitAndDestroy()
assert.NoError(t, err)
}
// ProgramTestManualLifeCycle returns a ProgramTester that must be manually controlled in terms of its lifecycle
func ProgramTestManualLifeCycle(t *testing.T, opts *ProgramTestOptions) *ProgramTester {
prepareProgram(t, opts)
pt := newProgramTester(t, opts)
return pt
}
// fprintf works like fmt.Fprintf, except it explicitly drops the return values. This keeps the linters happy, since
// they don't like to see errors dropped on the floor. It is possible that our call to fmt.Fprintf will fail, even
// for "standard" streams like `stdout` and `stderr`, if they have been set to non-blocking by an external process.
// In that case, we just drop the error on the floor and continue. We see this behavior in Travis when we try to write
// a lot of messages quickly (as we do when logging test failures).
func fprintf(w io.Writer, format string, a ...interface{}) {
_, err := fmt.Fprintf(w, format, a...)
contract.IgnoreError(err)
}
// ProgramTester contains state associated with running a single test pass.
type ProgramTester struct {
t *testing.T // the Go tester for this run.
opts *ProgramTestOptions // options that control this test run.
bin string // the `pulumi` binary we are using.
yarnBin string // the `yarn` binary we are using.
goBin string // the `go` binary we are using.
pythonBin string // the `python` binary we are using.
pipenvBin string // The `pipenv` binary we are using.
dotNetBin string // the `dotnet` binary we are using.
eventLog string // The path to the event log for this test.
maxStepTries int // The maximum number of times to retry a failed pulumi step.
tmpdir string // the temporary directory we use for our test environment
projdir string // the project directory we use for this run
	TestFinished bool // whether or not the test is finished
}
func newProgramTester(t *testing.T, opts *ProgramTestOptions) *ProgramTester {
stackName := opts.GetStackName()
maxStepTries := 1
if opts.RetryFailedSteps {
maxStepTries = 3
}
if opts.Quick {
opts.SkipPreview = true
opts.SkipExportImport = true
opts.SkipEmptyPreviewUpdate = true
}
return &ProgramTester{
t: t,
opts: opts,
eventLog: filepath.Join(os.TempDir(), string(stackName)+"-events.json"),
maxStepTries: maxStepTries,
}
}
func (pt *ProgramTester) getBin() (string, error) {
return getCmdBin(&pt.bin, "pulumi", pt.opts.Bin)
}
func (pt *ProgramTester) getYarnBin() (string, error) {
return getCmdBin(&pt.yarnBin, "yarn", pt.opts.YarnBin)
}
func (pt *ProgramTester) getGoBin() (string, error) {
return getCmdBin(&pt.goBin, "go", pt.opts.GoBin)
}
// getPythonBin returns a path to the currently-installed `python` binary, or an error if it could not be found.
func (pt *ProgramTester) getPythonBin() (string, error) {
if pt.pythonBin == "" {
pt.pythonBin = pt.opts.PythonBin
if pt.opts.PythonBin == "" {
var err error
// Look for "python3" by default, but fallback to `python` if not found as some Python 3
// distributions (in particular the default python.org Windows installation) do not include
// a `python3` binary.
pythonCmds := []string{"python3", "python"}
for _, bin := range pythonCmds {
pt.pythonBin, err = exec.LookPath(bin)
// Break on the first cmd we find on the path (if any).
if err == nil {
break
}
}
if err != nil {
return "", errors.Wrapf(err, "Expected to find one of %q on $PATH", pythonCmds)
}
}
}
return pt.pythonBin, nil
}
// getPipenvBin returns a path to the currently-installed Pipenv tool, or an error if the tool could not be found.
func (pt *ProgramTester) getPipenvBin() (string, error) {
return getCmdBin(&pt.pipenvBin, "pipenv", pt.opts.PipenvBin)
}
func (pt *ProgramTester) getDotNetBin() (string, error) {
return getCmdBin(&pt.dotNetBin, "dotnet", pt.opts.DotNetBin)
}
func (pt *ProgramTester) pulumiCmd(args []string) ([]string, error) {
bin, err := pt.getBin()
if err != nil {
return nil, err
}
cmd := []string{bin}
if du := pt.opts.GetDebugLogLevel(); du > 0 {
cmd = append(cmd, "--logtostderr", "-v="+strconv.Itoa(du))
}
cmd = append(cmd, args...)
if tracing := pt.opts.Tracing; tracing != "" {
cmd = append(cmd, "--tracing", tracing)
}
return cmd, nil
}
func (pt *ProgramTester) yarnCmd(args []string) ([]string, error) {
bin, err := pt.getYarnBin()
if err != nil {
return nil, err
}
result := []string{bin}
result = append(result, args...)
return withOptionalYarnFlags(result), nil
}
func (pt *ProgramTester) pythonCmd(args []string) ([]string, error) {
bin, err := pt.getPythonBin()
if err != nil {
return nil, err
}
cmd := []string{bin}
return append(cmd, args...), nil
}
func (pt *ProgramTester) pipenvCmd(args []string) ([]string, error) {
bin, err := pt.getPipenvBin()
if err != nil {
return nil, err
}
cmd := []string{bin}
return append(cmd, args...), nil
}
func (pt *ProgramTester) runCommand(name string, args []string, wd string) error {
return RunCommand(pt.t, name, args, wd, pt.opts)
}
func (pt *ProgramTester) runPulumiCommand(name string, args []string, wd string, expectFailure bool) error {
cmd, err := pt.pulumiCmd(args)
if err != nil {
return err
}
var postFn func(error) error
if pt.opts.PrePulumiCommand != nil {
postFn, err = pt.opts.PrePulumiCommand(args[0])
if err != nil {
return err
}
}
isUpdate := args[0] == "preview" || args[0] == "up" || args[0] == "destroy" || args[0] == "refresh"
// If we're doing a preview or an update and this project is a Python project, we need to run
// the command in the context of the virtual environment that Pipenv created in order to pick up
// the correct version of Python. We also need to do this for destroy and refresh so that
// dynamic providers are run in the right virtual environment.
// This is only necessary when not using automatic virtual environment support.
if !pt.opts.UseAutomaticVirtualEnv && isUpdate {
projinfo, err := pt.getProjinfo(wd)
if err != nil {
			return err
}
if projinfo.Proj.Runtime.Name() == "python" {
pipenvBin, err := pt.getPipenvBin()
if err != nil {
return err
}
// "pipenv run" activates the current virtual environment and runs the remainder of the arguments as if it
// were a command.
cmd = append([]string{pipenvBin, "run"}, cmd...)
}
}
_, _, err = retry.Until(context.Background(), retry.Acceptor{
Accept: func(try int, nextRetryTime time.Duration) (bool, interface{}, error) {
runerr := pt.runCommand(name, cmd, wd)
if runerr == nil {
return true, nil, nil
} else if _, ok := runerr.(*exec.ExitError); ok && isUpdate && !expectFailure {
// the update command failed, let's try again, assuming we haven't failed a few times.
if try+1 >= pt.maxStepTries {
return false, nil, errors.Errorf("%v did not succeed after %v tries", cmd, try+1)
}
pt.t.Logf("%v failed: %v; retrying...", cmd, runerr)
return false, nil, nil
}
			// some other error, fail
return false, nil, runerr
},
})
if postFn != nil {
if postErr := postFn(err); postErr != nil {
return multierror.Append(err, postErr)
}
}
return err
}
func (pt *ProgramTester) runYarnCommand(name string, args []string, wd string) error {
cmd, err := pt.yarnCmd(args)
if err != nil {
return err
}
_, _, err = retry.Until(context.Background(), retry.Acceptor{
Accept: func(try int, nextRetryTime time.Duration) (bool, interface{}, error) {
runerr := pt.runCommand(name, cmd, wd)
if runerr == nil {
return true, nil, nil
} else if _, ok := runerr.(*exec.ExitError); ok {
// yarn failed, let's try again, assuming we haven't failed a few times.
if try+1 >= 3 {
return false, nil, errors.Errorf("%v did not complete after %v tries", cmd, try+1)
}
return false, nil, nil
}
			// some other error, fail
return false, nil, runerr
},
})
return err
}
func (pt *ProgramTester) runPythonCommand(name string, args []string, wd string) error {
cmd, err := pt.pythonCmd(args)
if err != nil {
return err
}
return pt.runCommand(name, cmd, wd)
}
func (pt *ProgramTester) runVirtualEnvCommand(name string, args []string, wd string) error {
// When installing with `pip install -e`, a PKG-INFO file is created. If two packages are being installed
// this way simultaneously (which happens often, when running tests), both installations will be writing the
// same file simultaneously. If one process catches "PKG-INFO" in a half-written state, the one process that
// observed the torn write will fail to install the package.
//
// To avoid this problem, we use pipMutex to explicitly serialize installation operations. Doing so avoids
// the problem of multiple processes stomping on the same files in the source tree. Note that pipMutex is a
	// file mutex, so this strategy works even if the go test runner chooses to split up test execution across
// multiple processes. (Furthermore, each test gets an instance of ProgramTester and thus the mutex, so we'd
// need to be sharing the mutex globally in each test process if we weren't using the file system to lock.)
if name == "virtualenv-pip-install-package" {
if err := pipMutex.Lock(); err != nil {
panic(err)
}
if pt.opts.Verbose {
fprintf(pt.opts.Stdout, "acquired pip install lock\n")
defer fprintf(pt.opts.Stdout, "released pip install lock\n")
}
defer func() {
if err := pipMutex.Unlock(); err != nil {
panic(err)
}
}()
}
virtualenvBinPath, err := getVirtualenvBinPath(wd, args[0])
if err != nil {
return err
}
cmd := append([]string{virtualenvBinPath}, args[1:]...)
return pt.runCommand(name, cmd, wd)
}
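// runPipenvCommand runs a pipenv command in wd, serializing package installs via pipMutex to avoid
// concurrent setuptools writes to the shared source tree.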
func (pt *ProgramTester) runPipenvCommand(name string, args []string, wd string) error {
// Pipenv uses setuptools to install and uninstall packages. Setuptools has an installation mode called "develop"
// that we use to install the package being tested, since 1) it is lightweight and 2) not using it brings its own set
// of annoying problems.
//
// Setuptools develop does three things:
// 1. It invokes the "egg_info" command in the target package,
// 2. It creates a special `.egg-link` sentinel file in the current site-packages folder, pointing to the on-disk
//    path of the package being installed.
// 3. It updates easy-install.pth in site-packages so that pip understands that this package has been installed.
//
// Steps 2 and 3 operate entirely within the context of a virtualenv. The state that they mutate is fully contained
// within the current virtualenv. However, step 1 operates in the context of the package's source tree. Egg info
// is responsible for producing a minimal "egg" for a particular package, and its largest responsibility is creating
// a PKG-INFO file for a package. PKG-INFO contains, among other things, the version of the package being installed.
//
// If two packages are being installed in "develop" mode simultaneously (which happens often, when running tests),
// both installations will run "egg_info" on the source tree and both processes will be writing the same files
// simultaneously. If one process catches "PKG-INFO" in a half-written state, the one process that observed the
// torn write will fail to install the package (setuptools crashes).
//
// To avoid this problem, we use pipMutex to explicitly serialize installation operations. Doing so avoids the
// problem of multiple processes stomping on the same files in the source tree. Note that pipMutex is a file
// mutex, so this strategy works even if the go test runner chooses to split up test execution across multiple
// processes. (Furthermore, each test gets an instance of ProgramTester and thus the mutex, so we'd need to be
// sharing the mutex globally in each test process if we weren't using the file system to lock.)
if name == "pipenv-install-package" {
if err := pipMutex.Lock(); err != nil {
panic(err)
}
if pt.opts.Verbose {
fprintf(pt.opts.Stdout, "acquired pip install lock\n")
defer fprintf(pt.opts.Stdout, "released pip install lock\n")
}
defer func() {
if err := pipMutex.Unlock(); err != nil {
panic(err)
}
}()
}
cmd, err := pt.pipenvCmd(args)
if err != nil {
return err
}
return pt.runCommand(name, cmd, wd)
}
// TestLifeCyclePrepare prepares a test by creating a temporary directory
func (pt *ProgramTester) TestLifeCyclePrepare() error {
tmpdir, projdir, err := pt.copyTestToTemporaryDirectory()
pt.tmpdir = tmpdir
pt.projdir = projdir
return err
}
// TestCleanUp cleans up the temporary directory that a test used
func (pt *ProgramTester) TestCleanUp() {
testFinished := pt.TestFinished
if pt.tmpdir != "" {
if !testFinished || pt.t.Failed() {
// Test aborted or failed. Maybe copy to "failed tests" directory.
failedTestsDir := os.Getenv("PULUMI_FAILED_TESTS_DIR")
if failedTestsDir != "" {
dest := filepath.Join(failedTestsDir, pt.t.Name()+uniqueSuffix())
contract.IgnoreError(fsutil.CopyFile(dest, pt.tmpdir, nil))
}
} else {
contract.IgnoreError(os.RemoveAll(pt.tmpdir))
}
} else {
// When tmpdir is empty, we ran "in tree", which means we wrote output
// to the "command-output" folder in the projdir, and we should clean
// it up if the test passed
if testFinished && !pt.t.Failed() {
contract.IgnoreError(os.RemoveAll(filepath.Join(pt.projdir, commandOutputFolderName)))
}
}
}
// TestLifeCycleInitAndDestroy executes the test and cleans up
func (pt *ProgramTester) TestLifeCycleInitAndDestroy() error {
err := pt.TestLifeCyclePrepare()
if err != nil {
return errors.Wrapf(err, "copying test to temp dir %s", pt.tmpdir)
}
pt.TestFinished = false
defer pt.TestCleanUp()
err = pt.TestLifeCycleInitialize()
if err != nil {
return errors.Wrap(err, "initializing test project")
}
// Ensure that before we exit, we attempt to destroy and remove the stack.
defer func() {
destroyErr := pt.TestLifeCycleDestroy()
assert.NoError(pt.t, destroyErr)
}()
if err = pt.TestPreviewUpdateAndEdits(); err != nil {
return errors.Wrap(err, "running test preview, update, and edits")
}
if pt.opts.RunUpdateTest {
err = upgradeProjectDeps(pt.projdir, pt)
if err != nil {
return errors.Wrap(err, "upgrading project dependencies")
}
if err = pt.TestPreviewUpdateAndEdits(); err != nil {
return errors.Wrap(err, "running test preview, update, and edits")
}
}
pt.TestFinished = true
return nil
}
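// upgradeProjectDeps refreshes the project's package dependencies for its runtime: yarn link for Node.js,
// pip install for Python.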
func upgradeProjectDeps(projectDir string, pt *ProgramTester) error {
projInfo, err := pt.getProjinfo(projectDir)
if err != nil {
return errors.Wrap(err, "getting project info")
}
switch rt := projInfo.Proj.Runtime.Name(); rt {
case NodeJSRuntime:
if err = pt.yarnLinkPackageDeps(projectDir); err != nil {
return err
}
case PythonRuntime:
if err = pt.installPipPackageDeps(projectDir); err != nil {
return err
}
default:
return errors.Errorf("unrecognized project runtime: %s", rt)
}
return nil
}
// TestLifeCycleInitialize initializes the project directory and stack along with any configuration
func (pt *ProgramTester) TestLifeCycleInitialize() error {
dir := pt.projdir
stackName := pt.opts.GetStackName()
// If RelativeWorkDir is specified, apply that relative to the temp folder for use as working directory during tests.
if pt.opts.RelativeWorkDir != "" {
dir = filepath.Join(dir, pt.opts.RelativeWorkDir)
}
// Set the default target Pulumi API if not overridden in options.
if pt.opts.CloudURL == "" {
pulumiAPI := os.Getenv("PULUMI_API")
if pulumiAPI != "" {
pt.opts.CloudURL = pulumiAPI
}
}
// Ensure all links are present, the stack is created, and all configs are applied.
fprintf(pt.opts.Stdout, "Initializing project (dir %s; stack %s)\n", dir, stackName)
// Login as needed.
stackInitName := string(pt.opts.GetStackNameWithOwner())
if os.Getenv("PULUMI_ACCESS_TOKEN") == "" && pt.opts.CloudURL == "" {
fmt.Printf("Using existing logged in user for tests. Set PULUMI_ACCESS_TOKEN and/or PULUMI_API to override.\n")
} else {
// Set PulumiCredentialsPathEnvVar to our CWD, so we use credentials specific to just this
// test.
pt.opts.Env = append(pt.opts.Env, fmt.Sprintf("%s=%s", workspace.PulumiCredentialsPathEnvVar, dir))
loginArgs := []string{"login"}
loginArgs = addFlagIfNonNil(loginArgs, "--cloud-url", pt.opts.CloudURL)
// If this is a local OR cloud login, then don't attach the owner to the stack-name.
if pt.opts.CloudURL != "" {
stackInitName = string(pt.opts.GetStackName())
}
if err := pt.runPulumiCommand("pulumi-login", loginArgs, dir, false); err != nil {
return err
}
}
// Stack init
stackInitArgs := []string{"stack", "init", stackInitName}
if pt.opts.SecretsProvider != "" {
stackInitArgs = append(stackInitArgs, "--secrets-provider", pt.opts.SecretsProvider)
}
if err := pt.runPulumiCommand("pulumi-stack-init", stackInitArgs, dir, false); err != nil {
return err
}
for key, value := range pt.opts.Config {
if err := pt.runPulumiCommand("pulumi-config",
[]string{"config", "set", key, value}, dir, false); err != nil {
return err
}
}
for key, value := range pt.opts.Secrets {
if err := pt.runPulumiCommand("pulumi-config",
[]string{"config", "set", "--secret", key, value}, dir, false); err != nil {
return err
}
}
for _, cv := range pt.opts.OrderedConfig {
configArgs := []string{"config", "set", cv.Key, cv.Value}
if cv.Secret {
configArgs = append(configArgs, "--secret")
}
if cv.Path {
configArgs = append(configArgs, "--path")
}
if err := pt.runPulumiCommand("pulumi-config", configArgs, dir, false); err != nil {
return err
}
}
return nil
}
// TestLifeCycleDestroy destroys a stack and removes it
func (pt *ProgramTester) TestLifeCycleDestroy() error {
if pt.projdir != "" {
// Destroy and remove the stack.
fprintf(pt.opts.Stdout, "Destroying stack\n")
destroy := []string{"destroy", "--non-interactive", "--yes", "--skip-preview"}
if pt.opts.GetDebugUpdates() {
destroy = append(destroy, "-d")
}
if err := pt.runPulumiCommand("pulumi-destroy", destroy, pt.projdir, false); err != nil {
return err
}
if pt.t.Failed() {
fprintf(pt.opts.Stdout, "Test failed, retaining stack '%s'\n", pt.opts.GetStackNameWithOwner())
return nil
}
if !pt.opts.SkipStackRemoval {
return pt.runPulumiCommand("pulumi-stack-rm", []string{"stack", "rm", "--yes"}, pt.projdir, false)
}
}
return nil
}
// TestPreviewUpdateAndEdits runs the preview, update, and any relevant edits
func (pt *ProgramTester) TestPreviewUpdateAndEdits() error {
dir := pt.projdir
// Now preview and update the real changes.
fprintf(pt.opts.Stdout, "Performing primary preview and update\n")
initErr := pt.PreviewAndUpdate(dir, "initial", pt.opts.ExpectFailure, false, false)
// If the initial preview/update failed, just exit without trying the rest (but make sure to destroy).
if initErr != nil {
return initErr
}
// Perform an empty preview and update; nothing is expected to happen here.
if !pt.opts.SkipExportImport {
fprintf(pt.opts.Stdout, "Roundtripping checkpoint via stack export and stack import\n")
if err := pt.exportImport(dir); err != nil {
return err
}
}
if !pt.opts.SkipEmptyPreviewUpdate {
msg := ""
if !pt.opts.AllowEmptyUpdateChanges {
msg = "(no changes expected)"
}
fprintf(pt.opts.Stdout, "Performing empty preview and update%s\n", msg)
if err := pt.PreviewAndUpdate(
dir, "empty", false, !pt.opts.AllowEmptyPreviewChanges, !pt.opts.AllowEmptyUpdateChanges); err != nil {
return err
}
}
// Run additional validation provided by the test options, passing in the checkpoint info.
if err := pt.performExtraRuntimeValidation(pt.opts.ExtraRuntimeValidation, dir); err != nil {
return err
}
if !pt.opts.SkipRefresh {
// Perform a refresh and ensure it doesn't yield changes.
refresh := []string{"refresh", "--non-interactive", "--yes", "--skip-preview"}
if pt.opts.GetDebugUpdates() {
refresh = append(refresh, "-d")
}
if !pt.opts.ExpectRefreshChanges {
refresh = append(refresh, "--expect-no-changes")
}
if err := pt.runPulumiCommand("pulumi-refresh", refresh, dir, false); err != nil {
return err
}
}
// If there are any edits, apply them and run a preview and update for each one.
return pt.testEdits(dir)
}
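// exportImport round-trips the stack's checkpoint through `pulumi stack export` and `pulumi stack import`.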
func (pt *ProgramTester) exportImport(dir string) error {
exportCmd := []string{"stack", "export", "--file", "stack.json"}
importCmd := []string{"stack", "import", "--file", "stack.json"}
defer func() {
contract.IgnoreError(os.Remove(filepath.Join(dir, "stack.json")))
}()
if err := pt.runPulumiCommand("pulumi-stack-export", exportCmd, dir, false); err != nil {
return err
}
return pt.runPulumiCommand("pulumi-stack-import", importCmd, dir, false)
}
// PreviewAndUpdate runs pulumi preview followed by pulumi up
func (pt *ProgramTester) PreviewAndUpdate(dir string, name string, shouldFail, expectNopPreview,
expectNopUpdate bool) error {
preview := []string{"preview", "--non-interactive"}
update := []string{"up", "--non-interactive", "--yes", "--skip-preview", "--event-log", pt.eventLog}
if pt.opts.GetDebugUpdates() {
preview = append(preview, "-d")
update = append(update, "-d")
}
if expectNopPreview {
preview = append(preview, "--expect-no-changes")
}
if expectNopUpdate {
update = append(update, "--expect-no-changes")
}
if pt.opts.PreviewCommandlineFlags != nil {
preview = append(preview, pt.opts.PreviewCommandlineFlags...)
}
if pt.opts.UpdateCommandlineFlags != nil {
update = append(update, pt.opts.UpdateCommandlineFlags...)
}
// If not in quick mode, run an explicit preview.
if !pt.opts.SkipPreview {
if err := pt.runPulumiCommand("pulumi-preview-"+name, preview, dir, shouldFail); err != nil {
if shouldFail {
fprintf(pt.opts.Stdout, "Permitting failure (ExpectFailure=true for this preview)\n")
return nil
}
return err
}
}
// Now run an update.
if !pt.opts.SkipUpdate {
if err := pt.runPulumiCommand("pulumi-update-"+name, update, dir, shouldFail); err != nil {
if shouldFail {
fprintf(pt.opts.Stdout, "Permitting failure (ExpectFailure=true for this update)\n")
return nil
}
return err
}
}
// If we expected a failure, but none occurred, return an error.
if shouldFail {
return errors.New("expected this step to fail, but it succeeded")
}
return nil
}
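// query runs `pulumi query`, treating failure as success when shouldFail is set.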
func (pt *ProgramTester) query(dir string, name string, shouldFail bool) error {
query := []string{"query", "--non-interactive"}
if pt.opts.GetDebugUpdates() {
query = append(query, "-d")
}
if pt.opts.QueryCommandlineFlags != nil {
query = append(query, pt.opts.QueryCommandlineFlags...)
}
// Now run a query.
if err := pt.runPulumiCommand("pulumi-query-"+name, query, dir, shouldFail); err != nil {
if shouldFail {
fprintf(pt.opts.Stdout, "Permitting failure (ExpectFailure=true for this update)\n")
return nil
}
return err
}
// If we expected a failure, but none occurred, return an error.
if shouldFail {
return errors.New("expected this step to fail, but it succeeded")
}
return nil
}
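// testEdits applies each of the configured edit directories in order.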
func (pt *ProgramTester) testEdits(dir string) error {
for i, edit := range pt.opts.EditDirs {
var err error
if err = pt.testEdit(dir, i, edit); err != nil {
return err
}
}
return nil
}
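// testEdit applies a single edit (additively or by replacing the project contents), re-prepares the project,
// and reruns preview/update (or query) plus any extra runtime validation.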
func (pt *ProgramTester) testEdit(dir string, i int, edit EditDir) error {
fprintf(pt.opts.Stdout, "Applying edit '%v' and rerunning preview and update\n", edit.Dir)
if edit.Additive {
// Just copy new files into dir
if err := fsutil.CopyFile(dir, edit.Dir, nil); err != nil {
return errors.Wrapf(err, "Couldn't copy %v into %v", edit.Dir, dir)
}
} else {
// Create a new temporary directory
newDir, err := ioutil.TempDir("", pt.opts.StackName+"-")
if err != nil {
return errors.Wrapf(err, "Couldn't create new temporary directory")
}
// Delete whichever copy of the test is unused when we return
dirToDelete := newDir
defer func() {
contract.IgnoreError(os.RemoveAll(dirToDelete))
}()
// Copy everything except Pulumi.yaml, Pulumi.<stack-name>.yaml, and .pulumi from source into new directory
exclusions := make(map[string]bool)
projectYaml := workspace.ProjectFile + ".yaml"
configYaml := workspace.ProjectFile + "." + pt.opts.StackName + ".yaml"
exclusions[workspace.BookkeepingDir] = true
exclusions[projectYaml] = true
exclusions[configYaml] = true
if err := fsutil.CopyFile(newDir, edit.Dir, exclusions); err != nil {
return errors.Wrapf(err, "Couldn't copy %v into %v", edit.Dir, newDir)
}
// Copy Pulumi.yaml, Pulumi.<stack-name>.yaml, and .pulumi from old directory to new directory
oldProjectYaml := filepath.Join(dir, projectYaml)
newProjectYaml := filepath.Join(newDir, projectYaml)
oldConfigYaml := filepath.Join(dir, configYaml)
newConfigYaml := filepath.Join(newDir, configYaml)
oldProjectDir := filepath.Join(dir, workspace.BookkeepingDir)
newProjectDir := filepath.Join(newDir, workspace.BookkeepingDir)
if err := fsutil.CopyFile(newProjectYaml, oldProjectYaml, nil); err != nil {
return errors.Wrap(err, "Couldn't copy Pulumi.yaml")
}
if err := fsutil.CopyFile(newConfigYaml, oldConfigYaml, nil); err != nil {
return errors.Wrapf(err, "Couldn't copy Pulumi.%s.yaml", pt.opts.StackName)
}
if err := fsutil.CopyFile(newProjectDir, oldProjectDir, nil); err != nil {
return errors.Wrap(err, "Couldn't copy .pulumi")
}
// Finally, replace our current temp directory with the new one.
dirOld := dir + ".old"
if err := os.Rename(dir, dirOld); err != nil {
return errors.Wrapf(err, "Couldn't rename %v to %v", dir, dirOld)
}
// There's a brief window here where the old temp dir name could be taken from us.
if err := os.Rename(newDir, dir); err != nil {
return errors.Wrapf(err, "Couldn't rename %v to %v", newDir, dir)
}
// Keep dir, delete dirOld
dirToDelete = dirOld
}
err := pt.prepareProjectDir(dir)
if err != nil {
return errors.Wrapf(err, "Couldn't prepare project in %v", dir)
}
oldStdOut := pt.opts.Stdout
oldStderr := pt.opts.Stderr
oldVerbose := pt.opts.Verbose
if edit.Stdout != nil {
pt.opts.Stdout = edit.Stdout
}
if edit.Stderr != nil {
pt.opts.Stderr = edit.Stderr
}
if edit.Verbose {
pt.opts.Verbose = true
}
defer func() {
pt.opts.Stdout = oldStdOut
pt.opts.Stderr = oldStderr
pt.opts.Verbose = oldVerbose
}()
if !edit.QueryMode {
if err = pt.PreviewAndUpdate(dir, fmt.Sprintf("edit-%d", i),
edit.ExpectFailure, edit.ExpectNoChanges, edit.ExpectNoChanges); err != nil {
return err
}
} else {
if err = pt.query(dir, fmt.Sprintf("query-%d", i), edit.ExpectFailure); err != nil {
return err
}
}
return pt.performExtraRuntimeValidation(edit.ExtraRuntimeValidation, dir)
}
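// performExtraRuntimeValidation exports the stack, decodes the deployment and engine events, and passes the
// resulting stack info to the supplied validation function.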
func (pt *ProgramTester) performExtraRuntimeValidation(
extraRuntimeValidation func(t *testing.T, stack RuntimeValidationStackInfo), dir string) error {
if extraRuntimeValidation == nil {
return nil
}
stackName := pt.opts.GetStackName()
// Create a temporary file name for the stack export
tempDir, err := ioutil.TempDir("", string(stackName))
if err != nil {
return err
}
fileName := filepath.Join(tempDir, "stack.json")
// Invoke `pulumi stack export`
if err = pt.runPulumiCommand("pulumi-export",
[]string{"stack", "export", "--file", fileName}, dir, false); err != nil {
return errors.Wrapf(err, "expected to export stack to file: %s", fileName)
}
// Open the exported JSON file
f, err := os.Open(fileName)
if err != nil {
return errors.Wrapf(err, "expected to be able to open file with stack exports: %s", fileName)
}
defer func() {
contract.IgnoreClose(f)
contract.IgnoreError(os.RemoveAll(tempDir))
}()
// Unmarshal the Deployment
var untypedDeployment apitype.UntypedDeployment
if err = json.NewDecoder(f).Decode(&untypedDeployment); err != nil {
return err
}
var deployment apitype.DeploymentV3
if err = json.Unmarshal(untypedDeployment.Deployment, &deployment); err != nil {
return err
}
// Get the root resource and outputs from the deployment
var rootResource apitype.ResourceV3
var outputs map[string]interface{}
for _, res := range deployment.Resources {
if res.Type == resource.RootStackType {
rootResource = res
outputs = res.Outputs
}
}
// Read the event log.
eventsFile, err := os.Open(pt.eventLog)
if err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "expected to be able to open event log file %s", pt.eventLog)
}
defer contract.IgnoreClose(eventsFile)
decoder, events := json.NewDecoder(eventsFile), []apitype.EngineEvent{}
for {
var event apitype.EngineEvent
if err = decoder.Decode(&event); err != nil {
if err == io.EOF {
break
}
return errors.Wrapf(err, "decoding engine event")
}
events = append(events, event)
}
// Populate stack info object with all of this data to pass to the validation function
stackInfo := RuntimeValidationStackInfo{
StackName: pt.opts.GetStackName(),
Deployment: &deployment,
RootResource: rootResource,
Outputs: outputs,
Events: events,
}
fprintf(pt.opts.Stdout, "Performing extra runtime validation.\n")
extraRuntimeValidation(pt.t, stackInfo)
fprintf(pt.opts.Stdout, "Extra runtime validation complete.\n")
return nil
}
// copyTestToTemporaryDirectory creates a temporary directory to run the test in and copies the test to it.
func (pt *ProgramTester) copyTestToTemporaryDirectory() (string, string, error) {
// Get the source dir and project info.
sourceDir := pt.opts.Dir
projinfo, err := pt.getProjinfo(sourceDir)
if err != nil {
return "", "", err
}
// Set up a prefix so that all output has the test directory name in it. This is important for debugging
// because we run tests in parallel, and so all output will be interleaved and difficult to follow otherwise.
var prefix string
if len(sourceDir) <= 30 {
prefix = fmt.Sprintf("[ %30.30s ] ", sourceDir)
} else {
prefix = fmt.Sprintf("[ %30.30s ] ", sourceDir[len(sourceDir)-30:])
}
stdout := pt.opts.Stdout
if stdout == nil {
stdout = newPrefixer(os.Stdout, prefix)
pt.opts.Stdout = stdout
}
stderr := pt.opts.Stderr
if stderr == nil {
stderr = newPrefixer(os.Stderr, prefix)
pt.opts.Stderr = stderr
}
fprintf(pt.opts.Stdout, "sample: %v\n", sourceDir)
bin, err := pt.getBin()
if err != nil {
return "", "", err
}
fprintf(pt.opts.Stdout, "pulumi: %v\n", bin)
stackName := string(pt.opts.GetStackName())
// For most projects, we will copy to a temporary directory. For Go projects, however, we must create
// a folder structure that adheres to GOPATH requirements
var tmpdir, projdir string
if projinfo.Proj.Runtime.Name() == "go" {
targetDir, err := tools.CreateTemporaryGoFolder(stackName)
if err != nil {
return "", "", errors.Wrap(err, "Couldn't create temporary directory")
}
tmpdir = targetDir
projdir = targetDir
} else {
targetDir, tempErr := ioutil.TempDir("", stackName+"-")
if tempErr != nil {
return "", "", errors.Wrap(tempErr, "Couldn't create temporary directory")
}
tmpdir = targetDir
projdir = targetDir
}
// Copy the source project.
if copyErr := fsutil.CopyFile(tmpdir, sourceDir, nil); copyErr != nil {
return "", "", copyErr
}
projinfo.Root = projdir
err = pt.prepareProject(projinfo)
if err != nil {
return "", "", errors.Wrapf(err, "Failed to prepare %v", projdir)
}
fprintf(stdout, "projdir: %v\n", projdir)
return tmpdir, projdir, nil
}
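// getProjinfo loads the Pulumi project file from projectDir and returns the parsed project info.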
func (pt *ProgramTester) getProjinfo(projectDir string) (*engine.Projinfo, error) {
// Load up the package so we know things like what language the project is.
projfile := filepath.Join(projectDir, workspace.ProjectFile+".yaml")
proj, err := workspace.LoadProject(projfile)
if err != nil {
return nil, err
}
return &engine.Projinfo{Proj: proj, Root: projectDir}, nil
}
// prepareProject runs setup necessary to get the project ready for `pulumi` commands.
func (pt *ProgramTester) prepareProject(projinfo *engine.Projinfo) error {
// Based on the language, invoke the right routine to prepare the target directory.
switch rt := projinfo.Proj.Runtime.Name(); rt {
case NodeJSRuntime:
return pt.prepareNodeJSProject(projinfo)
case PythonRuntime:
return pt.preparePythonProject(projinfo)
case GoRuntime:
return pt.prepareGoProject(projinfo)
case DotNetRuntime:
return pt.prepareDotNetProject(projinfo)
default:
return errors.Errorf("unrecognized project runtime: %s", rt)
}
}
// prepareProjectDir runs setup necessary to get the project ready for `pulumi` commands.
func (pt *ProgramTester) prepareProjectDir(projectDir string) error {
projinfo, err := pt.getProjinfo(projectDir)
if err != nil {
return err
}
return pt.prepareProject(projinfo)
}
// prepareNodeJSProject runs setup necessary to get a Node.js project ready for `pulumi` commands.
func (pt *ProgramTester) prepareNodeJSProject(projinfo *engine.Projinfo) error {
if err := pulumi_testing.WriteYarnRCForTest(projinfo.Root); err != nil {
return err
}
// Get the correct pwd to run Yarn in.
cwd, _, err := projinfo.GetPwdMain()
if err != nil {
return err
}
// If the test requested some packages to be overridden, we do two things. First, if the package is listed as a
// direct dependency of the project, we change the version constraint in the package.json. For transitive
// dependencies, we use yarn's "resolutions" feature to force them to a specific version.
if len(pt.opts.Overrides) > 0 {
packageJSON, err := readPackageJSON(cwd)
if err != nil {
return err
}
resolutions := make(map[string]interface{})
for packageName, packageVersion := range pt.opts.Overrides {
for _, section := range []string{"dependencies", "devDependencies"} {
if _, has := packageJSON[section]; has {
entry := packageJSON[section].(map[string]interface{})
if _, has := entry[packageName]; has {
entry[packageName] = packageVersion
}
}
}
fprintf(pt.opts.Stdout, "adding resolution for %s to version %s\n", packageName, packageVersion)
resolutions["**/"+packageName] = packageVersion
}
// Replace any existing resolutions section with our newly computed one.
packageJSON["resolutions"] = resolutions
if err := writePackageJSON(cwd, packageJSON); err != nil {
return err
}
}
// Now ensure dependencies are present.
if err = pt.runYarnCommand("yarn-install", []string{"install"}, cwd); err != nil {
return err
}
if !pt.opts.RunUpdateTest {
if err = pt.yarnLinkPackageDeps(cwd); err != nil {
return err
}
}
if pt.opts.RunBuild {
// And finally compile it using whatever build steps are in the package.json file.
if err = pt.runYarnCommand("yarn-build", []string{"run", "build"}, cwd); err != nil {
return err
}
}
return nil
}
// readPackageJSON unmarshals the package.json file located in pathToPackage.
func readPackageJSON(pathToPackage string) (map[string]interface{}, error) {
f, err := os.Open(filepath.Join(pathToPackage, "package.json"))
if err != nil {
return nil, errors.Wrap(err, "opening package.json")
}
defer contract.IgnoreClose(f)
var ret map[string]interface{}
if err := json.NewDecoder(f).Decode(&ret); err != nil {
return nil, errors.Wrap(err, "decoding package.json")
}
return ret, nil
}
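// writePackageJSON writes metadata back to the package.json file located in pathToPackage.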
func writePackageJSON(pathToPackage string, metadata map[string]interface{}) error {
// os.Create truncates the already existing file.
f, err := os.Create(filepath.Join(pathToPackage, "package.json"))
if err != nil {
return errors.Wrap(err, "opening package.json")
}
defer contract.IgnoreClose(f)
encoder := json.NewEncoder(f)
encoder.SetIndent("", " ")
return errors.Wrap(encoder.Encode(metadata), "writing package.json")
}
// preparePythonProject runs setup necessary to get a Python project ready for `pulumi` commands.
func (pt *ProgramTester) preparePythonProject(projinfo *engine.Projinfo) error {
cwd, _, err := projinfo.GetPwdMain()
if err != nil {
return err
}
if pt.opts.UseAutomaticVirtualEnv {
if err = pt.runPythonCommand("python-venv", []string{"-m", "venv", "venv"}, cwd); err != nil {
return err
}
projinfo.Proj.Runtime.SetOption("virtualenv", "venv")
projfile := filepath.Join(projinfo.Root, workspace.ProjectFile+".yaml")
if err = projinfo.Proj.Save(projfile); err != nil {
return errors.Wrap(err, "saving project")
}
if err := pt.runVirtualEnvCommand("virtualenv-pip-install",
[]string{"pip", "install", "-r", "requirements.txt"}, cwd); err != nil {
return err
}
} else {
if err = pt.preparePythonProjectWithPipenv(cwd); err != nil {
return err
}
}
if !pt.opts.RunUpdateTest {
if err = pt.installPipPackageDeps(cwd); err != nil {
return err
}
}
return nil
}
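// preparePythonProjectWithPipenv creates a pipenv-managed virtual environment and installs the project's
// requirements into it.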
func (pt *ProgramTester) preparePythonProjectWithPipenv(cwd string) error {
// Create a new Pipenv environment. This bootstraps a new virtual environment containing the version of Python that
// we requested. Note that this version of Python is sourced from the machine, so you must first install the version
// of Python that you are requesting on the host machine before building a virtualenv for it.
pythonVersion := "3"
if runtime.GOOS == windowsOS {
// Due to https://bugs.python.org/issue34679, Python Dynamic Providers on Windows do not
// work on Python 3.8.0 (but are fixed in 3.8.1). For now we will force Windows to use 3.7
// to avoid this bug, until 3.8.1 is available in all our CI systems.
pythonVersion = "3.7"
}
if err := pt.runPipenvCommand("pipenv-new", []string{"--python", pythonVersion}, cwd); err != nil {
return err
}
// Install the package's dependencies. We do this by running `pip` inside the virtualenv that `pipenv` has created.
// We don't use `pipenv install` because we don't want a lock file and prefer the plain `pip install` model,
// which matches what our customers do.
err := pt.runPipenvCommand("pipenv-install", []string{"run", "pip", "install", "-r", "requirements.txt"}, cwd)
if err != nil {
return err
}
return nil
}
// yarnLinkPackageDeps brings in package dependencies via yarn.
func (pt *ProgramTester) yarnLinkPackageDeps(cwd string) error {
for _, dependency := range pt.opts.Dependencies {
if err := pt.runYarnCommand("yarn-link", []string{"link", dependency}, cwd); err != nil {
return err
}
}
return nil
}
// installPipPackageDeps brings in package dependencies via pip install.
func (pt *ProgramTester) installPipPackageDeps(cwd string) error {
var err error
for _, dep := range pt.opts.Dependencies {
// If the given filepath isn't absolute, make it absolute. We're about to pass it to pipenv and pipenv is
// operating inside of a random folder in /tmp.
if !filepath.IsAbs(dep) {
dep, err = filepath.Abs(dep)
if err != nil {
return err
}
}
if pt.opts.UseAutomaticVirtualEnv {
if err := pt.runVirtualEnvCommand("virtualenv-pip-install-package",
[]string{"pip", "install", "-e", dep}, cwd); err != nil {
return err
}
} else {
if err := pt.runPipenvCommand("pipenv-install-package",
[]string{"run", "pip", "install", "-e", dep}, cwd); err != nil {
return err
}
}
}
return nil
}
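// getVirtualenvBinPath returns the path to the named binary inside the project's "venv" virtual environment,
// accounting for the Windows "Scripts" layout.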
func getVirtualenvBinPath(cwd, bin string) (string, error) {
virtualenvBinPath := filepath.Join(cwd, "venv", "bin", bin)
if runtime.GOOS == windowsOS {
virtualenvBinPath = filepath.Join(cwd, "venv", "Scripts", fmt.Sprintf("%s.exe", bin))
}
if info, err := os.Stat(virtualenvBinPath); err != nil || info.IsDir() {
return "", errors.Errorf("Expected %s to exist in virtual environment at %q", bin, virtualenvBinPath)
}
return virtualenvBinPath, nil
}
// getSanitizedModulePath strips the major version suffix from a Go module path
// Note: most of the pulumi modules don't use major version subdirectories for modules
func getSanitizedModulePath(pkg string) string {
re := regexp.MustCompile(`v\d`)
v := re.FindString(pkg)
if v != "" {
return strings.TrimSuffix(strings.Replace(pkg, v, "", -1), "/")
}
return pkg
}
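// getRewritePath computes the local filesystem path used to replace a Go module dependency, preferring depRoot
// (PULUMI_GO_DEP_ROOT) when set and falling back to the GOPATH/src layout.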
func getRewritePath(pkg string, gopath string, depRoot string) string {
var depParts []string
sanitizedPkg := getSanitizedModulePath(pkg)
splitPkg := strings.Split(sanitizedPkg, "/")
if depRoot != "" {
// Get the package name
// This is the value after "github.com/foo/bar"
repoName := splitPkg[2]
basePath := splitPkg[len(splitPkg)-1]
if basePath == repoName {
depParts = []string{depRoot, repoName}
} else {
depParts = []string{depRoot, repoName, basePath}
}
return filepath.Join(depParts...)
}
depParts = append([]string{gopath, "src"}, splitPkg...)
return filepath.Join(depParts...)
}
// prepareGoProject runs setup necessary to get a Go project ready for `pulumi` commands.
func (pt *ProgramTester) prepareGoProject(projinfo *engine.Projinfo) error {
// Go programs are compiled, so we will compile the project first.
goBin, err := pt.getGoBin()
if err != nil {
return errors.Wrap(err, "locating `go` binary")
}
// Ensure GOPATH is known.
gopath := os.Getenv("GOPATH")
if gopath == "" {
usr, userErr := user.Current()
if userErr != nil {
return userErr
}
gopath = filepath.Join(usr.HomeDir, "go")
}
depRoot := os.Getenv("PULUMI_GO_DEP_ROOT")
cwd, _, err := projinfo.GetPwdMain()
if err != nil {
return err
}
// initialize a go.mod for dependency resolution if one doesn't exist
_, err = os.Stat(filepath.Join(cwd, "go.mod"))
if err != nil {
err = pt.runCommand("go-mod-init", []string{goBin, "mod", "init"}, cwd)
if err != nil {
return err
}
}
// initial tidy to resolve dependencies
err = pt.runCommand("go-mod-tidy", []string{goBin, "mod", "tidy"}, cwd)
if err != nil {
return err
}
// link local dependencies
for _, pkg := range pt.opts.Dependencies {
dep := getRewritePath(pkg, gopath, depRoot)
editStr := fmt.Sprintf("%s=%s", pkg, dep)
err = pt.runCommand("go-mod-edit", []string{goBin, "mod", "edit", "-replace", editStr}, cwd)
if err != nil {
return err
}
}
// resolve dependencies
err = pt.runCommand("go-mod-download", []string{goBin, "mod", "download"}, cwd)
if err != nil {
return err
}
if pt.opts.RunBuild {
outBin := filepath.Join(gopath, "bin", string(projinfo.Proj.Name))
if runtime.GOOS == windowsOS {
outBin = fmt.Sprintf("%s.exe", outBin)
}
err = pt.runCommand("go-build", []string{goBin, "build", "-o", outBin, "."}, cwd)
if err != nil {
return err
}
_, err = os.Stat(outBin)
if err != nil {
return fmt.Errorf("error finding built application artifact: %w", err)
}
}
return nil
}
// prepareDotNetProject runs setup necessary to get a .NET project ready for `pulumi` commands.
func (pt *ProgramTester) prepareDotNetProject(projinfo *engine.Projinfo) error {
dotNetBin, err := pt.getDotNetBin()
if err != nil {
return errors.Wrap(err, "locating `dotnet` binary")
}
cwd, _, err := projinfo.GetPwdMain()
if err != nil {
return err
}
localNuget := os.Getenv("PULUMI_LOCAL_NUGET")
if localNuget == "" {
localNuget = "/opt/pulumi/nuget"
}
for _, dep := range pt.opts.Dependencies {
// dotnet add package requires a specific version in case of a pre-release, so we have to look it up.
matches, err := filepath.Glob(filepath.Join(localNuget, dep+".?.*.nupkg"))
if err != nil {
return errors.Wrap(err, "failed to find a local Pulumi NuGet package")
}
if len(matches) != 1 {
return errors.Errorf("attempting to find a local Pulumi NuGet package yielded %v results", matches)
}
file := filepath.Base(matches[0])
r := strings.NewReplacer(dep+".", "", ".nupkg", "")
version := r.Replace(file)
err = pt.runCommand("dotnet-add-package",
[]string{dotNetBin, "add", "package", dep, "-s", localNuget, "-v", version}, cwd)
if err != nil {
return errors.Wrapf(err, "failed to add dependency on %s", dep)
}
}
return nil
}
Environment variables referenced by the preceding Go file: PULUMI_TEST_DEBUG_LOG_LEVEL, PULUMI_TEST_DEBUG_UPDATES, PULUMI_TEST_OWNER, PULUMI_ACCESS_TOKEN, PULUMI_TEST_REPORT_CONFIG, PULUMI_TEST_TRACE_ENDPOINT, PULUMI_FAILED_TESTS_DIR, PULUMI_API, GOPATH, PULUMI_GO_DEP_ROOT, PULUMI_LOCAL_NUGET (language: go; 11 constant references, 0 variable references).
tensorflow/python/kernel_tests/collective_ops_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 Collective Operations."""
import os
import threading
import time
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.experimental.ops import testing as dataset_testing
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import collective_ops as _collective_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
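# Thin aliases for the original (V1) collective op entry points, used to parameterize the tests below.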
class CollectiveOpsV1(object):
all_reduce = _collective_ops.all_reduce
all_gather = _collective_ops.all_gather
broadcast_send = _collective_ops.broadcast_send
broadcast_recv = _collective_ops.broadcast_recv
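# Wrappers for the V2 collective ops that convert group/instance keys (and broadcast shapes) to tensors first.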
class CollectiveOpsV2(object):
@staticmethod
def all_reduce(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_reduce_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def all_gather(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_gather_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def broadcast_send(t, shape, dtype, group_size, group_key, instance_key,
*args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.broadcast_send_v2(t, group_size, group_key,
instance_key, *args, **kwargs)
@staticmethod
def broadcast_recv(shape, dtype, group_size, group_key, instance_key, *args,
**kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
shape = array_ops.identity(shape)
return _collective_ops.broadcast_recv_v2(shape, dtype, group_size,
group_key, instance_key, *args,
**kwargs)
device_combination = (
combinations.combine(device='CPU', communication='RING', required_gpus=0) +
combinations.combine(
device='GPU', communication=['RING', 'NCCL'], required_gpus=2))
collective_op_combinations = combinations.combine(collective_op=[
combinations.NamedObject('all_reduce', CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2', CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather', CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2', CollectiveOpsV2.all_gather)
])
@combinations.generate(
combinations.times(
combinations.combine(
collective_ops=[
combinations.NamedObject('v1', CollectiveOpsV1),
combinations.NamedObject('v2', CollectiveOpsV2)
],
mode='eager'), device_combination))
class CollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testReduce(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_reduce_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2devices():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
def testGather(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_gather_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_gather_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_gather_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_gather_2devices():
self.assertAllClose(result, [1., 1.], rtol=1e-5, atol=1e-5)
def testBroadcast(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_broadcast_2devices():
shape = [3]
in_value = constant_op.constant([1., 2., 3.], shape=shape)
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.broadcast_send(
in_value,
shape,
in_value.dtype,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.broadcast_recv(
shape,
in_value.dtype,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
for result in run_broadcast_2devices():
self.assertAllClose(result, [1., 2., 3.], rtol=1e-5, atol=1e-5)
def testInstanceKeyScopedUnderGroupKey(self, collective_ops, device,
communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
dev2 = '/device:%s:2' % device
dev3 = '/device:%s:3' % device
@def_function.function
def run_all_reduce_4devices_same_instance_key():
# Use a common instance key for both groups.
instance_key = 0
# We will create 2 groups each with 2 devices.
group_size = 2
# Group 0 comprises dev0 and dev1.
group0_key = 0
# Group 1 comprises dev2 and dev3.
group1_key = 1
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(1.), group_size, group0_key, instance_key))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(2.), group_size, group0_key, instance_key))
with ops.device(dev2):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(3.), group_size, group1_key, instance_key))
with ops.device(dev3):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(4.), group_size, group1_key, instance_key))
return collectives
results = run_all_reduce_4devices_same_instance_key()
self.assertAllClose(results[0], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[2], 7., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[3], 7., rtol=1e-5, atol=1e-5)
def testCollectiveGroupSizeOne(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key=100,
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with ops.device(dev0):
gathered_tensor = collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key=200,
communication_hint=communication)
self.assertAllEqual(in_value, gathered_tensor.numpy())
def testCollectiveInvalidKey(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
instance_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with self.assertRaisesRegex(
errors.InternalError, 'instance 100 expected type 0 and data_type 1 but'
' got type 2 and data_type 1'):
with ops.device(dev0):
collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def testMultipleGroups(self, collective_ops, device, communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
num_elements = 4
@def_function.function
def run_all_reduce(group_size, group_key):
instance_key = group_key
input_value = [float(group_key) for i in range(num_elements)]
collectives = []
for device_idx in range(group_size):
with ops.device('/{}:{}'.format(device, device_idx)):
input_tensor = constant_op.constant(input_value)
collectives.append(
collective_ops.all_reduce(
input_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
def run_and_assert(group_size, group_key):
for reduced_tensor in run_all_reduce(group_size, group_key):
self.assertAllEqual(
[float(group_key) * group_size for i in range(num_elements)],
reduced_tensor.numpy())
run_and_assert(group_size=2, group_key=1)
run_and_assert(group_size=3, group_key=2)
@combinations.generate(
combinations.times(
combinations.combine(
collective_ops=[combinations.NamedObject('v2', CollectiveOpsV2)],
mode='eager',
max_subdivs_per_device=[-1, 0, 16]), device_combination))
class AllReduceWithSubdivisionsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testReduce(self, collective_ops, device, communication,
max_subdivs_per_device):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
if max_subdivs_per_device == -1:
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
else:
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication,
max_subdivs_per_device=max_subdivs_per_device)
@def_function.function
def run_all_reduce_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2devices():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
@combinations.generate(
combinations.combine(required_physical_gpus=2, mode='eager'))
class XlaTest(test.TestCase, parameterized.TestCase):
def testReduce(self):
device0 = '/device:GPU:0'
device1 = '/device:GPU:1'
group_size = 2
group_key = 100
instance_key = 100
results = []
def all_reduce(device):
@def_function.function(jit_compile=True)
def f():
return _collective_ops.all_reduce_v2([1.], group_size, group_key,
instance_key)
with ops.device(device):
results.append(f())
t0 = threading.Thread(target=all_reduce, args=(device0,))
t1 = threading.Thread(target=all_reduce, args=(device1,))
t0.start()
t1.start()
t0.join()
t1.join()
self.assertAllEqual(results, [[2.], [2.]])
@combinations.generate(
combinations.times(collective_op_combinations, device_combination))
class AbortCollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testAbortGroupParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
def testAbortInstanceParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# First perform a normal all-reduce to complete the group resolution.
def_function.function(collective_fn)()
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
# Use a different instance key to trigger another instance resolution.
instance_key = 101
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
context._reset_context() # pylint: disable=protected-access
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def_function.function(collective_fn)()
def testAbortCommunication(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# First perform a normal collective to finish resolution.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
# Launch a collective that hangs, and abort the collective executor after
# the launch.
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Reset the context in order to reset the collective executor.
t.join()
_setup_context()
def_function.function(collective_fn)()
class OpCancellationTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testOpErrorNotAbortIfNoCollective(self, collective_op, device,
communication):
# Do not abort if there are no active collective ops. There could be
# exceptions like EOF, which we expect users to catch; aborting collective
# ops on all op errors interferes with this workflow.
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
dataset = dataset_ops.Dataset.from_tensors([1.])
@def_function.function
def collective_fn(in_tensor):
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def f():
iterator = iter(dataset)
collective_fn(next(iterator))
# This next(iterator) should raise EOF.
collective_fn(next(iterator))
with self.assertRaises(errors.OutOfRangeError):
f()
collective_fn(constant_op.constant([1.]))
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
],
mode='eager'), device_combination))
def testOpErrorAbortWithCollective(self, collective_op, device,
communication):
# Abort v1 collective ops if there're active collective ops at the time of
# an op error. This is due to the inability to cancel collective ops, and op
# errors may cause running collective ops to hang.
dev0 = '/device:%s:0' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# Make the dataset sleep a while so that the collective is being executed
# when the EOF happens.
dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
dataset_testing.sleep(sleep_microseconds=200))
@def_function.function
def f():
# Launch a collective op that won't be able to finish to test abortion
# when other ops error.
with ops.device(dev0):
ret = collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
iterator = iter(dataset)
next(iterator)
# This should raise EOF.
next(iterator)
return ret
with self.assertRaises(errors.OutOfRangeError):
f()
# Now that collective ops are aborted, subsequent collective ops should fail with
# the previous error.
with self.assertRaises(errors.CancelledError):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testOpErrorNotAbortWithCollective(self, collective_op, device,
communication):
# Do not abort v2 collective ops even if there're active collective ops at
# the time of an op error. We rely on cancellation to terminate active
# collective ops.
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
@def_function.function
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Local params resolution cannot be cancelled yet, so we perform a normal
# collective so that the group is resolved.
collective_fn()
# Make the dataset sleep a while so that the collective is being executed
# when the EOF happens.
dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
dataset_testing.sleep(sleep_microseconds=200))
@def_function.function
def f():
# Launch a collective op that won't be able to finish to test cancellation
# when other ops error.
with ops.device(dev0):
ret = collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
iterator = iter(dataset)
next(iterator)
# This should raise EOF.
next(iterator)
return ret
with self.assertRaises(errors.OutOfRangeError):
f()
# Collective ops shouldn't be aborted and new collectives should be able to
# proceed.
collective_fn()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testCancelDuringParamResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
t1_cancellation_manager = cancellation.CancellationManager()
t2_cancellation_manager = cancellation.CancellationManager()
@def_function.function
def _collective_fn(x):
# Run an assertion to crash one of the two function executions running
# collectives. We explicitly cancel the other in response.
assert_op = check_ops.assert_equal(x, in_tensor)
with ops.control_dependencies([assert_op]):
return collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
collective_concrete = _collective_fn.get_concrete_function(in_tensor)
finish_mu = threading.Lock()
finishes = 0
def _placement_wrapper(device, x, my_cancellation, other_cancellation):
try:
with ops.device(device):
cancelable_collective = my_cancellation.get_cancelable_function(
collective_concrete)
return cancelable_collective(x)
except errors.InvalidArgumentError:
# `assert_equal` failed for this execution of the function. The other
# function would deadlock without cancellation.
other_cancellation.start_cancel()
except errors.CancelledError:
pass
nonlocal finishes
with finish_mu:
finishes += 1
t1 = threading.Thread(
target=_placement_wrapper,
args=(dev0, constant_op.constant([1.]), t1_cancellation_manager,
t2_cancellation_manager))
t2 = threading.Thread(
target=_placement_wrapper,
# Will cause the assertion to fail
args=(dev1, constant_op.constant([2.]), t2_cancellation_manager,
t1_cancellation_manager))
t1.start()
t2.start()
t1.join()
t2.join()
self.assertEqual(finishes, 2)
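# ---------------------------------------------------------------------------
# Illustrative sketch (not run by the test suite) of the cancellation pattern
# exercised in testCancelDuringParamResolution above. It assumes only names
# already imported or defined in this file (CollectiveOpsV2, cancellation,
# constant_op, def_function). A CancellationManager wraps a concrete function;
# calling start_cancel() from another thread makes a pending collective raise
# errors.CancelledError instead of hanging on missing peers.
def _example_cancelable_collective_sketch():
  manager = cancellation.CancellationManager()
  @def_function.function
  def _all_reduce(x):
    return CollectiveOpsV2.all_reduce(
        x, group_size=2, group_key=1, instance_key=1)
  concrete = _all_reduce.get_concrete_function(constant_op.constant([1.]))
  # Launching the wrapped function on one device while its peer never joins
  # would block; manager.start_cancel() from another thread unblocks it with
  # errors.CancelledError.
  return manager.get_cancelable_function(concrete)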
@combinations.generate(
combinations.times(collective_op_combinations, device_combination))
class TimeoutTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testTimeout(self, collective_op, device, communication):
timeout = 1.5
@def_function.function
def run(group_size, reported_group_size=None):
group_key = 20
instance_key = 30
tensor = [1., 2., 3., 4.]
results = []
if reported_group_size is None:
reported_group_size = group_size
for i in range(group_size):
with ops.device('/{}:{}'.format(device, i)):
input_data = constant_op.constant(tensor)
result = collective_op(
input_data,
group_size=reported_group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
results.append(result)
return results
run(2, 2)
start_time = time.time()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
run(1, 2)
elapsed = time.time() - start_time
self.assertAllGreaterEqual(elapsed, timeout)
def testParamResolutionAfterTimeout(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
    # This timeout comes from param resolution.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev1):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
def testExecutionAfterTimeout(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
@def_function.function
def run():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# Run a normal all-reduce to complete param resolution.
run()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev1):
# No timeout.
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
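# ---------------------------------------------------------------------------
# Illustrative sketch (not run by the test suite) of the timeout behavior that
# TimeoutTest verifies above, using the same CollectiveOpsV2 wrapper defined
# earlier in this file: with `timeout` set and the rest of the group never
# launching, the op raises errors.DeadlineExceededError after roughly that
# many seconds instead of hanging forever.
def _example_collective_timeout_sketch():
  with ops.device('/device:CPU:0'):
    # Reported group size is 2 but only this member launches, so this call is
    # expected to fail with DeadlineExceededError after ~1.5 seconds.
    return CollectiveOpsV2.all_reduce(
        constant_op.constant([1.]),
        group_size=2,
        group_key=1,
        instance_key=1,
        timeout=1.5)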
class CommunicationHintTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
@combinations.generate(
combinations.times(collective_op_combinations,
combinations.combine(required_gpus=[0, 1])))
def testNCCLFallbackOnCPU(self, collective_op):
    # communication_hint=NCCL should work for CPU by falling back to RING. The
    # test doesn't actually require a GPU, only a GPU build. We parameterize on
    # required_gpus=[0, 1] so that it's also exercised with GPU builds.
dev0 = '/device:CPU:0'
dev1 = '/device:CPU:1'
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
@def_function.function
def run():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint='NCCL')
run()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
class OrderingTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testOrdering(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
with ops.device(dev0):
token0 = resource_variable_ops.ResourceVariable(0.)
with ops.device(dev1):
token1 = resource_variable_ops.ResourceVariable(0.)
@def_function.function
def f():
# Launch the first collective with token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token0.handle)
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token1.handle)
# Launch the second collective without token.
with ops.device(dev0):
collective_op(in_tensor, group_size, group_key, instance_key)
with ops.device(dev1):
collective_op(in_tensor, group_size, group_key, instance_key)
# Launch the third collective with token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token0.handle)
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token1.handle)
graph = f.get_concrete_function().graph
for device in [dev0, dev1]:
# Try to find the third collective, which should have the first collective
# as a control input.
third = None
for op in graph.get_operations():
if (op.type.startswith('Collective') and op.device.endswith(device) and
op.control_inputs and
op.control_inputs[0].type.startswith('Collective')):
self.assertIsNone(third)
third = op
self.assertIsNotNone(third)
# Verify it's not the second collective by looking at the inputs.
self.assertTrue(any(v.dtype == dtypes.resource for v in third.inputs))
first = third.control_inputs[0]
self.assertEqual(third.device, first.device)
# Verify it's not the second collective by looking at the inputs.
self.assertTrue(any(v.dtype == dtypes.resource for v in first.inputs))
self.assertEmpty(first.control_inputs)
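# ---------------------------------------------------------------------------
# Illustrative sketch (not run by the test suite) of the ordering_token
# mechanism that OrderingTest checks above: collectives on the same device
# that share a resource handle are chained with automatic control
# dependencies, while collectives launched without a token are left unordered.
# Only names already imported in this file are used.
def _example_ordering_token_sketch():
  token = resource_variable_ops.ResourceVariable(0.)
  @def_function.function
  def two_ordered_collectives(x):
    with ops.device('/device:CPU:0'):
      first = CollectiveOpsV2.all_reduce(
          x, group_size=2, group_key=1, instance_key=1,
          ordering_token=token.handle)
      # Reusing token.handle gives this op a control dependency on the
      # collective above, fixing their launch order within the graph.
      second = CollectiveOpsV2.all_reduce(
          x, group_size=2, group_key=1, instance_key=1,
          ordering_token=token.handle)
    return first, second
  return two_ordered_collectives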
class InputPipelineTest(test.TestCase):
def setUp(self):
super().setUp()
_setup_context()
def testMap(self):
group_size = 2
group_key = 100
instance_key = 100
def create_dataset_and_fetch_one(t):
dataset = dataset_ops.Dataset.from_tensor_slices([t])
def reduce_fn(t):
return CollectiveOpsV2.all_reduce(
t,
group_size=group_size,
group_key=group_key,
instance_key=instance_key)
dataset = dataset.map(reduce_fn)
return next(iter(dataset))
@def_function.function
def f():
with ops.device('CPU:0'):
value0 = create_dataset_and_fetch_one([1.])
with ops.device('CPU:1'):
value1 = create_dataset_and_fetch_one([2.])
return value0, value1
self.assertAllEqual(self.evaluate(f()), [[3.], [3.]])
@combinations.generate(
combinations.times(
combinations.combine(collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather)
]), device_combination))
class InvalidInputTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testInvalidGroupKey(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
group_size = 2
group_key = [100]
instance_key = 100
in_tensor = constant_op.constant([1.])
with self.assertRaises(errors.InvalidArgumentError):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def testInvalidGroupSize(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
group_size = -2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
with self.assertRaises(errors.InvalidArgumentError):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def testInvalidInstanceKey(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
group_size = 2
group_key = 100
instance_key = [100]
in_tensor = constant_op.constant([1.])
with self.assertRaises(errors.InvalidArgumentError):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
class CollectiveOpsV3Test(test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
_setup_context()
def testGroupInitialization(self):
group_size = 2
group_key = 100
@def_function.function
def f():
with ops.device('CPU:0'):
_collective_ops.initialize_communicator(
group_key=group_key, rank=0, group_size=group_size)
with ops.device('CPU:1'):
_collective_ops.initialize_communicator(
group_key=group_key, rank=1, group_size=group_size)
# TODO(b/193864859): Add validation with reduction op.
self.evaluate(f())
@combinations.generate(device_combination)
def testAllReduceV3(self, device, communication):
group_size = 2
group_key = 101
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_2devices():
collectives = []
with ops.device(dev0):
group_handle0 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=0,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_reduce_v3(
group_handle0, [1.0], reduction='Add'))
with ops.device(dev1):
group_handle1 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=1,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_reduce_v3(
group_handle1, [2.0], reduction='Add'))
return collectives
for result in run_all_reduce_2devices():
self.assertAllClose(result, [3.], rtol=1e-5, atol=1e-5)
@combinations.generate(device_combination)
def testAllToAllV3(self, device, communication):
group_size = 2
group_key = 104
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_to_all_2devices():
collectives = []
with ops.device(dev0):
group_handle0 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=0,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_to_all_v3(group_handle0, [1.0, 3.0]))
with ops.device(dev1):
group_handle1 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=1,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_to_all_v3(group_handle1, [2.0, 4.0]))
return collectives
result = run_all_to_all_2devices()
self.assertAllClose(result[0], [1.0, 2.0], rtol=1e-5, atol=1e-5)
self.assertAllClose(result[1], [3.0, 4.0], rtol=1e-5, atol=1e-5)
@combinations.generate(device_combination)
def testAllToAllV3DifferentUserRank(self, device, communication):
group_size = 2
group_key = 105
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_to_all_2devices():
collectives = []
with ops.device(dev0):
group_handle0 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=1,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_to_all_v3(group_handle0, [1.0, 3.0]))
with ops.device(dev1):
group_handle1 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=0,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_to_all_v3(group_handle1, [2.0, 4.0]))
return collectives
result = run_all_to_all_2devices()
self.assertAllClose(result[0], [2.0, 1.0], rtol=1e-5, atol=1e-5)
self.assertAllClose(result[1], [4.0, 3.0], rtol=1e-5, atol=1e-5)
@combinations.generate(device_combination)
def testAllToAllV3DifferentUserRankWithTensorInput(self, device,
communication):
group_size = 2
group_key = 106
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_to_all_2devices():
collectives = []
with ops.device(dev0):
group_handle0 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=1,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_to_all_v3(group_handle0,
constant_op.constant([1.0, 2.0])))
with ops.device(dev1):
group_handle1 = _collective_ops.initialize_communicator(
group_key=group_key,
rank=0,
group_size=group_size,
communication_hint=communication)
collectives.append(
_collective_ops.all_to_all_v3(group_handle1,
constant_op.constant([3.0, 4.0])))
return collectives
result = run_all_to_all_2devices()
    # FIXME(b/214407359): The expected values below reflect current behavior;
    # result[0] is rank 1 and should receive [4., 2.].
self.assertAllClose(result[1], [4.0, 2.0], rtol=1e-5, atol=1e-5)
self.assertAllClose(result[0], [3.0, 1.0], rtol=1e-5, atol=1e-5)
def _setup_context():
context._reset_context()
test_util.set_logical_devices_to_at_least('CPU', 4)
context.ensure_initialized()
context.set_log_device_placement(True)
if __name__ == '__main__':
os.environ['NCCL_DEBUG'] = 'INFO'
v2_compat.enable_v2_behavior()
test.main()
test/dialect/test_oracle.py
# coding: utf-8
from sqlalchemy.testing import eq_
from sqlalchemy import *
from sqlalchemy import types as sqltypes, exc, schema
from sqlalchemy.sql import table, column
from sqlalchemy.testing import fixtures, AssertsExecutionResults, AssertsCompiledSQL
from sqlalchemy import testing
from sqlalchemy.util import u, b
from sqlalchemy import util
from sqlalchemy.testing import assert_raises, assert_raises_message
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.dialects.oracle import cx_oracle, base as oracle
from sqlalchemy.engine import default
import decimal
from sqlalchemy.testing.schema import Table, Column
import datetime
import os
from sqlalchemy import sql
from sqlalchemy.testing.mock import Mock
class OutParamTest(fixtures.TestBase, AssertsExecutionResults):
__only_on__ = 'oracle+cx_oracle'
@classmethod
def setup_class(cls):
testing.db.execute("""
create or replace procedure foo(x_in IN number, x_out OUT number,
y_out OUT number, z_out OUT varchar) IS
retval number;
begin
retval := 6;
x_out := 10;
y_out := x_in * 15;
z_out := NULL;
end;
""")
def test_out_params(self):
result = testing.db.execute(text('begin foo(:x_in, :x_out, :y_out, '
':z_out); end;',
bindparams=[bindparam('x_in', Float),
outparam('x_out', Integer),
outparam('y_out', Float),
outparam('z_out', String)]), x_in=5)
eq_(result.out_parameters,
{'x_out': 10, 'y_out': 75, 'z_out': None})
assert isinstance(result.out_parameters['x_out'], int)
@classmethod
def teardown_class(cls):
testing.db.execute("DROP PROCEDURE foo")
class CXOracleArgsTest(fixtures.TestBase):
__only_on__ = 'oracle+cx_oracle'
def test_autosetinputsizes(self):
dialect = cx_oracle.dialect()
assert dialect.auto_setinputsizes
dialect = cx_oracle.dialect(auto_setinputsizes=False)
assert not dialect.auto_setinputsizes
def test_exclude_inputsizes_none(self):
dialect = cx_oracle.dialect(exclude_setinputsizes=None)
eq_(dialect.exclude_setinputsizes, set())
def test_exclude_inputsizes_custom(self):
import cx_Oracle
dialect = cx_oracle.dialect(dbapi=cx_Oracle,
exclude_setinputsizes=('NCLOB',))
eq_(dialect.exclude_setinputsizes, set([cx_Oracle.NCLOB]))
class QuotedBindRoundTripTest(fixtures.TestBase):
__only_on__ = 'oracle'
@testing.provide_metadata
def test_table_round_trip(self):
oracle.RESERVED_WORDS.remove('UNION')
metadata = self.metadata
table = Table("t1", metadata,
Column("option", Integer),
Column("plain", Integer, quote=True),
# test that quote works for a reserved word
# that the dialect isn't aware of when quote
# is set
Column("union", Integer, quote=True)
)
metadata.create_all()
table.insert().execute(
{"option": 1, "plain": 1, "union": 1}
)
eq_(
testing.db.execute(table.select()).first(),
(1, 1, 1)
)
table.update().values(option=2, plain=2, union=2).execute()
eq_(
testing.db.execute(table.select()).first(),
(2, 2, 2)
)
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "oracle" #oracle.dialect()
def test_true_false(self):
self.assert_compile(
sql.false(), "0"
)
self.assert_compile(
sql.true(),
"1"
)
def test_owner(self):
meta = MetaData()
parent = Table('parent', meta, Column('id', Integer,
primary_key=True), Column('name', String(50)),
schema='ed')
child = Table('child', meta, Column('id', Integer,
primary_key=True), Column('parent_id', Integer,
ForeignKey('ed.parent.id')), schema='ed')
self.assert_compile(parent.join(child),
'ed.parent JOIN ed.child ON ed.parent.id = '
'ed.child.parent_id')
def test_subquery(self):
t = table('sometable', column('col1'), column('col2'))
s = select([t])
s = select([s.c.col1, s.c.col2])
self.assert_compile(s, "SELECT col1, col2 FROM (SELECT "
"sometable.col1 AS col1, sometable.col2 "
"AS col2 FROM sometable)")
def test_bindparam_quote(self):
"""test that bound parameters take on quoting for reserved words,
column names quote flag enabled."""
# note: this is only in cx_oracle at the moment. not sure
# what other hypothetical oracle dialects might need
self.assert_compile(
bindparam("option"), ':"option"'
)
self.assert_compile(
bindparam("plain"), ':plain'
)
t = Table("s", MetaData(), Column('plain', Integer, quote=True))
self.assert_compile(
t.insert().values(plain=5), 'INSERT INTO s ("plain") VALUES (:"plain")'
)
self.assert_compile(
t.update().values(plain=5), 'UPDATE s SET "plain"=:"plain"'
)
def test_limit(self):
t = table('sometable', column('col1'), column('col2'))
s = select([t])
c = s.compile(dialect=oracle.OracleDialect())
assert t.c.col1 in set(c.result_map['col1'][1])
s = select([t]).limit(10).offset(20)
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT col1, '
'col2, ROWNUM AS ora_rn FROM (SELECT '
'sometable.col1 AS col1, sometable.col2 AS '
'col2 FROM sometable) WHERE ROWNUM <= '
':param_1 + :param_2) WHERE ora_rn > :param_2',
checkparams={'param_1': 10, 'param_2': 20})
c = s.compile(dialect=oracle.OracleDialect())
assert t.c.col1 in set(c.result_map['col1'][1])
s = select([s.c.col1, s.c.col2])
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT col1, col2 '
'FROM (SELECT col1, col2, ROWNUM AS ora_rn '
'FROM (SELECT sometable.col1 AS col1, '
'sometable.col2 AS col2 FROM sometable) '
'WHERE ROWNUM <= :param_1 + :param_2) WHERE ora_rn > '
':param_2)',
checkparams={'param_1': 10, 'param_2': 20})
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT col1, col2 '
'FROM (SELECT col1, col2, ROWNUM AS ora_rn '
'FROM (SELECT sometable.col1 AS col1, '
'sometable.col2 AS col2 FROM sometable) '
'WHERE ROWNUM <= :param_1 + :param_2) WHERE ora_rn > '
':param_2)')
s = select([t]).limit(10).offset(20).order_by(t.c.col2)
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT col1, '
'col2, ROWNUM AS ora_rn FROM (SELECT '
'sometable.col1 AS col1, sometable.col2 AS '
'col2 FROM sometable ORDER BY '
'sometable.col2) WHERE ROWNUM <= '
':param_1 + :param_2) WHERE ora_rn > :param_2',
checkparams={'param_1': 10, 'param_2': 20}
)
s = select([t], for_update=True).limit(10).order_by(t.c.col2)
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT '
'sometable.col1 AS col1, sometable.col2 AS '
'col2 FROM sometable ORDER BY '
'sometable.col2) WHERE ROWNUM <= :param_1 '
'FOR UPDATE')
s = select([t],
for_update=True).limit(10).offset(20).order_by(t.c.col2)
self.assert_compile(s,
'SELECT col1, col2 FROM (SELECT col1, '
'col2, ROWNUM AS ora_rn FROM (SELECT '
'sometable.col1 AS col1, sometable.col2 AS '
'col2 FROM sometable ORDER BY '
'sometable.col2) WHERE ROWNUM <= '
':param_1 + :param_2) WHERE ora_rn > :param_2 FOR '
'UPDATE')
def test_for_update(self):
table1 = table('mytable',
column('myid'), column('name'), column('description'))
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE")
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(of=table1.c.myid),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE OF mytable.myid")
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(nowait=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE NOWAIT")
self.assert_compile(
table1.select(table1.c.myid == 7).
with_for_update(nowait=True, of=table1.c.myid),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 "
"FOR UPDATE OF mytable.myid NOWAIT")
self.assert_compile(
table1.select(table1.c.myid == 7).
with_for_update(nowait=True, of=[table1.c.myid, table1.c.name]),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE OF "
"mytable.myid, mytable.name NOWAIT")
ta = table1.alias()
self.assert_compile(
ta.select(ta.c.myid == 7).
with_for_update(of=[ta.c.myid, ta.c.name]),
"SELECT mytable_1.myid, mytable_1.name, mytable_1.description "
"FROM mytable mytable_1 "
"WHERE mytable_1.myid = :myid_1 FOR UPDATE OF "
"mytable_1.myid, mytable_1.name"
)
def test_limit_preserves_typing_information(self):
class MyType(TypeDecorator):
impl = Integer
stmt = select([type_coerce(column('x'), MyType).label('foo')]).limit(1)
dialect = oracle.dialect()
compiled = stmt.compile(dialect=dialect)
assert isinstance(compiled.result_map['foo'][-1], MyType)
def test_use_binds_for_limits_disabled(self):
t = table('sometable', column('col1'), column('col2'))
dialect = oracle.OracleDialect(use_binds_for_limits=False)
self.assert_compile(select([t]).limit(10),
"SELECT col1, col2 FROM (SELECT sometable.col1 AS col1, "
"sometable.col2 AS col2 FROM sometable) WHERE ROWNUM <= 10",
dialect=dialect)
self.assert_compile(select([t]).offset(10),
"SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable)) WHERE ora_rn > 10",
dialect=dialect)
self.assert_compile(select([t]).limit(10).offset(10),
"SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable) WHERE ROWNUM <= 20) WHERE ora_rn > 10",
dialect=dialect)
def test_use_binds_for_limits_enabled(self):
t = table('sometable', column('col1'), column('col2'))
dialect = oracle.OracleDialect(use_binds_for_limits=True)
self.assert_compile(select([t]).limit(10),
"SELECT col1, col2 FROM (SELECT sometable.col1 AS col1, "
"sometable.col2 AS col2 FROM sometable) WHERE ROWNUM "
"<= :param_1",
dialect=dialect)
self.assert_compile(select([t]).offset(10),
"SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable)) WHERE ora_rn > :param_1",
dialect=dialect)
self.assert_compile(select([t]).limit(10).offset(10),
"SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable) WHERE ROWNUM <= :param_1 + :param_2) "
"WHERE ora_rn > :param_2",
dialect=dialect,
checkparams={'param_1': 10, 'param_2': 10})
def test_long_labels(self):
dialect = default.DefaultDialect()
dialect.max_identifier_length = 30
ora_dialect = oracle.dialect()
m = MetaData()
a_table = Table(
'thirty_characters_table_xxxxxx',
m,
Column('id', Integer, primary_key=True)
)
other_table = Table(
'other_thirty_characters_table_',
m,
Column('id', Integer, primary_key=True),
Column('thirty_characters_table_id',
Integer,
ForeignKey('thirty_characters_table_xxxxxx.id'),
primary_key=True
)
)
anon = a_table.alias()
self.assert_compile(select([other_table,
anon]).
select_from(
other_table.outerjoin(anon)).apply_labels(),
'SELECT other_thirty_characters_table_.id '
'AS other_thirty_characters__1, '
'other_thirty_characters_table_.thirty_char'
'acters_table_id AS other_thirty_characters'
'__2, thirty_characters_table__1.id AS '
'thirty_characters_table__3 FROM '
'other_thirty_characters_table_ LEFT OUTER '
'JOIN thirty_characters_table_xxxxxx AS '
'thirty_characters_table__1 ON '
'thirty_characters_table__1.id = '
'other_thirty_characters_table_.thirty_char'
'acters_table_id', dialect=dialect)
self.assert_compile(select([other_table,
anon]).select_from(
other_table.outerjoin(anon)).apply_labels(),
'SELECT other_thirty_characters_table_.id '
'AS other_thirty_characters__1, '
'other_thirty_characters_table_.thirty_char'
'acters_table_id AS other_thirty_characters'
'__2, thirty_characters_table__1.id AS '
'thirty_characters_table__3 FROM '
'other_thirty_characters_table_ LEFT OUTER '
'JOIN thirty_characters_table_xxxxxx '
'thirty_characters_table__1 ON '
'thirty_characters_table__1.id = '
'other_thirty_characters_table_.thirty_char'
'acters_table_id', dialect=ora_dialect)
def test_outer_join(self):
table1 = table('mytable',
column('myid', Integer),
column('name', String),
column('description', String),
)
table2 = table(
'myothertable',
column('otherid', Integer),
column('othername', String),
)
table3 = table(
'thirdtable',
column('userid', Integer),
column('otherstuff', String),
)
query = select([table1, table2], or_(table1.c.name == 'fred',
table1.c.myid == 10, table2.c.othername != 'jack',
'EXISTS (select yay from foo where boo = lar)'
), from_obj=[outerjoin(table1, table2,
table1.c.myid == table2.c.otherid)])
self.assert_compile(query,
'SELECT mytable.myid, mytable.name, '
'mytable.description, myothertable.otherid,'
' myothertable.othername FROM mytable, '
'myothertable WHERE (mytable.name = '
':name_1 OR mytable.myid = :myid_1 OR '
'myothertable.othername != :othername_1 OR '
'EXISTS (select yay from foo where boo = '
'lar)) AND mytable.myid = '
'myothertable.otherid(+)',
dialect=oracle.OracleDialect(use_ansi=False))
query = table1.outerjoin(table2, table1.c.myid
== table2.c.otherid).outerjoin(table3,
table3.c.userid == table2.c.otherid)
self.assert_compile(query.select(),
'SELECT mytable.myid, mytable.name, '
'mytable.description, myothertable.otherid,'
' myothertable.othername, '
'thirdtable.userid, thirdtable.otherstuff '
'FROM mytable LEFT OUTER JOIN myothertable '
'ON mytable.myid = myothertable.otherid '
'LEFT OUTER JOIN thirdtable ON '
'thirdtable.userid = myothertable.otherid')
self.assert_compile(query.select(),
'SELECT mytable.myid, mytable.name, '
'mytable.description, myothertable.otherid,'
' myothertable.othername, '
'thirdtable.userid, thirdtable.otherstuff '
'FROM mytable, myothertable, thirdtable '
'WHERE thirdtable.userid(+) = '
'myothertable.otherid AND mytable.myid = '
'myothertable.otherid(+)',
dialect=oracle.dialect(use_ansi=False))
query = table1.join(table2, table1.c.myid
== table2.c.otherid).join(table3,
table3.c.userid == table2.c.otherid)
self.assert_compile(query.select(),
'SELECT mytable.myid, mytable.name, '
'mytable.description, myothertable.otherid,'
' myothertable.othername, '
'thirdtable.userid, thirdtable.otherstuff '
'FROM mytable, myothertable, thirdtable '
'WHERE thirdtable.userid = '
'myothertable.otherid AND mytable.myid = '
'myothertable.otherid',
dialect=oracle.dialect(use_ansi=False))
query = table1.join(table2, table1.c.myid
== table2.c.otherid).outerjoin(table3,
table3.c.userid == table2.c.otherid)
self.assert_compile(query.select().order_by(table1.c.name).
limit(10).offset(5),
'SELECT myid, name, description, otherid, '
'othername, userid, otherstuff FROM '
'(SELECT myid, name, description, otherid, '
'othername, userid, otherstuff, ROWNUM AS '
'ora_rn FROM (SELECT mytable.myid AS myid, '
'mytable.name AS name, mytable.description '
'AS description, myothertable.otherid AS '
'otherid, myothertable.othername AS '
'othername, thirdtable.userid AS userid, '
'thirdtable.otherstuff AS otherstuff FROM '
'mytable, myothertable, thirdtable WHERE '
'thirdtable.userid(+) = '
'myothertable.otherid AND mytable.myid = '
'myothertable.otherid ORDER BY '
'mytable.name) WHERE ROWNUM <= :param_1 + :param_2) '
'WHERE ora_rn > :param_2',
checkparams={'param_1': 10, 'param_2': 5},
dialect=oracle.dialect(use_ansi=False))
subq = select([table1]).select_from(table1.outerjoin(table2,
table1.c.myid == table2.c.otherid)).alias()
q = select([table3]).select_from(table3.outerjoin(subq,
table3.c.userid == subq.c.myid))
self.assert_compile(q,
'SELECT thirdtable.userid, '
'thirdtable.otherstuff FROM thirdtable '
'LEFT OUTER JOIN (SELECT mytable.myid AS '
'myid, mytable.name AS name, '
'mytable.description AS description FROM '
'mytable LEFT OUTER JOIN myothertable ON '
'mytable.myid = myothertable.otherid) '
'anon_1 ON thirdtable.userid = anon_1.myid',
dialect=oracle.dialect(use_ansi=True))
self.assert_compile(q,
'SELECT thirdtable.userid, '
'thirdtable.otherstuff FROM thirdtable, '
'(SELECT mytable.myid AS myid, '
'mytable.name AS name, mytable.description '
'AS description FROM mytable, myothertable '
'WHERE mytable.myid = myothertable.otherid('
'+)) anon_1 WHERE thirdtable.userid = '
'anon_1.myid(+)',
dialect=oracle.dialect(use_ansi=False))
q = select([table1.c.name]).where(table1.c.name == 'foo')
self.assert_compile(q,
'SELECT mytable.name FROM mytable WHERE '
'mytable.name = :name_1',
dialect=oracle.dialect(use_ansi=False))
subq = select([table3.c.otherstuff]).where(table3.c.otherstuff
== table1.c.name).label('bar')
q = select([table1.c.name, subq])
self.assert_compile(q,
'SELECT mytable.name, (SELECT '
'thirdtable.otherstuff FROM thirdtable '
'WHERE thirdtable.otherstuff = '
'mytable.name) AS bar FROM mytable',
dialect=oracle.dialect(use_ansi=False))
def test_nonansi_nested_right_join(self):
a = table('a', column('a'))
b = table('b', column('b'))
c = table('c', column('c'))
j = a.join(b.join(c, b.c.b == c.c.c), a.c.a == b.c.b)
self.assert_compile(
select([j]),
"SELECT a.a, b.b, c.c FROM a, b, c "
"WHERE a.a = b.b AND b.b = c.c",
dialect=oracle.OracleDialect(use_ansi=False)
)
j = a.outerjoin(b.join(c, b.c.b == c.c.c), a.c.a == b.c.b)
self.assert_compile(
select([j]),
"SELECT a.a, b.b, c.c FROM a, b, c "
"WHERE a.a = b.b(+) AND b.b = c.c",
dialect=oracle.OracleDialect(use_ansi=False)
)
j = a.join(b.outerjoin(c, b.c.b == c.c.c), a.c.a == b.c.b)
self.assert_compile(
select([j]),
"SELECT a.a, b.b, c.c FROM a, b, c "
"WHERE a.a = b.b AND b.b = c.c(+)",
dialect=oracle.OracleDialect(use_ansi=False)
)
def test_alias_outer_join(self):
address_types = table('address_types', column('id'),
column('name'))
addresses = table('addresses', column('id'), column('user_id'),
column('address_type_id'),
column('email_address'))
at_alias = address_types.alias()
s = select([at_alias,
addresses]).select_from(addresses.outerjoin(at_alias,
addresses.c.address_type_id
== at_alias.c.id)).where(addresses.c.user_id
== 7).order_by(addresses.c.id, address_types.c.id)
self.assert_compile(s,
'SELECT address_types_1.id, '
'address_types_1.name, addresses.id, '
'addresses.user_id, addresses.address_type_'
'id, addresses.email_address FROM '
'addresses LEFT OUTER JOIN address_types '
'address_types_1 ON addresses.address_type_'
'id = address_types_1.id WHERE '
'addresses.user_id = :user_id_1 ORDER BY '
'addresses.id, address_types.id')
def test_returning_insert(self):
t1 = table('t1', column('c1'), column('c2'), column('c3'))
self.assert_compile(
t1.insert().values(c1=1).returning(t1.c.c2, t1.c.c3),
"INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
"t1.c2, t1.c3 INTO :ret_0, :ret_1"
)
def test_returning_insert_functional(self):
t1 = table('t1', column('c1'), column('c2', String()), column('c3', String()))
fn = func.lower(t1.c.c2, type_=String())
stmt = t1.insert().values(c1=1).returning(fn, t1.c.c3)
compiled = stmt.compile(dialect=oracle.dialect())
eq_(
compiled.result_map,
{'ret_1': ('ret_1', (t1.c.c3, 'c3', 'c3'), t1.c.c3.type),
'ret_0': ('ret_0', (fn, 'lower', None), fn.type)}
)
self.assert_compile(
stmt,
"INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
"lower(t1.c2), t1.c3 INTO :ret_0, :ret_1"
)
def test_returning_insert_labeled(self):
t1 = table('t1', column('c1'), column('c2'), column('c3'))
self.assert_compile(
t1.insert().values(c1=1).returning(
t1.c.c2.label('c2_l'), t1.c.c3.label('c3_l')),
"INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
"t1.c2, t1.c3 INTO :ret_0, :ret_1"
)
def test_compound(self):
t1 = table('t1', column('c1'), column('c2'), column('c3'))
t2 = table('t2', column('c1'), column('c2'), column('c3'))
self.assert_compile(union(t1.select(), t2.select()),
'SELECT t1.c1, t1.c2, t1.c3 FROM t1 UNION '
'SELECT t2.c1, t2.c2, t2.c3 FROM t2')
self.assert_compile(except_(t1.select(), t2.select()),
'SELECT t1.c1, t1.c2, t1.c3 FROM t1 MINUS '
'SELECT t2.c1, t2.c2, t2.c3 FROM t2')
def test_no_paren_fns(self):
for fn, expected in [
(func.uid(), "uid"),
(func.UID(), "UID"),
(func.sysdate(), "sysdate"),
(func.row_number(), "row_number()"),
(func.rank(), "rank()"),
(func.now(), "CURRENT_TIMESTAMP"),
(func.current_timestamp(), "CURRENT_TIMESTAMP"),
(func.user(), "USER"),
]:
self.assert_compile(fn, expected)
def test_create_index_alt_schema(self):
m = MetaData()
t1 = Table('foo', m,
Column('x', Integer),
schema="alt_schema"
)
self.assert_compile(
schema.CreateIndex(Index("bar", t1.c.x)),
"CREATE INDEX alt_schema.bar ON alt_schema.foo (x)"
)
def test_create_index_expr(self):
m = MetaData()
t1 = Table('foo', m,
Column('x', Integer)
)
self.assert_compile(
schema.CreateIndex(Index("bar", t1.c.x > 5)),
"CREATE INDEX bar ON foo (x > 5)"
)
class CompatFlagsTest(fixtures.TestBase, AssertsCompiledSQL):
def _dialect(self, server_version, **kw):
def server_version_info(conn):
return server_version
dialect = oracle.dialect(
dbapi=Mock(version="0.0.0", paramstyle="named"),
**kw)
dialect._get_server_version_info = server_version_info
dialect._check_unicode_returns = Mock()
dialect._check_unicode_description = Mock()
dialect._get_default_schema_name = Mock()
return dialect
def test_ora8_flags(self):
dialect = self._dialect((8, 2, 5))
# before connect, assume modern DB
assert dialect._supports_char_length
assert dialect._supports_nchar
assert dialect.use_ansi
dialect.initialize(Mock())
assert not dialect.implicit_returning
assert not dialect._supports_char_length
assert not dialect._supports_nchar
assert not dialect.use_ansi
self.assert_compile(String(50), "VARCHAR2(50)", dialect=dialect)
self.assert_compile(Unicode(50), "VARCHAR2(50)", dialect=dialect)
self.assert_compile(UnicodeText(), "CLOB", dialect=dialect)
dialect = self._dialect((8, 2, 5), implicit_returning=True)
dialect.initialize(testing.db.connect())
assert dialect.implicit_returning
def test_default_flags(self):
"""test with no initialization or server version info"""
dialect = self._dialect(None)
assert dialect._supports_char_length
assert dialect._supports_nchar
assert dialect.use_ansi
self.assert_compile(String(50), "VARCHAR2(50 CHAR)", dialect=dialect)
self.assert_compile(Unicode(50), "NVARCHAR2(50)", dialect=dialect)
self.assert_compile(UnicodeText(), "NCLOB", dialect=dialect)
def test_ora10_flags(self):
dialect = self._dialect((10, 2, 5))
dialect.initialize(Mock())
assert dialect._supports_char_length
assert dialect._supports_nchar
assert dialect.use_ansi
self.assert_compile(String(50), "VARCHAR2(50 CHAR)", dialect=dialect)
self.assert_compile(Unicode(50), "NVARCHAR2(50)", dialect=dialect)
self.assert_compile(UnicodeText(), "NCLOB", dialect=dialect)
class MultiSchemaTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = 'oracle'
@classmethod
def setup_class(cls):
# currently assuming full DBA privs for the user.
# don't really know how else to go here unless
# we connect as the other user.
for stmt in """
create table test_schema.parent(
id integer primary key,
data varchar2(50)
);
create table test_schema.child(
id integer primary key,
data varchar2(50),
parent_id integer references test_schema.parent(id)
);
create table local_table(
id integer primary key,
data varchar2(50)
);
create synonym test_schema.ptable for test_schema.parent;
create synonym test_schema.ctable for test_schema.child;
create synonym test_schema_ptable for test_schema.parent;
create synonym test_schema.local_table for local_table;
-- can't make a ref from local schema to the
-- remote schema's table without this,
        -- *and* can't give yourself a grant!
-- so we give it to public. ideas welcome.
grant references on test_schema.parent to public;
grant references on test_schema.child to public;
""".split(";"):
if stmt.strip():
testing.db.execute(stmt)
@classmethod
def teardown_class(cls):
for stmt in """
drop table test_schema.child;
drop table test_schema.parent;
drop table local_table;
drop synonym test_schema.ctable;
drop synonym test_schema.ptable;
drop synonym test_schema_ptable;
drop synonym test_schema.local_table;
""".split(";"):
if stmt.strip():
testing.db.execute(stmt)
@testing.provide_metadata
def test_create_same_names_explicit_schema(self):
schema = testing.db.dialect.default_schema_name
meta = self.metadata
parent = Table('parent', meta,
Column('pid', Integer, primary_key=True),
schema=schema
)
child = Table('child', meta,
Column('cid', Integer, primary_key=True),
Column('pid', Integer, ForeignKey('%s.parent.pid' % schema)),
schema=schema
)
meta.create_all()
parent.insert().execute({'pid': 1})
child.insert().execute({'cid': 1, 'pid': 1})
eq_(child.select().execute().fetchall(), [(1, 1)])
def test_reflect_alt_table_owner_local_synonym(self):
meta = MetaData(testing.db)
parent = Table('test_schema_ptable', meta, autoload=True,
oracle_resolve_synonyms=True)
self.assert_compile(parent.select(),
"SELECT test_schema_ptable.id, "
"test_schema_ptable.data FROM test_schema_ptable")
select([parent]).execute().fetchall()
def test_reflect_alt_synonym_owner_local_table(self):
meta = MetaData(testing.db)
parent = Table('local_table', meta, autoload=True,
oracle_resolve_synonyms=True, schema="test_schema")
self.assert_compile(parent.select(),
"SELECT test_schema.local_table.id, "
"test_schema.local_table.data FROM test_schema.local_table")
select([parent]).execute().fetchall()
@testing.provide_metadata
def test_create_same_names_implicit_schema(self):
meta = self.metadata
parent = Table('parent', meta,
Column('pid', Integer, primary_key=True),
)
child = Table('child', meta,
Column('cid', Integer, primary_key=True),
Column('pid', Integer, ForeignKey('parent.pid')),
)
meta.create_all()
parent.insert().execute({'pid': 1})
child.insert().execute({'cid': 1, 'pid': 1})
eq_(child.select().execute().fetchall(), [(1, 1)])
def test_reflect_alt_owner_explicit(self):
meta = MetaData(testing.db)
parent = Table('parent', meta, autoload=True, schema='test_schema')
child = Table('child', meta, autoload=True, schema='test_schema')
self.assert_compile(parent.join(child),
"test_schema.parent JOIN test_schema.child ON "
"test_schema.parent.id = test_schema.child.parent_id")
select([parent, child]).\
select_from(parent.join(child)).\
execute().fetchall()
def test_reflect_local_to_remote(self):
testing.db.execute('CREATE TABLE localtable (id INTEGER '
'PRIMARY KEY, parent_id INTEGER REFERENCES '
'test_schema.parent(id))')
try:
meta = MetaData(testing.db)
lcl = Table('localtable', meta, autoload=True)
parent = meta.tables['test_schema.parent']
self.assert_compile(parent.join(lcl),
'test_schema.parent JOIN localtable ON '
'test_schema.parent.id = '
'localtable.parent_id')
select([parent,
lcl]).select_from(parent.join(lcl)).execute().fetchall()
finally:
testing.db.execute('DROP TABLE localtable')
def test_reflect_alt_owner_implicit(self):
meta = MetaData(testing.db)
parent = Table('parent', meta, autoload=True,
schema='test_schema')
child = Table('child', meta, autoload=True, schema='test_schema'
)
self.assert_compile(parent.join(child),
'test_schema.parent JOIN test_schema.child '
'ON test_schema.parent.id = '
'test_schema.child.parent_id')
select([parent,
child]).select_from(parent.join(child)).execute().fetchall()
def test_reflect_alt_owner_synonyms(self):
testing.db.execute('CREATE TABLE localtable (id INTEGER '
'PRIMARY KEY, parent_id INTEGER REFERENCES '
'test_schema.ptable(id))')
try:
meta = MetaData(testing.db)
lcl = Table('localtable', meta, autoload=True,
oracle_resolve_synonyms=True)
parent = meta.tables['test_schema.ptable']
self.assert_compile(parent.join(lcl),
'test_schema.ptable JOIN localtable ON '
'test_schema.ptable.id = '
'localtable.parent_id')
select([parent,
lcl]).select_from(parent.join(lcl)).execute().fetchall()
finally:
testing.db.execute('DROP TABLE localtable')
def test_reflect_remote_synonyms(self):
meta = MetaData(testing.db)
parent = Table('ptable', meta, autoload=True,
schema='test_schema',
oracle_resolve_synonyms=True)
child = Table('ctable', meta, autoload=True,
schema='test_schema',
oracle_resolve_synonyms=True)
self.assert_compile(parent.join(child),
'test_schema.ptable JOIN '
'test_schema.ctable ON test_schema.ptable.i'
'd = test_schema.ctable.parent_id')
select([parent,
child]).select_from(parent.join(child)).execute().fetchall()
class ConstraintTest(fixtures.TablesTest):
__only_on__ = 'oracle'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table('foo', metadata, Column('id', Integer, primary_key=True))
def test_oracle_has_no_on_update_cascade(self):
bar = Table('bar', self.metadata,
Column('id', Integer, primary_key=True),
Column('foo_id', Integer,
ForeignKey('foo.id', onupdate='CASCADE')))
assert_raises(exc.SAWarning, bar.create)
bat = Table('bat', self.metadata,
Column('id', Integer, primary_key=True),
Column('foo_id', Integer),
ForeignKeyConstraint(['foo_id'], ['foo.id'],
onupdate='CASCADE'))
assert_raises(exc.SAWarning, bat.create)
class TwoPhaseTest(fixtures.TablesTest):
"""test cx_oracle two phase, which remains in a semi-broken state
so requires a carefully written test."""
__only_on__ = 'oracle+cx_oracle'
@classmethod
def define_tables(cls, metadata):
Table('datatable', metadata,
Column('id', Integer, primary_key=True),
Column('data', String(50))
)
def _connection(self):
conn = testing.db.connect()
conn.detach()
return conn
def _assert_data(self, rows):
eq_(
testing.db.scalar("select count(*) from datatable"),
rows
)
def test_twophase_prepare_false(self):
conn = self._connection()
for i in range(2):
trans = conn.begin_twophase()
conn.execute("select 1 from dual")
trans.prepare()
trans.commit()
conn.close()
self._assert_data(0)
def test_twophase_prepare_true(self):
conn = self._connection()
for i in range(2):
trans = conn.begin_twophase()
conn.execute("insert into datatable (id, data) "
"values (%s, 'somedata')" % i)
trans.prepare()
trans.commit()
conn.close()
self._assert_data(2)
def test_twophase_rollback(self):
conn = self._connection()
trans = conn.begin_twophase()
conn.execute("insert into datatable (id, data) "
"values (%s, 'somedata')" % 1)
trans.rollback()
trans = conn.begin_twophase()
conn.execute("insert into datatable (id, data) "
"values (%s, 'somedata')" % 1)
trans.prepare()
trans.commit()
conn.close()
self._assert_data(1)
def test_not_prepared(self):
conn = self._connection()
trans = conn.begin_twophase()
conn.execute("insert into datatable (id, data) "
"values (%s, 'somedata')" % 1)
trans.commit()
conn.close()
self._assert_data(1)
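# ---------------------------------------------------------------------------
# Illustrative sketch (not executed by the suite) of the two-phase pattern that
# TwoPhaseTest exercises above: begin_twophase() opens an XA-style transaction
# on cx_oracle, prepare() is the optional prepare step, and commit()/rollback()
# finish it. As the class docstring notes, cx_oracle two-phase support remains
# semi-broken, so treat this only as a usage outline.
def _example_twophase_sketch(engine):
    conn = engine.connect()
    try:
        trans = conn.begin_twophase()
        conn.execute("insert into datatable (id, data) values (1, 'somedata')")
        trans.prepare()
        trans.commit()
    finally:
        conn.close()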
class DialectTypesTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = oracle.OracleDialect()
def test_no_clobs_for_string_params(self):
"""test that simple string params get a DBAPI type of
VARCHAR, not CLOB. This is to prevent setinputsizes
from setting up cx_oracle.CLOBs on
string-based bind params [ticket:793]."""
class FakeDBAPI(object):
def __getattr__(self, attr):
return attr
dialect = oracle.OracleDialect()
dbapi = FakeDBAPI()
b = bindparam("foo", "hello world!")
eq_(
b.type.dialect_impl(dialect).get_dbapi_type(dbapi),
'STRING'
)
b = bindparam("foo", "hello world!")
eq_(
b.type.dialect_impl(dialect).get_dbapi_type(dbapi),
'STRING'
)
def test_long(self):
self.assert_compile(oracle.LONG(), "LONG")
def test_type_adapt(self):
dialect = cx_oracle.dialect()
for start, test in [
(Date(), cx_oracle._OracleDate),
(oracle.OracleRaw(), cx_oracle._OracleRaw),
(String(), String),
(VARCHAR(), cx_oracle._OracleString),
(DATE(), cx_oracle._OracleDate),
(oracle.DATE(), oracle.DATE),
(String(50), cx_oracle._OracleString),
(Unicode(), cx_oracle._OracleNVarChar),
(Text(), cx_oracle._OracleText),
(UnicodeText(), cx_oracle._OracleUnicodeText),
(NCHAR(), cx_oracle._OracleNVarChar),
(oracle.RAW(50), cx_oracle._OracleRaw),
]:
assert isinstance(start.dialect_impl(dialect), test), \
"wanted %r got %r" % (test, start.dialect_impl(dialect))
def test_raw_compile(self):
self.assert_compile(oracle.RAW(), "RAW")
self.assert_compile(oracle.RAW(35), "RAW(35)")
def test_char_length(self):
self.assert_compile(VARCHAR(50), "VARCHAR(50 CHAR)")
oracle8dialect = oracle.dialect()
oracle8dialect.server_version_info = (8, 0)
self.assert_compile(VARCHAR(50), "VARCHAR(50)", dialect=oracle8dialect)
self.assert_compile(NVARCHAR(50), "NVARCHAR2(50)")
self.assert_compile(CHAR(50), "CHAR(50)")
def test_varchar_types(self):
dialect = oracle.dialect()
for typ, exp in [
(String(50), "VARCHAR2(50 CHAR)"),
(Unicode(50), "NVARCHAR2(50)"),
(NVARCHAR(50), "NVARCHAR2(50)"),
(VARCHAR(50), "VARCHAR(50 CHAR)"),
(oracle.NVARCHAR2(50), "NVARCHAR2(50)"),
(oracle.VARCHAR2(50), "VARCHAR2(50 CHAR)"),
(String(), "VARCHAR2"),
(Unicode(), "NVARCHAR2"),
(NVARCHAR(), "NVARCHAR2"),
(VARCHAR(), "VARCHAR"),
(oracle.NVARCHAR2(), "NVARCHAR2"),
(oracle.VARCHAR2(), "VARCHAR2"),
]:
self.assert_compile(typ, exp, dialect=dialect)
def test_interval(self):
for type_, expected in [(oracle.INTERVAL(),
'INTERVAL DAY TO SECOND'),
(oracle.INTERVAL(day_precision=3),
'INTERVAL DAY(3) TO SECOND'),
(oracle.INTERVAL(second_precision=5),
'INTERVAL DAY TO SECOND(5)'),
(oracle.INTERVAL(day_precision=2,
second_precision=5),
'INTERVAL DAY(2) TO SECOND(5)')]:
self.assert_compile(type_, expected)
class TypesTest(fixtures.TestBase):
__only_on__ = 'oracle'
__dialect__ = oracle.OracleDialect()
@testing.fails_on('+zxjdbc', 'zxjdbc lacks the FIXED_CHAR dbapi type')
def test_fixed_char(self):
m = MetaData(testing.db)
t = Table('t1', m,
Column('id', Integer, primary_key=True),
Column('data', CHAR(30), nullable=False)
)
t.create()
try:
t.insert().execute(
dict(id=1, data="value 1"),
dict(id=2, data="value 2"),
dict(id=3, data="value 3")
)
eq_(
t.select().where(t.c.data == 'value 2').execute().fetchall(),
[(2, 'value 2 ')]
)
m2 = MetaData(testing.db)
t2 = Table('t1', m2, autoload=True)
assert type(t2.c.data.type) is CHAR
eq_(
t2.select().where(t2.c.data == 'value 2').execute().fetchall(),
[(2, 'value 2 ')]
)
finally:
t.drop()
@testing.requires.returning
@testing.provide_metadata
def test_int_not_float(self):
m = self.metadata
t1 = Table('t1', m, Column('foo', Integer))
t1.create()
r = t1.insert().values(foo=5).returning(t1.c.foo).execute()
x = r.scalar()
assert x == 5
assert isinstance(x, int)
x = t1.select().scalar()
assert x == 5
assert isinstance(x, int)
@testing.provide_metadata
def test_rowid(self):
metadata = self.metadata
t = Table('t1', metadata,
Column('x', Integer)
)
t.create()
t.insert().execute(x=5)
s1 = select([t])
s2 = select([column('rowid')]).select_from(s1)
rowid = s2.scalar()
# the ROWID type is not really needed here,
# as cx_oracle just treats it as a string,
# but we want to make sure the ROWID works...
rowid_col = column('rowid', oracle.ROWID)
s3 = select([t.c.x, rowid_col]).\
where(rowid_col == cast(rowid, oracle.ROWID))
eq_(s3.select().execute().fetchall(),
[(5, rowid)]
)
@testing.fails_on('+zxjdbc',
'Not yet known how to pass values of the '
'INTERVAL type')
@testing.provide_metadata
def test_interval(self):
metadata = self.metadata
interval_table = Table('intervaltable', metadata, Column('id',
Integer, primary_key=True,
test_needs_autoincrement=True),
Column('day_interval',
oracle.INTERVAL(day_precision=3)))
metadata.create_all()
interval_table.insert().\
execute(day_interval=datetime.timedelta(days=35,
seconds=5743))
row = interval_table.select().execute().first()
eq_(row['day_interval'], datetime.timedelta(days=35,
seconds=5743))
@testing.provide_metadata
def test_numerics(self):
m = self.metadata
t1 = Table('t1', m,
Column('intcol', Integer),
Column('numericcol', Numeric(precision=9, scale=2)),
Column('floatcol1', Float()),
Column('floatcol2', FLOAT()),
Column('doubleprec', oracle.DOUBLE_PRECISION),
Column('numbercol1', oracle.NUMBER(9)),
Column('numbercol2', oracle.NUMBER(9, 3)),
Column('numbercol3', oracle.NUMBER),
)
t1.create()
t1.insert().execute(
intcol=1,
numericcol=5.2,
floatcol1=6.5,
floatcol2=8.5,
doubleprec=9.5,
numbercol1=12,
numbercol2=14.85,
numbercol3=15.76
)
m2 = MetaData(testing.db)
t2 = Table('t1', m2, autoload=True)
for row in (
t1.select().execute().first(),
t2.select().execute().first()
):
for i, (val, type_) in enumerate((
(1, int),
(decimal.Decimal("5.2"), decimal.Decimal),
(6.5, float),
(8.5, float),
(9.5, float),
(12, int),
(decimal.Decimal("14.85"), decimal.Decimal),
(15.76, float),
)):
eq_(row[i], val)
assert isinstance(row[i], type_), '%r is not %r' \
% (row[i], type_)
def test_numeric_no_decimal_mode(self):
engine = testing_engine(options=dict(coerce_to_decimal=False))
value = engine.scalar("SELECT 5.66 FROM DUAL")
assert isinstance(value, float)
value = testing.db.scalar("SELECT 5.66 FROM DUAL")
assert isinstance(value, decimal.Decimal)
@testing.only_on("oracle+cx_oracle", "cx_oracle-specific feature")
@testing.fails_if(
testing.requires.python3,
"cx_oracle always returns unicode on py3k")
def test_coerce_to_unicode(self):
engine = testing_engine(options=dict(coerce_to_unicode=True))
value = engine.scalar("SELECT 'hello' FROM DUAL")
assert isinstance(value, util.text_type)
value = testing.db.scalar("SELECT 'hello' FROM DUAL")
assert isinstance(value, util.binary_type)
@testing.provide_metadata
def test_numerics_broken_inspection(self):
"""Numeric scenarios where Oracle type info is 'broken',
returning us precision, scale of the form (0, 0) or (0, -127).
We convert to Decimal and let int()/float() processors take over.
"""
metadata = self.metadata
# this test requires cx_oracle 5
foo = Table('foo', metadata,
Column('idata', Integer),
Column('ndata', Numeric(20, 2)),
Column('ndata2', Numeric(20, 2)),
Column('nidata', Numeric(5, 0)),
Column('fdata', Float()),
)
foo.create()
foo.insert().execute({
'idata': 5,
'ndata': decimal.Decimal("45.6"),
'ndata2': decimal.Decimal("45.0"),
'nidata': decimal.Decimal('53'),
'fdata': 45.68392
})
stmt = "SELECT idata, ndata, ndata2, nidata, fdata FROM foo"
row = testing.db.execute(stmt).fetchall()[0]
eq_(
[type(x) for x in row],
[int, decimal.Decimal, decimal.Decimal, int, float]
)
eq_(
row,
(5, decimal.Decimal('45.6'), decimal.Decimal('45'),
53, 45.683920000000001)
)
        # With a nested subquery, both Numeric values that don't have decimal
        # places, regardless of their originating type, come back as ints with
        # no useful typing information beyond "numeric", so the native handler
        # must convert to int. This means our Decimal converters need to run
        # no matter what. Totally sucks.
stmt = """
SELECT
(SELECT (SELECT idata FROM foo) FROM DUAL) AS idata,
(SELECT CAST((SELECT ndata FROM foo) AS NUMERIC(20, 2)) FROM DUAL)
AS ndata,
(SELECT CAST((SELECT ndata2 FROM foo) AS NUMERIC(20, 2)) FROM DUAL)
AS ndata2,
(SELECT CAST((SELECT nidata FROM foo) AS NUMERIC(5, 0)) FROM DUAL)
AS nidata,
(SELECT CAST((SELECT fdata FROM foo) AS FLOAT) FROM DUAL) AS fdata
FROM dual
"""
row = testing.db.execute(stmt).fetchall()[0]
eq_(
[type(x) for x in row],
[int, decimal.Decimal, int, int, decimal.Decimal]
)
eq_(
row,
(5, decimal.Decimal('45.6'), 45, 53, decimal.Decimal('45.68392'))
)
row = testing.db.execute(text(stmt,
typemap={
'idata': Integer(),
'ndata': Numeric(20, 2),
'ndata2': Numeric(20, 2),
'nidata': Numeric(5, 0),
'fdata': Float()
})).fetchall()[0]
eq_(
[type(x) for x in row],
[int, decimal.Decimal, decimal.Decimal, decimal.Decimal, float]
)
eq_(
row,
(5, decimal.Decimal('45.6'), decimal.Decimal('45'),
decimal.Decimal('53'), 45.683920000000001)
)
stmt = """
SELECT
anon_1.idata AS anon_1_idata,
anon_1.ndata AS anon_1_ndata,
anon_1.ndata2 AS anon_1_ndata2,
anon_1.nidata AS anon_1_nidata,
anon_1.fdata AS anon_1_fdata
FROM (SELECT idata, ndata, ndata2, nidata, fdata
FROM (
SELECT
(SELECT (SELECT idata FROM foo) FROM DUAL) AS idata,
(SELECT CAST((SELECT ndata FROM foo) AS NUMERIC(20, 2))
FROM DUAL) AS ndata,
(SELECT CAST((SELECT ndata2 FROM foo) AS NUMERIC(20, 2))
FROM DUAL) AS ndata2,
(SELECT CAST((SELECT nidata FROM foo) AS NUMERIC(5, 0))
FROM DUAL) AS nidata,
(SELECT CAST((SELECT fdata FROM foo) AS FLOAT) FROM DUAL)
AS fdata
FROM dual
)
WHERE ROWNUM >= 0) anon_1
"""
row = testing.db.execute(stmt).fetchall()[0]
eq_(
[type(x) for x in row],
[int, decimal.Decimal, int, int, decimal.Decimal]
)
eq_(
row,
(5, decimal.Decimal('45.6'), 45, 53, decimal.Decimal('45.68392'))
)
row = testing.db.execute(text(stmt,
typemap={
'anon_1_idata': Integer(),
'anon_1_ndata': Numeric(20, 2),
'anon_1_ndata2': Numeric(20, 2),
'anon_1_nidata': Numeric(5, 0),
'anon_1_fdata': Float()
})).fetchall()[0]
eq_(
[type(x) for x in row],
[int, decimal.Decimal, decimal.Decimal, decimal.Decimal, float]
)
eq_(
row,
(5, decimal.Decimal('45.6'), decimal.Decimal('45'),
decimal.Decimal('53'), 45.683920000000001)
)
row = testing.db.execute(text(stmt,
typemap={
'anon_1_idata': Integer(),
'anon_1_ndata': Numeric(20, 2, asdecimal=False),
'anon_1_ndata2': Numeric(20, 2, asdecimal=False),
'anon_1_nidata': Numeric(5, 0, asdecimal=False),
'anon_1_fdata': Float(asdecimal=True)
})).fetchall()[0]
eq_(
[type(x) for x in row],
[int, float, float, float, decimal.Decimal]
)
eq_(
row,
(5, 45.6, 45, 53, decimal.Decimal('45.68392'))
)
@testing.provide_metadata
def test_reflect_dates(self):
metadata = self.metadata
Table(
"date_types", metadata,
Column('d1', sqltypes.DATE),
Column('d2', oracle.DATE),
Column('d3', TIMESTAMP),
Column('d4', TIMESTAMP(timezone=True)),
Column('d5', oracle.INTERVAL(second_precision=5)),
)
metadata.create_all()
m = MetaData(testing.db)
t1 = Table(
"date_types", m,
autoload=True)
assert isinstance(t1.c.d1.type, oracle.DATE)
assert isinstance(t1.c.d1.type, DateTime)
assert isinstance(t1.c.d2.type, oracle.DATE)
assert isinstance(t1.c.d2.type, DateTime)
assert isinstance(t1.c.d3.type, TIMESTAMP)
assert not t1.c.d3.type.timezone
assert isinstance(t1.c.d4.type, TIMESTAMP)
assert t1.c.d4.type.timezone
assert isinstance(t1.c.d5.type, oracle.INTERVAL)
def test_reflect_all_types_schema(self):
types_table = Table('all_types', MetaData(testing.db),
Column('owner', String(30), primary_key=True),
Column('type_name', String(30), primary_key=True),
autoload=True, oracle_resolve_synonyms=True
)
for row in types_table.select().execute().fetchall():
[row[k] for k in row.keys()]
@testing.provide_metadata
def test_raw_roundtrip(self):
metadata = self.metadata
raw_table = Table('raw', metadata,
Column('id', Integer, primary_key=True),
Column('data', oracle.RAW(35))
)
metadata.create_all()
testing.db.execute(raw_table.insert(), id=1, data=b("ABCDEF"))
eq_(
testing.db.execute(raw_table.select()).first(),
(1, b("ABCDEF"))
)
@testing.provide_metadata
def test_reflect_nvarchar(self):
metadata = self.metadata
Table('t', metadata,
Column('data', sqltypes.NVARCHAR(255))
)
metadata.create_all()
m2 = MetaData(testing.db)
t2 = Table('t', m2, autoload=True)
assert isinstance(t2.c.data.type, sqltypes.NVARCHAR)
if testing.against('oracle+cx_oracle'):
# nvarchar returns unicode natively. cx_oracle
# _OracleNVarChar type should be at play here.
assert isinstance(
t2.c.data.type.dialect_impl(testing.db.dialect),
cx_oracle._OracleNVarChar)
data = u('m’a réveillé.')
t2.insert().execute(data=data)
res = t2.select().execute().first()['data']
eq_(res, data)
assert isinstance(res, util.text_type)
@testing.provide_metadata
def test_char_length(self):
metadata = self.metadata
t1 = Table('t1', metadata,
Column("c1", VARCHAR(50)),
Column("c2", NVARCHAR(250)),
Column("c3", CHAR(200))
)
t1.create()
m2 = MetaData(testing.db)
t2 = Table('t1', m2, autoload=True)
eq_(t2.c.c1.type.length, 50)
eq_(t2.c.c2.type.length, 250)
eq_(t2.c.c3.type.length, 200)
@testing.provide_metadata
def test_long_type(self):
metadata = self.metadata
t = Table('t', metadata,
Column('data', oracle.LONG)
)
metadata.create_all(testing.db)
testing.db.execute(t.insert(), data='xyz')
eq_(
testing.db.scalar(select([t.c.data])),
"xyz"
)
def test_longstring(self):
metadata = MetaData(testing.db)
testing.db.execute("""
CREATE TABLE Z_TEST
(
ID NUMERIC(22) PRIMARY KEY,
ADD_USER VARCHAR2(20) NOT NULL
)
""")
try:
t = Table("z_test", metadata, autoload=True)
t.insert().execute(id=1.0, add_user='foobar')
assert t.select().execute().fetchall() == [(1, 'foobar')]
finally:
testing.db.execute("DROP TABLE Z_TEST")
@testing.fails_on('+zxjdbc', 'auto_convert_lobs not applicable')
def test_lobs_without_convert(self):
engine = testing_engine(options=dict(auto_convert_lobs=False))
metadata = MetaData()
t = Table("z_test", metadata, Column('id', Integer, primary_key=True),
Column('data', Text), Column('bindata', LargeBinary))
t.create(engine)
try:
engine.execute(t.insert(), id=1,
data='this is text',
bindata=b('this is binary'))
row = engine.execute(t.select()).first()
eq_(row['data'].read(), 'this is text')
eq_(row['bindata'].read(), b('this is binary'))
finally:
t.drop(engine)
class EuroNumericTest(fixtures.TestBase):
"""test the numeric output_type_handler when using non-US locale for NLS_LANG."""
__only_on__ = 'oracle+cx_oracle'
def setup(self):
self.old_nls_lang = os.environ.get('NLS_LANG', False)
os.environ['NLS_LANG'] = "GERMAN"
self.engine = testing_engine()
def teardown(self):
if self.old_nls_lang is not False:
os.environ['NLS_LANG'] = self.old_nls_lang
else:
del os.environ['NLS_LANG']
self.engine.dispose()
def test_output_type_handler(self):
for stmt, exp, kw in [
("SELECT 0.1 FROM DUAL", decimal.Decimal("0.1"), {}),
("SELECT 15 FROM DUAL", 15, {}),
("SELECT CAST(15 AS NUMERIC(3, 1)) FROM DUAL",
decimal.Decimal("15"), {}),
("SELECT CAST(0.1 AS NUMERIC(5, 2)) FROM DUAL",
decimal.Decimal("0.1"), {}),
("SELECT :num FROM DUAL", decimal.Decimal("2.5"),
{'num': decimal.Decimal("2.5")})
]:
test_exp = self.engine.scalar(stmt, **kw)
eq_(
test_exp,
exp
)
assert type(test_exp) is type(exp)
class DontReflectIOTTest(fixtures.TestBase):
"""test that index overflow tables aren't included in
table_names."""
__only_on__ = 'oracle'
def setup(self):
testing.db.execute("""
CREATE TABLE admin_docindex(
token char(20),
doc_id NUMBER,
token_frequency NUMBER,
token_offsets VARCHAR2(2000),
CONSTRAINT pk_admin_docindex PRIMARY KEY (token, doc_id))
ORGANIZATION INDEX
TABLESPACE users
PCTTHRESHOLD 20
OVERFLOW TABLESPACE users
""")
def teardown(self):
testing.db.execute("drop table admin_docindex")
def test_reflect_all(self):
m = MetaData(testing.db)
m.reflect()
eq_(
set(t.name for t in m.tables.values()),
set(['admin_docindex'])
)
class BufferedColumnTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = 'oracle'
@classmethod
def setup_class(cls):
global binary_table, stream, meta
meta = MetaData(testing.db)
binary_table = Table('binary_table', meta,
Column('id', Integer, primary_key=True),
Column('data', LargeBinary)
)
meta.create_all()
stream = os.path.join(
os.path.dirname(__file__), "..",
'binary_data_one.dat')
with open(stream, "rb") as file_:
stream = file_.read(12000)
for i in range(1, 11):
binary_table.insert().execute(id=i, data=stream)
@classmethod
def teardown_class(cls):
meta.drop_all()
def test_fetch(self):
result = binary_table.select().order_by(binary_table.c.id).\
execute().fetchall()
eq_(result, [(i, stream) for i in range(1, 11)])
@testing.fails_on('+zxjdbc', 'FIXME: zxjdbc should support this')
def test_fetch_single_arraysize(self):
eng = testing_engine(options={'arraysize': 1})
result = eng.execute(binary_table.select().
order_by(binary_table.c.id)).fetchall()
eq_(result, [(i, stream) for i in range(1, 11)])
class UnsupportedIndexReflectTest(fixtures.TestBase):
__only_on__ = 'oracle'
@testing.emits_warning("No column names")
@testing.provide_metadata
def test_reflect_functional_index(self):
metadata = self.metadata
Table('test_index_reflect', metadata,
Column('data', String(20), primary_key=True)
)
metadata.create_all()
testing.db.execute('CREATE INDEX DATA_IDX ON '
'TEST_INDEX_REFLECT (UPPER(DATA))')
m2 = MetaData(testing.db)
Table('test_index_reflect', m2, autoload=True)
class RoundTripIndexTest(fixtures.TestBase):
__only_on__ = 'oracle'
@testing.provide_metadata
def test_basic(self):
metadata = self.metadata
table = Table("sometable", metadata,
Column("id_a", Unicode(255), primary_key=True),
Column("id_b", Unicode(255), primary_key=True, unique=True),
Column("group", Unicode(255), primary_key=True),
Column("col", Unicode(255)),
UniqueConstraint('col', 'group'),
)
# "group" is a keyword, so lower case
normalind = Index('tableind', table.c.id_b, table.c.group)
metadata.create_all()
mirror = MetaData(testing.db)
mirror.reflect()
metadata.drop_all()
mirror.create_all()
inspect = MetaData(testing.db)
inspect.reflect()
def obj_definition(obj):
return obj.__class__, tuple([c.name for c in
obj.columns]), getattr(obj, 'unique', None)
# find what the primary key constraint name should be
primaryconsname = testing.db.scalar(
text(
"""SELECT constraint_name
FROM all_constraints
WHERE table_name = :table_name
AND owner = :owner
AND constraint_type = 'P' """),
table_name=table.name.upper(),
owner=testing.db.dialect.default_schema_name.upper())
reflectedtable = inspect.tables[table.name]
# make a dictionary of the reflected objects:
reflected = dict([(obj_definition(i), i) for i in
reflectedtable.indexes
| reflectedtable.constraints])
# assert we got primary key constraint and its name, Error
# if not in dict
assert reflected[(PrimaryKeyConstraint, ('id_a', 'id_b',
'group'), None)].name.upper() \
== primaryconsname.upper()
# Error if not in dict
eq_(
reflected[(Index, ('id_b', 'group'), False)].name,
normalind.name
)
assert (Index, ('id_b', ), True) in reflected
assert (Index, ('col', 'group'), True) in reflected
eq_(len(reflectedtable.constraints), 1)
eq_(len(reflectedtable.indexes), 3)
class SequenceTest(fixtures.TestBase, AssertsCompiledSQL):
def test_basic(self):
seq = Sequence('my_seq_no_schema')
dialect = oracle.OracleDialect()
assert dialect.identifier_preparer.format_sequence(seq) \
== 'my_seq_no_schema'
seq = Sequence('my_seq', schema='some_schema')
assert dialect.identifier_preparer.format_sequence(seq) \
== 'some_schema.my_seq'
seq = Sequence('My_Seq', schema='Some_Schema')
assert dialect.identifier_preparer.format_sequence(seq) \
== '"Some_Schema"."My_Seq"'
class ExecuteTest(fixtures.TestBase):
__only_on__ = 'oracle'
def test_basic(self):
eq_(testing.db.execute('/*+ this is a comment */ SELECT 1 FROM '
'DUAL').fetchall(), [(1, )])
def test_sequences_are_integers(self):
seq = Sequence('foo_seq')
seq.create(testing.db)
try:
val = testing.db.execute(seq)
eq_(val, 1)
assert type(val) is int
finally:
seq.drop(testing.db)
@testing.provide_metadata
def test_limit_offset_for_update(self):
metadata = self.metadata
# oracle can't actually do the ROWNUM thing with FOR UPDATE
# very well.
t = Table('t1', metadata, Column('id', Integer, primary_key=True),
Column('data', Integer)
)
metadata.create_all()
t.insert().execute(
{'id': 1, 'data': 1},
{'id': 2, 'data': 7},
{'id': 3, 'data': 12},
{'id': 4, 'data': 15},
{'id': 5, 'data': 32},
)
# here, we can't use ORDER BY.
eq_(
t.select(for_update=True).limit(2).execute().fetchall(),
[(1, 1),
(2, 7)]
)
# here, it's impossible. But we'd prefer it to raise ORA-02014
# instead of issuing a syntax error.
assert_raises_message(
exc.DatabaseError,
"ORA-02014",
t.select(for_update=True).limit(2).offset(3).execute
)
class UnicodeSchemaTest(fixtures.TestBase):
__only_on__ = 'oracle'
@testing.provide_metadata
def test_quoted_column_non_unicode(self):
metadata = self.metadata
table = Table("atable", metadata,
Column("_underscorecolumn", Unicode(255), primary_key=True),
)
metadata.create_all()
table.insert().execute(
{'_underscorecolumn': u('’é')},
)
result = testing.db.execute(
table.select().where(table.c._underscorecolumn == u('’é'))
).scalar()
eq_(result, u('’é'))
@testing.provide_metadata
def test_quoted_column_unicode(self):
metadata = self.metadata
table = Table("atable", metadata,
Column(u("méil"), Unicode(255), primary_key=True),
)
metadata.create_all()
table.insert().execute(
{u('méil'): u('’é')},
)
result = testing.db.execute(
table.select().where(table.c[u('méil')] == u('’é'))
).scalar()
eq_(result, u('’é'))
class DBLinkReflectionTest(fixtures.TestBase):
__requires__ = 'oracle_test_dblink',
__only_on__ = 'oracle'
@classmethod
def setup_class(cls):
from sqlalchemy.testing import config
cls.dblink = config.file_config.get('sqla_testing', 'oracle_db_link')
with testing.db.connect() as conn:
conn.execute(
"create table test_table "
"(id integer primary key, data varchar2(50))")
conn.execute("create synonym test_table_syn "
"for test_table@%s" % cls.dblink)
@classmethod
def teardown_class(cls):
with testing.db.connect() as conn:
conn.execute("drop synonym test_table_syn")
conn.execute("drop table test_table")
def test_hello_world(self):
"""test that the synonym/dblink is functional."""
testing.db.execute("insert into test_table_syn (id, data) "
"values (1, 'some data')")
eq_(
testing.db.execute("select * from test_table_syn").first(),
(1, 'some data')
)
def test_reflection(self):
"""test the resolution of the synonym/dblink. """
m = MetaData()
t = Table('test_table_syn', m, autoload=True,
autoload_with=testing.db, oracle_resolve_synonyms=True)
eq_(list(t.c.keys()), ['id', 'data'])
eq_(list(t.primary_key), [t.c.id])
| [] | [] | ["NLS_LANG"] | [] | ["NLS_LANG"] | python | 1 | 0 |
cmd/root.go | // Copyright © 2018 Andreas Fritzler <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"os"
"path/filepath"
homedir "github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var kubeconfig string
var cfgFile string
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
Use: "garden-universe",
Short: "3D representation of a Gardener Kubernetes landscape",
Long: `3D representation of a Gardener Kubernetes landscape`,
}
func Execute() {
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func init() {
cobra.OnInitialize(initConfig)
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.garden-universe.yaml)")
rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
if home := homeDir(); home != "" {
rootCmd.PersistentFlags().StringVarP(&kubeconfig, "kubeconfig", "k", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
} else {
rootCmd.PersistentFlags().StringVarP(&kubeconfig, "kubeconfig", "k", "", "absolute path to the kubeconfig file")
}
viper.BindPFlag("kubeconfig", rootCmd.PersistentFlags().Lookup("kubeconfig"))
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
if cfgFile != "" {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
} else {
// Find home directory.
home, err := homedir.Dir()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
// Search config in home directory with name ".garden-universe" (without extension).
viper.AddConfigPath(home)
viper.SetConfigName(".garden-universe")
}
viper.AutomaticEnv() // read in environment variables that match
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
fmt.Println("Using config file:", viper.ConfigFileUsed())
}
}
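// Illustrative sketch (not part of the original tool): a minimal
// $HOME/.garden-universe.yaml that initConfig would pick up, assuming the rest
// of the program reads the bound "kubeconfig" key through viper; the path below
// is a made-up example.
//
//	kubeconfig: /home/me/.kube/config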
func homeDir() string {
if h := os.Getenv("HOME"); h != "" {
return h
}
return os.Getenv("USERPROFILE") // windows
}
| ["\"HOME\"", "\"USERPROFILE\""] | [] | ["HOME", "USERPROFILE"] | [] | ["HOME", "USERPROFILE"] | go | 2 | 0 |
main.go | package main
import (
"context"
"crypto/md5"
"encoding/hex"
"fmt"
"io"
stdlog "log"
"net/http"
"os"
"path/filepath"
"runtime/debug"
"strings"
"time"
log "github.com/go-kit/log"
"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/baggage"
stdout "go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
oteltrace "go.opentelemetry.io/otel/trace"
"gopkg.in/alecthomas/kingpin.v2"
// Routing and Cloud storage.
"tapico-turborepo-remote-cache/gcs"
"github.com/gorilla/mux"
"github.com/graymeta/stow"
"github.com/graymeta/stow/local"
"github.com/graymeta/stow/s3"
)
var logger log.Logger
var (
app = kingpin.New("tapico-turborepo-remote-cache", "A tool to work with Vercel Turborepo to upload/retrieve cache artefacts to/from popular cloud providers")
verbose = app.Flag("verbose", "Verbose mode.").Short('v').Bool()
kind = app.Flag("kind", "Kind of storage provider to use (s3, gcs, local). ($CLOUD_PROVIDER_KIND)").Default("s3").Envar("CLOUD_PROVIDER_KIND").String()
useSecure = app.Flag("secure", "Enable secure access (or HTTPs endpoints).").Envar("CLOUD_SECURE").Bool()
bucketName = app.Flag("bucket", "The name of the bucket ($BUCKET_NAME)").Envar("BUCKET_NAME").Default("tapico-remote-cache").String()
enableBucketPerTeam = app.Flag("enable-bucket-per-team", "The name of the bucket").Bool()
allowedTurboTokens = app.Flag("turbo-token", "The comma separated list of TURBO_TOKEN that the server should accept ($TURBO_TOKEN)").Envar("TURBO_TOKEN").Required().String()
googleEndpoint = app.Flag("google.endpoint", "API Endpoint of cloud storage provide to use ($GOOGLE_ENDPOINT)").Envar("GOOGLE_ENDPOINT").String()
googleProjectID = app.Flag(
"google.project-id", "The project id relevant for Google Cloud Storage ($GOOGLE_PROJECT_ID).",
).Envar("GOOGLE_PROJECT_ID").String()
googleCredentialsJSON = app.Flag(
"google.credentials", "The path to the credentials file ($GOOGLE_APPLICATION_CREDENTIALS).",
).Envar("GOOGLE_APPLICATION_CREDENTIALS").String()
localStoragePath = app.Flag(
"local.project-id", "The relative path to storage the cache artefacts when 'local' is enabled ($CLOUD_FILESYSTEM_PATH).",
).Envar("CLOUD_FILESYSTEM_PATH").String()
awsEndpoint = app.Flag(
"s3.endpoint", "The endpoint to use to connect to a Amazon S3 compatible cloud storage provider ($AWS_ENDPOINT).",
).Envar("AWS_ENDPOINT").String()
awsAccessKeyID = app.Flag(
"s3.accessKeyId", "The Amazon S3 Access Key Id ($AWS_ACCESS_KEY_ID).",
).Envar("AWS_ACCESS_KEY_ID").String()
awsSecretKey = app.Flag(
"s3.secretKey", "The Amazon S3 secret key ($AWS_SECRET_ACCESS_KEY).",
).Envar("AWS_SECRET_ACCESS_KEY").String()
awsRegionName = app.Flag(
"s3.region", "The Amazon S3 region($AWS_S3_REGION_NAME).",
).Envar("AWS_S3_REGION_NAME").String()
)
func GetBucketName(name string) string {
if *enableBucketPerTeam {
hash := md5.Sum([]byte(name))
return hex.EncodeToString(hash[:])
}
return name
}
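// exampleBucketNames is an illustrative sketch (never called by the server) of
// how GetBucketName behaves with and without --enable-bucket-per-team; the team
// id used here is a made-up example.
func exampleBucketNames() {
    *enableBucketPerTeam = false
    fmt.Println(GetBucketName("team_example")) // prints "team_example" unchanged
    *enableBucketPerTeam = true
    fmt.Println(GetBucketName("team_example")) // prints the 32-character hex MD5 digest of the team id
}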
func getProviderConfig(kind string) (stow.ConfigMap, error) {
logger.Log("message", "getProviderConfig()", "kind", kind)
var config stow.ConfigMap
var shouldDisableSSL = "true"
if *useSecure {
shouldDisableSSL = "false"
}
if kind == "s3" {
logger.Log("message", "getting provider for Amazon S3")
config = stow.ConfigMap{
s3.ConfigEndpoint: *awsEndpoint,
s3.ConfigAccessKeyID: *awsAccessKeyID,
s3.ConfigSecretKey: *awsSecretKey,
s3.ConfigDisableSSL: shouldDisableSSL,
s3.ConfigRegion: *awsRegionName,
}
} else if kind == "gcs" {
logger.Log("message", "getting provider for Google Cloud Storage")
var googleCredentialsContents []byte
// check whether the credentials flag points at an existing file; if so, read its contents
if _, err := os.Stat(*googleCredentialsJSON); err == nil {
fileContents, err := os.ReadFile(*googleCredentialsJSON)
if err == nil {
googleCredentialsContents = fileContents
}
} else {
googleCredentialsContents = []byte(*googleCredentialsJSON)
}
// // check if a file exists on the given path
// fileInfo, err := os.Stat(*googleCredentialsJSON)
// if err != nil {
// logger.Log("message", err)
// } else {
// logger.Log("fileInfo", fileInfo.Name())
// }
// fileContents, err := os.ReadFile(*googleCredentialsJSON)
// if errors.Is(err, os.ErrNotExist) {
// logger.Log("message", "the file does not exist")
// googleCredentialsContents = []byte(*googleCredentialsJSON)
// } else if err != nil {
// logger.Log("message", "the file does exist", "error", err)
// googleCredentialsContents = []byte(*googleCredentialsJSON)
// } else {
// logger.Log("message", "no file occurred")
// googleCredentialsContents = fileContents
// }
logger.Log("contents", string(googleCredentialsContents))
config = stow.ConfigMap{
gcs.ConfigProjectId: *googleProjectID,
gcs.ConfigJSON: string(googleCredentialsContents),
}
if *googleEndpoint != "" {
logger.Log("message", "Changing the Google Storage endpoint to", "endpoint=", *googleEndpoint)
config[gcs.ConfigEndpoint] = *googleEndpoint
}
} else {
logger.Log("message", "getting provider for Local Filesystem")
configPath, _ := filepath.Abs(*localStoragePath)
logger.Log(configPath)
config = stow.ConfigMap{
local.ConfigKeyPath: configPath,
}
}
// iterate through the list of config mappings and dump the values for debugging purposes
if *verbose {
for key, val := range config {
// fmt.Printf("Key: %d, Value: %s\n", key, val)
logger.Log("key", key, "value", val)
}
}
return config, nil
}
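// exampleDialLocal is an illustrative sketch (not used by the server) showing how
// a resolved stow.ConfigMap is dialled; it uses the "local" kind, and the cache
// directory path is a made-up example.
func exampleDialLocal() (stow.Location, error) {
    cfg := stow.ConfigMap{
        local.ConfigKeyPath: "/tmp/turbo-cache", // hypothetical local storage path
    }
    return stow.Dial("local", cfg)
}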
func GetContainerByName(name string) (stow.Container, error) {
config, err := getProviderConfig(*kind)
if err != nil {
return nil, err
}
// connect
location, err := stow.Dial(*kind, config)
if err != nil {
return nil, err
}
containers, item, err := location.Containers("", "", 100)
logger.Log("item", item)
for _, v := range containers {
logger.Log("message", "get container name", "value", v.Name())
}
if err != nil {
logger.Log("error", err)
} else {
for _, v := range containers {
logger.Log("message", "list of containers", "container", v)
}
}
var container stow.Container
logger.Log("message", "the name of the bucket is", "bucket", bucketName)
receivedContainer, err := location.Container(*bucketName)
if err != nil {
logger.Log("message", "failed to fetch existing container with the requested name")
logger.Log("error", err)
} else {
logger.Log("message", "found existing container")
container = receivedContainer
}
if receivedContainer == nil {
logger.Log("message", "failed to find an existing container")
createdContainer, err := location.CreateContainer(*bucketName)
if err != nil {
logger.Log("message", "failed to create container")
logger.Log(err)
return nil, err
}
logger.Log("message", "create the container for storing cache items")
container = createdContainer
}
logger.Log("message", fmt.Sprintf(`GetContainerByName() id: %s`, container.ID()))
logger.Log("message", fmt.Sprintf(`GetContainerByName() name: %s`, container.Name()))
return container, nil
}
func createCacheBlob(name string, teamID string, fileContents io.Reader, fileSize int64) (stow.Item, string, error) {
logger.Log("message", "createCacheBlob() called")
bucketName := GetBucketName(teamID)
container, err := GetContainerByName(bucketName)
if err != nil {
logger.Log("failed to get container by name", bucketName)
return nil, "", err
}
//
if container == nil {
logger.Log("message", "failed to lookup container reference")
return nil, "", nil
}
fullArtefactPath := fmt.Sprintf("%s/%s", teamID, name) //nolint
if *enableBucketPerTeam {
fullArtefactPath = fmt.Sprintf("%s", name) //nolint
}
logger.Log("message", "The full path where to store the artefact item", "path", fullArtefactPath)
//
logger.Log("message", "attempt to save item to cloud storage")
item, err := container.Put(fullArtefactPath, fileContents, fileSize, nil)
if err != nil {
logger.Log("message", "failed to save item to cloud storage")
logger.Log("error", err)
return nil, "", err
}
logger.Log("message", "attempt to return item")
itemMetadata, err := item.Metadata()
if err != nil {
logger.Log("error", err)
return nil, "", err
}
for name, value := range itemMetadata {
logger.Log("name", name, "value", value)
}
return item, fullArtefactPath, nil
}
func readCacheBlob(name string, teamID string) (stow.Item, error) {
logger.Log("message", "readCacheBlob() called")
bucketName := GetBucketName(teamID)
container, err := GetContainerByName(bucketName)
if err != nil {
logger.Log("message", "failed to get container api instance")
logger.Log(err)
logger.Log(err.Error())
return nil, err
}
//
if container == nil {
logger.Log("message", "failed to lookup container reference")
logger.Log("error", err)
return nil, nil
}
//
fullArtefactPath := fmt.Sprintf("%s/%s", teamID, name)
if *enableBucketPerTeam {
fullArtefactPath = fmt.Sprintf("%s", name) //nolint
}
logger.Log("message", "The full path where to store the artefact item", "path", fullArtefactPath)
//
logger.Log("message", "attempt to read item from cloud storage")
item, err := container.Item(fullArtefactPath)
if err != nil {
logger.Log("message", "failed to read item from cloud storage")
if err == stow.ErrNotFound {
logger.Log("message", "file was not found")
}
return nil, err
}
logger.Log("message", "attempt to return item")
itemMetadata, err := item.Metadata()
if err != nil {
logger.Log("error", err)
return nil, err
}
for name, value := range itemMetadata {
logger.Log("name", name, "value", value)
}
logger.Log("message", "attempt to return item")
logger.Log(item.Metadata())
return item, nil
}
func readCacheItem(w http.ResponseWriter, r *http.Request) {
logger.Log("message", "readCacheItem()")
pathParams := mux.Vars(r)
ctx := r.Context()
span := oteltrace.SpanFromContext(ctx)
bag := baggage.FromContext(ctx)
uk := attribute.Key("username")
span.AddEvent("handling this...", oteltrace.WithAttributes(uk.String(bag.Member("username").Value())))
artificateID := ""
if val, ok := pathParams["artificateId"]; ok {
artificateID = val
logger.Log("message", fmt.Sprintf("received the following artificateID=%s", artificateID))
} else {
w.WriteHeader(http.StatusNotFound)
w.Header().Set("Content-Type", "application/json")
_, err := w.Write([]byte(`{"error":{"message":"artificateID is missing","code":"required"}}`))
if err != nil {
logger.Log("message", err)
}
return
}
query := r.URL.Query()
if !query.Has("teamId") && !query.Has("slug") {
w.WriteHeader(http.StatusPreconditionFailed)
w.Header().Set("Content-Type", "application/json")
_, err := w.Write([]byte(`{"error":{"message":"teamID or slug is missing","code":"required"}}`))
if err != nil {
logger.Log("message", err)
}
return
}
// If teamId and slug are defined, we use slug over teamId
teamID := query.Get("teamId")
if query.Has("slug") {
teamID = query.Get("slug")
}
sanitisedteamID := GetBucketName(teamID)
logger.Log("message", fmt.Sprintf("received the following teamID=%s sanitisedteamID=%s", teamID, sanitisedteamID))
// Attempt to return the data from the cloud storage
item, err := readCacheBlob(artificateID, sanitisedteamID)
if err != nil {
logger.Log("message", "sending 404 as error occurred while reading cahe item", "error", err.Error())
logger.Log(err)
w.WriteHeader(http.StatusNotFound)
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{"error":{"message":"Artifact not found","code":"not_found"}}`))
return
}
// Attempt to read the file contents of the artificats
fileReference, err := item.Open()
if err != nil {
logger.Log("message", "sending 404 as error occurred while opening cache item from cloud storage", "error", err.Error())
w.WriteHeader(http.StatusNotFound)
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{"error":{"message":"Artifact not found","code":"not_found"}}`))
return
}
defer fileReference.Close()
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Headers", "Authorization, Accept, Content-Type")
w.Header().Set("Access-Control-Allow-Methods", "OPTIONS, GET, POST, PUT, PATCH, DELETE")
w.WriteHeader(http.StatusOK)
n, err := io.Copy(w, fileReference)
if err != nil {
logger.Log("message", "error occurred while writing cache item to response", "error", err.Error())
logger.Log(err)
stdlog.Fatal(err)
}
logger.Log("message", fmt.Sprintf("total size of buffer=%d", n))
}
func writeCacheItem(w http.ResponseWriter, r *http.Request) {
logger.Log("message", "writeCacheItem()")
pathParams := mux.Vars(r)
ctx := r.Context()
span := oteltrace.SpanFromContext(ctx)
bag := baggage.FromContext(ctx)
uk := attribute.Key("username")
span.AddEvent("handling this...", oteltrace.WithAttributes(uk.String(bag.Member("username").Value())))
artificateID := ""
if val, ok := pathParams["artificateId"]; ok {
artificateID = val
logger.Log("message", fmt.Sprintf("received the following artificateID=%s", artificateID))
} else {
w.WriteHeader(http.StatusNotFound)
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{"error":{"message":"artificateID is missing","code":"required"}}`))
return
}
query := r.URL.Query()
if !query.Has("teamId") && !query.Has("slug") {
w.WriteHeader(http.StatusPreconditionFailed)
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{"error":{"message":"teamID or slug is missing","code":"required"}}`))
return
}
// If teamId and slug are defined, we use slug over teamId
teamID := query.Get("teamId")
if query.Has("slug") {
teamID = query.Get("slug")
}
sanitisedteamID := GetBucketName(teamID)
logger.Log("message", "received the following", "teamID", teamID, "sanitisedteamID", sanitisedteamID)
_, path, err := createCacheBlob(artificateID, sanitisedteamID, r.Body, r.ContentLength)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(fmt.Sprintf(`{"error":{"message":"failed to save cache item with id %s","code":"internal_error"}}`, artificateID)))
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusAccepted)
w.Write([]byte(fmt.Sprintf(`{"urls": ["%s"]}`, path)))
}
func initTracer() *sdktrace.TracerProvider {
// Create stdout exporter to be able to retrieve
// the collected spans.
_, err := stdout.New(stdout.WithPrettyPrint())
if err != nil {
stdlog.Fatal(err)
}
// For the demonstration, use sdktrace.AlwaysSample sampler to sample all traces.
// In a production application, use sdktrace.ProbabilitySampler with a desired probability.
tp := sdktrace.NewTracerProvider(
sdktrace.WithSampler(sdktrace.AlwaysSample()),
//sdktrace.WithBatcher(exporter),
sdktrace.WithResource(resource.NewWithAttributes(semconv.SchemaURL, semconv.ServiceNameKey.String("tapico-remote-cache-service"))),
)
otel.SetTracerProvider(tp)
otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))
return tp
}
func main() {
kingpin.Version("0.0.1")
kingpin.MustParse(app.Parse(os.Args[1:]))
fmt.Printf("projectID: %s kind: %s localStoragePath: %s aws.endpoint: %s google.endpoint: %s google.credentialsJsonPath: %s", *googleProjectID, *kind, *localStoragePath, *awsEndpoint, *googleEndpoint, *googleCredentialsJSON)
// Logfmt is a structured, key=val logging format that is easy to read and parse
logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
// Direct any attempts to use Go's log package to our structured logger
stdlog.SetOutput(log.NewStdlibAdapter(logger))
// Log the timestamp (in UTC) and the callsite (file + line number) of the logging
// call for debugging in the future.
logger = log.With(logger, "ts", log.DefaultTimestampUTC, "loc", log.DefaultCaller)
tp := initTracer()
defer func() {
if err := tp.Shutdown(context.Background()); err != nil {
logger.Log("message", "Error shutting down tracer provider: %v", err)
}
}()
loggingMiddleware := LoggingMiddleware(logger)
tokenMiddleware := TokenMiddleware(logger)
r := mux.NewRouter()
r.Use(otelmux.Middleware("tapico-remote-cache"))
r.Use(tokenMiddleware)
// https://api.vercel.com/v8/artifacts/09b4848294e347d8?teamID=team_lMDgmODIeVfSbCQNQPDkX8cF
api := r.PathPrefix("/v8").Subrouter()
api.HandleFunc("/artifacts/{artificateId}", readCacheItem).Methods(http.MethodGet)
api.HandleFunc("/artifacts/{artificateId}", writeCacheItem).Methods(http.MethodPost)
api.HandleFunc("/artifacts/{artificateId}", writeCacheItem).Methods(http.MethodPut)
http.Handle("/", r)
loggedRouter := loggingMiddleware(r)
print("Starting the Tapico Turborepo remote cache server")
// Start server
address := os.Getenv("LISTEN_ADDRESS")
if len(address) > 0 {
err := http.ListenAndServe(address, loggedRouter)
if err != nil {
panic(err)
}
fmt.Printf("Started tapico-turborepo-remote-cache server at %s", address)
} else {
// Default port 8080
err := http.ListenAndServe("localhost:8080", loggedRouter)
if err != nil {
panic(err)
}
fmt.Printf("Started tapico-turborepo-remote-cache server at %s", "localhost:8080")
}
}
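// Illustrative invocation (every value below is a made-up example); each flag can
// also be provided through the environment variable named in its help text:
//
//	LISTEN_ADDRESS=:8080 ./tapico-turborepo-remote-cache \
//	    --kind=s3 --bucket=my-cache --turbo-token=my-shared-turbo-token \
//	    --s3.endpoint=http://localhost:9000 --s3.region=us-east-1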
// responseWriter is a minimal wrapper for http.ResponseWriter that allows the
// written HTTP status code to be captured for logging.
type responseWriter struct {
http.ResponseWriter
status int
// wroteHeader bool
}
func wrapResponseWriter(w http.ResponseWriter) *responseWriter {
return &responseWriter{ResponseWriter: w}
}
func isElementExist(s []string, str string) bool {
for _, v := range s {
if v == str {
return true
}
}
return false
}
func TokenMiddleware(logger log.Logger) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
fn := func(res http.ResponseWriter, req *http.Request) {
logger.Log("message", "checking if received token is in the list of accepted tokens", "tokens", *allowedTurboTokens)
// get token from authentication header
var isAccepted = false
authorizationHeader := req.Header.Get("Authorization")
if authorizationHeader != "" {
logger.Log("message", "get auth header", authorizationHeader)
// Split up the Authorization header by space to get the part of Bearer
parts := strings.Split(authorizationHeader, "Bearer")
logger.Log("authHeaderParts", strings.Join(parts, ","))
if len(parts) == 2 {
token := strings.TrimSpace(parts[1])
logger.Log("token", token)
allowedTokensList := strings.Split(*allowedTurboTokens, ",")
if isElementExist(allowedTokensList, token) {
isAccepted = true
} else {
logger.Log("message", "the token passed via --turbo-token is missing the received token", "receivedToken", token, "allowedTokens", *allowedTurboTokens)
}
}
}
// if isAccepted is true we run the next http handler, otherwise we return a 401
if isAccepted {
logger.Log("message", "TURBO_TOKEN token found in allowance token list")
next.ServeHTTP(res, req)
} else {
logger.Log("message", "missing TURBO_TOKEN")
res.WriteHeader(http.StatusUnauthorized)
res.Header().Set("Content-Type", "application/json")
res.Write([]byte(`{"error":{"message":"no permission to access endpoint with given TURBO_TOKEN","code":"permission_denied"}}`))
return
}
}
return http.HandlerFunc(fn)
}
}
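// exampleAuthorizedRequest is an illustrative sketch (not wired into the server)
// of a request that TokenMiddleware would accept, assuming the process was started
// with --turbo-token=my-shared-turbo-token; the artefact id and team id are made up.
func exampleAuthorizedRequest() *http.Request {
    req, _ := http.NewRequest(http.MethodPut,
        "http://localhost:8080/v8/artifacts/09b4848294e347d8?teamId=team_example", nil)
    req.Header.Set("Authorization", "Bearer my-shared-turbo-token")
    return req
}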
func LoggingMiddleware(logger log.Logger) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err != nil {
w.WriteHeader(http.StatusInternalServerError)
logger.Log(
"err", err,
"trace", debug.Stack(),
)
}
}()
start := time.Now()
wrapped := wrapResponseWriter(w)
next.ServeHTTP(wrapped, r)
logger.Log(
"status", wrapped.status,
"method", r.Method,
"path", r.URL.EscapedPath(),
"duration", time.Since(start),
)
}
return http.HandlerFunc(fn)
}
}
| ["\"LISTEN_ADDRESS\""] | [] | ["LISTEN_ADDRESS"] | [] | ["LISTEN_ADDRESS"] | go | 1 | 0 |
cmd/influx/cli/cli.go | // Package cli contains the logic of the influx command line client.
package cli // import "github.com/influxdata/influxdb/cmd/influx/cli"
import (
"bytes"
"context"
"encoding/csv"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"os/signal"
"path/filepath"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"syscall"
"text/tabwriter"
"golang.org/x/crypto/ssh/terminal"
"github.com/influxdata/influxdb/client"
"github.com/influxdata/influxdb/importer/v8"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxql"
"github.com/peterh/liner"
)
// ErrBlankCommand is returned when a parsed command is empty.
var ErrBlankCommand = errors.New("empty input")
// CommandLine holds CLI configuration and state.
type CommandLine struct {
Line *liner.State
Host string
Port int
Database string
Type QueryLanguage
Ssl bool
RetentionPolicy string
ClientVersion string
ServerVersion string
Pretty bool // controls pretty print for json
Format string // controls the output format. Valid values are json, csv, or column
Execute string
ShowVersion bool
Import bool
Chunked bool
ChunkSize int
NodeID int
Quit chan struct{}
IgnoreSignals bool // Ignore signals normally caught by this process (used primarily for testing)
ForceTTY bool // Force the CLI to act as if it were connected to a TTY
osSignals chan os.Signal
historyFilePath string
Client *client.Client
ClientConfig client.Config // Client config options.
ImporterConfig v8.Config // Importer configuration options.
}
// New returns an instance of CommandLine with the specified client version.
func New(version string) *CommandLine {
return &CommandLine{
ClientVersion: version,
Quit: make(chan struct{}, 1),
osSignals: make(chan os.Signal, 1),
Chunked: true,
}
}
// Run executes the CLI.
func (c *CommandLine) Run() error {
hasTTY := c.ForceTTY || terminal.IsTerminal(int(os.Stdin.Fd()))
var promptForPassword bool
// determine if they set the password flag but provided no value
for _, v := range os.Args {
v = strings.ToLower(v)
if (strings.HasPrefix(v, "-password") || strings.HasPrefix(v, "--password")) && c.ClientConfig.Password == "" {
promptForPassword = true
break
}
}
// Check if we will be able to prompt for the password later.
if promptForPassword && !hasTTY {
return errors.New("unable to prompt for a password with no TTY")
}
// Read environment variables for username/password.
if c.ClientConfig.Username == "" {
c.ClientConfig.Username = os.Getenv("INFLUX_USERNAME")
}
// If we are going to be prompted for a password, always use the entered password.
if promptForPassword {
// Open the liner (temporarily) and prompt for the password.
p, e := func() (string, error) {
l := liner.NewLiner()
defer l.Close()
return l.PasswordPrompt("password: ")
}()
if e != nil {
return errors.New("Unable to parse password")
}
c.ClientConfig.Password = p
} else if c.ClientConfig.Password == "" {
c.ClientConfig.Password = os.Getenv("INFLUX_PASSWORD")
}
if err := c.Connect(""); err != nil {
msg := "Please check your connection settings and ensure 'influxd' is running."
if !c.Ssl && strings.Contains(err.Error(), "malformed HTTP response") {
// Attempt to connect with SSL and disable secure SSL for this test.
c.Ssl = true
unsafeSsl := c.ClientConfig.UnsafeSsl
c.ClientConfig.UnsafeSsl = true
if err := c.Connect(""); err == nil {
msg = "Please use the -ssl flag to connect using SSL."
}
c.Ssl = false
c.ClientConfig.UnsafeSsl = unsafeSsl
} else if c.Ssl && !c.ClientConfig.UnsafeSsl && strings.Contains(err.Error(), "certificate is valid for") {
// Attempt to connect with an insecure connection just to see if it works.
c.ClientConfig.UnsafeSsl = true
if err := c.Connect(""); err == nil {
msg = "You may use -unsafeSsl to connect anyway, but the SSL connection will not be secure."
}
c.ClientConfig.UnsafeSsl = false
}
return fmt.Errorf("Failed to connect to %s: %s\n%s", c.Client.Addr(), err.Error(), msg)
}
// Modify precision.
c.SetPrecision(c.ClientConfig.Precision)
if c.Execute != "" {
switch c.Type {
case QueryLanguageFlux:
return c.ExecuteFluxQuery(c.Execute)
default:
// Make the non-interactive mode send everything through the CLI's parser
// the same way the interactive mode works
lines := strings.Split(c.Execute, "\n")
for _, line := range lines {
if err := c.ParseCommand(line); err != nil {
return err
}
}
}
return nil
}
if c.Import {
addr := net.JoinHostPort(c.Host, strconv.Itoa(c.Port))
u, e := client.ParseConnectionString(addr, c.Ssl)
if e != nil {
return e
}
// Copy the latest importer config and inject the latest client config
// into it.
config := c.ImporterConfig
config.Config = c.ClientConfig
config.URL = u
i := v8.NewImporter(config)
if err := i.Import(); err != nil {
err = fmt.Errorf("ERROR: %s", err)
return err
}
return nil
}
if !hasTTY {
cmd, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return err
}
switch c.Type {
case QueryLanguageFlux:
return c.ExecuteFluxQuery(string(cmd))
default:
return c.ExecuteQuery(string(cmd))
}
}
if !c.IgnoreSignals {
// register OS signals for graceful termination
signal.Notify(c.osSignals, syscall.SIGINT, syscall.SIGTERM)
}
if len(c.ServerVersion) == 0 {
fmt.Printf("WARN: Connected to %s, but found no server version.\n", c.Client.Addr())
fmt.Printf("Are you sure an InfluxDB server is listening at the given address?\n")
} else {
fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.ServerVersion)
}
c.Version()
if c.Type == QueryLanguageFlux {
repl, err := getFluxREPL(c.Host, c.Port, c.Ssl, c.ClientConfig.Username, c.ClientConfig.Password)
if err != nil {
return err
}
repl.Run()
os.Exit(0)
}
c.Line = liner.NewLiner()
defer c.Line.Close()
c.Line.SetMultiLineMode(true)
// Only load/write history if the HOME (or USERPROFILE on Windows) environment variable is set.
var historyDir string
if runtime.GOOS == "windows" {
if userDir := os.Getenv("USERPROFILE"); userDir != "" {
historyDir = userDir
}
}
if homeDir := os.Getenv("HOME"); homeDir != "" {
historyDir = homeDir
}
// Attempt to load the history file.
if historyDir != "" {
c.historyFilePath = filepath.Join(historyDir, ".influx_history")
if historyFile, err := os.Open(c.historyFilePath); err == nil {
c.Line.ReadHistory(historyFile)
historyFile.Close()
}
}
// read from prompt until exit is run
return c.mainLoop()
}
// mainLoop runs the main prompt loop for the CLI.
func (c *CommandLine) mainLoop() error {
for {
select {
case <-c.osSignals:
c.exit()
return nil
case <-c.Quit:
c.exit()
return nil
default:
l, e := c.Line.Prompt("> ")
if e == io.EOF {
// Instead of dying, register that someone exited the program gracefully
l = "exit"
} else if e != nil {
c.exit()
return e
}
if err := c.ParseCommand(l); err != ErrBlankCommand && !strings.HasPrefix(strings.TrimSpace(l), "auth") {
l = influxql.Sanitize(l)
c.Line.AppendHistory(l)
c.saveHistory()
}
}
}
}
// ParseCommand parses an instruction and calls the related method
// or executes the command as a query against InfluxDB.
func (c *CommandLine) ParseCommand(cmd string) error {
lcmd := strings.TrimSpace(strings.ToLower(cmd))
tokens := strings.Fields(lcmd)
if len(tokens) > 0 {
switch tokens[0] {
case "exit", "quit":
close(c.Quit)
case "gopher":
c.gopher()
case "connect":
return c.Connect(cmd)
case "auth":
c.SetAuth(cmd)
case "help":
c.help()
case "history":
c.history()
case "format":
c.SetFormat(cmd)
case "precision":
c.SetPrecision(cmd)
case "consistency":
c.SetWriteConsistency(cmd)
case "settings":
c.Settings()
case "chunked":
c.Chunked = !c.Chunked
if c.Chunked {
fmt.Println("chunked responses enabled")
} else {
fmt.Println("chunked reponses disabled")
}
case "chunk":
c.SetChunkSize(cmd)
case "pretty":
c.Pretty = !c.Pretty
if c.Pretty {
fmt.Println("Pretty print enabled")
} else {
fmt.Println("Pretty print disabled")
}
case "use":
c.use(cmd)
case "node":
c.node(cmd)
case "insert":
return c.Insert(cmd)
case "clear":
c.clear(cmd)
default:
return c.ExecuteQuery(cmd)
}
return nil
}
return ErrBlankCommand
}
// Connect connects to a server.
func (c *CommandLine) Connect(cmd string) error {
// normalize cmd
cmd = strings.ToLower(cmd)
// Remove the "connect" keyword if it exists
addr := strings.TrimSpace(strings.Replace(cmd, "connect", "", -1))
if addr == "" {
// If they didn't provide a connection string, use the current settings
addr = net.JoinHostPort(c.Host, strconv.Itoa(c.Port))
}
URL, err := client.ParseConnectionString(addr, c.Ssl)
if err != nil {
return err
}
// Create copy of the current client config and create a new client.
ClientConfig := c.ClientConfig
ClientConfig.UserAgent = "InfluxDBShell/" + c.ClientVersion
ClientConfig.URL = URL
ClientConfig.Proxy = http.ProxyFromEnvironment
client, err := client.NewClient(ClientConfig)
if err != nil {
return fmt.Errorf("Could not create client %s", err)
}
c.Client = client
_, v, err := c.Client.Ping()
if err != nil {
return err
}
c.ServerVersion = v
// Update the command with the current connection information
if host, port, err := net.SplitHostPort(ClientConfig.URL.Host); err == nil {
c.Host = host
if i, err := strconv.Atoi(port); err == nil {
c.Port = i
}
}
return nil
}
// SetAuth sets client authentication credentials.
func (c *CommandLine) SetAuth(cmd string) {
// If they pass in the entire command, we should parse it
// auth <username> <password>
args := strings.Fields(cmd)
if len(args) == 3 {
args = args[1:]
} else {
args = []string{}
}
if len(args) == 2 {
c.ClientConfig.Username = args[0]
c.ClientConfig.Password = args[1]
} else {
u, e := c.Line.Prompt("username: ")
if e != nil {
fmt.Printf("Unable to process input: %s", e)
return
}
c.ClientConfig.Username = strings.TrimSpace(u)
p, e := c.Line.PasswordPrompt("password: ")
if e != nil {
fmt.Printf("Unable to process input: %s", e)
return
}
c.ClientConfig.Password = p
}
// Update the client as well
c.Client.SetAuth(c.ClientConfig.Username, c.ClientConfig.Password)
}
func (c *CommandLine) clear(cmd string) {
args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ")
v := strings.ToLower(strings.Join(args[1:], " "))
switch v {
case "database", "db":
c.Database = ""
fmt.Println("database context cleared")
return
case "retention policy", "rp":
c.RetentionPolicy = ""
fmt.Println("retention policy context cleared")
return
default:
if len(args) > 1 {
fmt.Printf("invalid command %q.\n", v)
}
fmt.Println(`Possible commands for 'clear' are:
# Clear the database context
clear database
clear db
# Clear the retention policy context
clear retention policy
clear rp
`)
}
}
func (c *CommandLine) use(cmd string) {
args := strings.SplitAfterN(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ", 2)
if len(args) != 2 {
fmt.Printf("Could not parse database name from %q.\n", cmd)
return
}
stmt := args[1]
db, rp, err := parseDatabaseAndRetentionPolicy([]byte(stmt))
if err != nil {
fmt.Printf("Unable to parse database or retention policy from %s", stmt)
return
}
if !c.databaseExists(db) {
fmt.Println("DB does not exist!")
return
}
c.Database = db
fmt.Printf("Using database %s\n", db)
if rp != "" {
if !c.retentionPolicyExists(db, rp) {
return
}
c.RetentionPolicy = rp
fmt.Printf("Using retention policy %s\n", rp)
}
}
func (c *CommandLine) databaseExists(db string) bool {
// Validate if specified database exists
response, err := c.Client.Query(client.Query{Command: "SHOW DATABASES"})
if err != nil {
fmt.Printf("ERR: %s\n", err)
return false
} else if err := response.Error(); err != nil {
if c.ClientConfig.Username == "" {
fmt.Printf("ERR: %s\n", err)
return false
}
// TODO(jsternberg): Fix SHOW DATABASES to be user-aware #6397.
// If we are unable to run SHOW DATABASES, display a warning and use the
// database anyway in case the person doesn't have permission to run the
// command, but does have permission to use the database.
fmt.Printf("WARN: %s\n", err)
} else {
// Verify the provided database exists
if databaseExists := func() bool {
for _, result := range response.Results {
for _, row := range result.Series {
if row.Name == "databases" {
for _, values := range row.Values {
for _, database := range values {
if database == db {
return true
}
}
}
}
}
}
return false
}(); !databaseExists {
fmt.Printf("ERR: Database %s doesn't exist. Run SHOW DATABASES for a list of existing databases.\n", db)
return false
}
}
return true
}
func (c *CommandLine) retentionPolicyExists(db, rp string) bool {
// Validate if specified database exists
response, err := c.Client.Query(client.Query{Command: fmt.Sprintf("SHOW RETENTION POLICIES ON %q", db)})
if err != nil {
fmt.Printf("ERR: %s\n", err)
return false
} else if err := response.Error(); err != nil {
if c.ClientConfig.Username == "" {
fmt.Printf("ERR: %s\n", err)
return false
}
fmt.Printf("WARN: %s\n", err)
} else {
// Verify the provided database exists
if retentionPolicyExists := func() bool {
for _, result := range response.Results {
for _, row := range result.Series {
for _, values := range row.Values {
for i, v := range values {
if i != 0 {
continue
}
if v == rp {
return true
}
}
}
}
}
return false
}(); !retentionPolicyExists {
fmt.Printf("ERR: RETENTION POLICY %s doesn't exist. Run SHOW RETENTION POLICIES ON %q for a list of existing retention polices.\n", rp, db)
return false
}
}
return true
}
func (c *CommandLine) node(cmd string) {
args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ")
if len(args) != 2 {
fmt.Println("Improper number of arguments for 'node' command, requires exactly one.")
return
}
if args[1] == "clear" {
c.NodeID = 0
return
}
id, err := strconv.Atoi(args[1])
if err != nil {
fmt.Printf("Unable to parse node id from %s. Must be an integer or 'clear'.\n", args[1])
return
}
c.NodeID = id
}
// SetChunkSize sets the chunk size
// 0 sets it back to the default
func (c *CommandLine) SetChunkSize(cmd string) {
// normalize cmd
cmd = strings.ToLower(cmd)
cmd = strings.Join(strings.Fields(cmd), " ")
// Remove the "chunk size" keyword if it exists
cmd = strings.TrimPrefix(cmd, "chunk size ")
// Remove the "chunk" keyword if it exists
// allows them to use `chunk 50` as a shortcut
cmd = strings.TrimPrefix(cmd, "chunk ")
if n, err := strconv.ParseInt(cmd, 10, 64); err == nil {
c.ChunkSize = int(n)
if c.ChunkSize <= 0 {
c.ChunkSize = 0
}
fmt.Printf("chunk size set to %d\n", c.ChunkSize)
} else {
fmt.Printf("unable to parse chunk size from %q\n", cmd)
}
}
// SetPrecision sets client precision.
func (c *CommandLine) SetPrecision(cmd string) {
// normalize cmd
cmd = strings.ToLower(cmd)
// Remove the "precision" keyword if it exists
cmd = strings.TrimSpace(strings.Replace(cmd, "precision", "", -1))
switch cmd {
case "h", "m", "s", "ms", "u", "ns":
c.ClientConfig.Precision = cmd
c.Client.SetPrecision(c.ClientConfig.Precision)
case "rfc3339":
c.ClientConfig.Precision = ""
c.Client.SetPrecision(c.ClientConfig.Precision)
default:
fmt.Printf("Unknown precision %q. Please use rfc3339, h, m, s, ms, u or ns.\n", cmd)
}
}
// SetFormat sets output format.
func (c *CommandLine) SetFormat(cmd string) {
// normalize cmd
cmd = strings.ToLower(cmd)
// Remove the "format" keyword if it exists
cmd = strings.TrimSpace(strings.Replace(cmd, "format", "", -1))
switch cmd {
case "json", "csv", "column":
c.Format = cmd
default:
fmt.Printf("Unknown format %q. Please use json, csv, or column.\n", cmd)
}
}
// SetWriteConsistency sets write consistency level.
func (c *CommandLine) SetWriteConsistency(cmd string) {
// normalize cmd
cmd = strings.ToLower(cmd)
// Remove the "consistency" keyword if it exists
cmd = strings.TrimSpace(strings.Replace(cmd, "consistency", "", -1))
_, err := models.ParseConsistencyLevel(cmd)
if err != nil {
fmt.Printf("Unknown consistency level %q. Please use any, one, quorum, or all.\n", cmd)
return
}
c.ClientConfig.WriteConsistency = cmd
}
// isWhitespace returns true if the rune is a space, tab, or newline.
func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' }
// isLetter returns true if the rune is a letter.
func isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') }
// isDigit returns true if the rune is a digit.
func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') }
// isIdentFirstChar returns true if the rune can be used as the first char in an unquoted identifier.
func isIdentFirstChar(ch rune) bool { return isLetter(ch) || ch == '_' }
// isNotIdentChar returns true if the rune cannot be used in an unquoted identifier.
func isNotIdentChar(ch rune) bool { return !(isLetter(ch) || isDigit(ch) || ch == '_') }
func parseUnquotedIdentifier(stmt string) (string, string) {
if fields := strings.FieldsFunc(stmt, isNotIdentChar); len(fields) > 0 {
return fields[0], strings.TrimPrefix(stmt, fields[0])
}
return "", stmt
}
func parseDoubleQuotedIdentifier(stmt string) (string, string) {
escapeNext := false
fields := strings.FieldsFunc(stmt, func(ch rune) bool {
if ch == '\\' {
escapeNext = true
} else if ch == '"' {
if !escapeNext {
return true
}
escapeNext = false
}
return false
})
if len(fields) > 0 {
return fields[0], strings.TrimPrefix(stmt, "\""+fields[0]+"\"")
}
return "", stmt
}
func parseNextIdentifier(stmt string) (ident, remainder string) {
if len(stmt) > 0 {
switch {
case isWhitespace(rune(stmt[0])):
return parseNextIdentifier(stmt[1:])
case isIdentFirstChar(rune(stmt[0])):
return parseUnquotedIdentifier(stmt)
case stmt[0] == '"':
return parseDoubleQuotedIdentifier(stmt)
}
}
return "", stmt
}
func (c *CommandLine) parseInto(stmt string) *client.BatchPoints {
ident, stmt := parseNextIdentifier(stmt)
db, rp := c.Database, c.RetentionPolicy
if strings.HasPrefix(stmt, ".") {
db = ident
ident, stmt = parseNextIdentifier(stmt[1:])
}
if strings.HasPrefix(stmt, " ") {
rp = ident
stmt = stmt[1:]
}
return &client.BatchPoints{
Points: []client.Point{
client.Point{Raw: stmt},
},
Database: db,
RetentionPolicy: rp,
Precision: c.ClientConfig.Precision,
WriteConsistency: c.ClientConfig.WriteConsistency,
}
}
func (c *CommandLine) parseInsert(stmt string) (*client.BatchPoints, error) {
i, point := parseNextIdentifier(stmt)
if !strings.EqualFold(i, "insert") {
return nil, fmt.Errorf("found %s, expected INSERT", i)
}
if i, r := parseNextIdentifier(point); strings.EqualFold(i, "into") {
bp := c.parseInto(r)
return bp, nil
}
return &client.BatchPoints{
Points: []client.Point{
client.Point{Raw: point},
},
Database: c.Database,
RetentionPolicy: c.RetentionPolicy,
Precision: c.ClientConfig.Precision,
WriteConsistency: c.ClientConfig.WriteConsistency,
}, nil
}
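// Illustrative behaviour of parseInsert (database, retention policy and measurement
// names below are made-up examples):
//
//	INSERT cpu,host=a value=0.5               -> written using the session database/RP
//	INSERT INTO mydb.myrp cpu,host=a value=1  -> targets "mydb"."myrp" explicitly
//	INSERT INTO myrp cpu,host=a value=1       -> keeps the session database, uses RP "myrp"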
// Insert runs an INSERT statement.
func (c *CommandLine) Insert(stmt string) error {
bp, err := c.parseInsert(stmt)
if err != nil {
fmt.Printf("ERR: %s\n", err)
return nil
}
if _, err := c.Client.Write(*bp); err != nil {
fmt.Printf("ERR: %s\n", err)
if c.Database == "" {
fmt.Println("Note: error may be due to not setting a database or retention policy.")
fmt.Println(`Please set a database with the command "use <database>" or`)
fmt.Println("INSERT INTO <database>.<retention-policy> <point>")
}
}
return nil
}
// query creates a query struct to be used with the client.
func (c *CommandLine) query(query string) client.Query {
return client.Query{
Command: query,
Database: c.Database,
RetentionPolicy: c.RetentionPolicy,
Chunked: c.Chunked,
ChunkSize: c.ChunkSize,
NodeID: c.NodeID,
}
}
// ExecuteQuery runs any query statement.
func (c *CommandLine) ExecuteQuery(query string) error {
// If we have a retention policy, we need to rewrite the statement sources
if c.RetentionPolicy != "" {
pq, err := influxql.NewParser(strings.NewReader(query)).ParseQuery()
if err != nil {
fmt.Printf("ERR: %s\n", err)
return err
}
for _, stmt := range pq.Statements {
if selectStatement, ok := stmt.(*influxql.SelectStatement); ok {
influxql.WalkFunc(selectStatement.Sources, func(n influxql.Node) {
if t, ok := n.(*influxql.Measurement); ok {
if t.Database == "" && c.Database != "" {
t.Database = c.Database
}
if t.RetentionPolicy == "" && c.RetentionPolicy != "" {
t.RetentionPolicy = c.RetentionPolicy
}
}
})
}
}
query = pq.String()
}
ctx := context.Background()
if !c.IgnoreSignals {
done := make(chan struct{})
defer close(done)
var cancel func()
ctx, cancel = context.WithCancel(ctx)
go func() {
select {
case <-done:
case <-c.osSignals:
cancel()
}
}()
}
response, err := c.Client.QueryContext(ctx, c.query(query))
if err != nil {
if err.Error() == "" {
err = ctx.Err()
if err == context.Canceled {
err = errors.New("aborted by user")
} else if err == nil {
err = errors.New("no data received")
}
}
fmt.Printf("ERR: %s\n", err)
return err
}
c.FormatResponse(response, os.Stdout)
if err := response.Error(); err != nil {
fmt.Printf("ERR: %s\n", response.Error())
if c.Database == "" {
fmt.Println("Warning: It is possible this error is due to not setting a database.")
fmt.Println(`Please set a database with the command "use <database>".`)
}
return err
}
return nil
}
// FormatResponse formats output to the previously chosen format.
func (c *CommandLine) FormatResponse(response *client.Response, w io.Writer) {
switch c.Format {
case "json":
c.writeJSON(response, w)
case "csv":
c.writeCSV(response, w)
case "column":
c.writeColumns(response, w)
default:
fmt.Fprintf(w, "Unknown output format %q.\n", c.Format)
}
}
func (c *CommandLine) writeJSON(response *client.Response, w io.Writer) {
var data []byte
var err error
if c.Pretty {
data, err = json.MarshalIndent(response, "", " ")
} else {
data, err = json.Marshal(response)
}
if err != nil {
fmt.Fprintf(w, "Unable to parse json: %s\n", err)
return
}
fmt.Fprintln(w, string(data))
}
func tagsEqual(prev, current map[string]string) bool {
return reflect.DeepEqual(prev, current)
}
func columnsEqual(prev, current []string) bool {
return reflect.DeepEqual(prev, current)
}
func headersEqual(prev, current models.Row) bool {
if prev.Name != current.Name {
return false
}
return tagsEqual(prev.Tags, current.Tags) && columnsEqual(prev.Columns, current.Columns)
}
func (c *CommandLine) writeCSV(response *client.Response, w io.Writer) {
csvw := csv.NewWriter(w)
var previousHeaders models.Row
for _, result := range response.Results {
suppressHeaders := len(result.Series) > 0 && headersEqual(previousHeaders, result.Series[0])
if !suppressHeaders && len(result.Series) > 0 {
previousHeaders = models.Row{
Name: result.Series[0].Name,
Tags: result.Series[0].Tags,
Columns: result.Series[0].Columns,
}
}
// Create a tabbed writer for each result as they won't always line up
rows := c.formatResults(result, "\t", suppressHeaders)
for _, r := range rows {
csvw.Write(strings.Split(r, "\t"))
}
}
csvw.Flush()
}
func (c *CommandLine) writeColumns(response *client.Response, w io.Writer) {
// Create a tabbed writer for each result as they won't always line up
writer := new(tabwriter.Writer)
writer.Init(w, 0, 8, 1, ' ', 0)
var previousHeaders models.Row
for i, result := range response.Results {
// Print out all messages first
for _, m := range result.Messages {
fmt.Fprintf(w, "%s: %s.\n", m.Level, m.Text)
}
// Check to see if the headers are the same as the previous row. If so, suppress them in the output
suppressHeaders := len(result.Series) > 0 && headersEqual(previousHeaders, result.Series[0])
if !suppressHeaders && len(result.Series) > 0 {
previousHeaders = models.Row{
Name: result.Series[0].Name,
Tags: result.Series[0].Tags,
Columns: result.Series[0].Columns,
}
}
// If we are suppressing headers, don't output the extra line return. If we
// aren't suppressing headers, then we put out line returns between results
// (not before the first result, and not after the last result).
if !suppressHeaders && i > 0 {
fmt.Fprintln(writer, "")
}
rows := c.formatResults(result, "\t", suppressHeaders)
for _, r := range rows {
fmt.Fprintln(writer, r)
}
}
writer.Flush()
}
// formatResults will behave differently if you are formatting for columns or csv
func (c *CommandLine) formatResults(result client.Result, separator string, suppressHeaders bool) []string {
rows := []string{}
// Create a tabbed writer for each result as they won't always line up
for i, row := range result.Series {
// gather tags
tags := []string{}
for k, v := range row.Tags {
tags = append(tags, fmt.Sprintf("%s=%s", k, v))
sort.Strings(tags)
}
columnNames := []string{}
// Only put name/tags in a column if format is csv
if c.Format == "csv" {
if len(tags) > 0 {
columnNames = append([]string{"tags"}, columnNames...)
}
if row.Name != "" {
columnNames = append([]string{"name"}, columnNames...)
}
}
columnNames = append(columnNames, row.Columns...)
// Output a line separator if we have more than one set of results and format is column
if i > 0 && c.Format == "column" && !suppressHeaders {
rows = append(rows, "")
}
// If we are column format, we break out the name/tag to separate lines
if c.Format == "column" && !suppressHeaders {
if row.Name != "" {
n := fmt.Sprintf("name: %s", row.Name)
rows = append(rows, n)
}
if len(tags) > 0 {
t := fmt.Sprintf("tags: %s", strings.Join(tags, ", "))
rows = append(rows, t)
}
}
if !suppressHeaders {
rows = append(rows, strings.Join(columnNames, separator))
}
// if format is column, write dashes under each column
if c.Format == "column" && !suppressHeaders {
lines := []string{}
for _, columnName := range columnNames {
lines = append(lines, strings.Repeat("-", len(columnName)))
}
rows = append(rows, strings.Join(lines, separator))
}
for _, v := range row.Values {
var values []string
if c.Format == "csv" {
if row.Name != "" {
values = append(values, row.Name)
}
if len(tags) > 0 {
values = append(values, strings.Join(tags, ","))
}
}
for _, vv := range v {
values = append(values, interfaceToString(vv))
}
rows = append(rows, strings.Join(values, separator))
}
}
return rows
}
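// interfaceToString renders a single query value for display: nil becomes an empty string,
// integer types keep their exact decimal form via %d, and everything else falls back to %v.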
func interfaceToString(v interface{}) string {
switch t := v.(type) {
case nil:
return ""
case bool:
return fmt.Sprintf("%v", v)
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr:
return fmt.Sprintf("%d", t)
case float32, float64:
return fmt.Sprintf("%v", t)
default:
return fmt.Sprintf("%v", t)
}
}
// Settings prints current settings.
func (c *CommandLine) Settings() {
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 1, 1, ' ', 0)
fmt.Fprintln(w, "Setting\tValue")
fmt.Fprintln(w, "--------\t--------")
if c.Port > 0 {
fmt.Fprintf(w, "Host\t%s:%d\n", c.Host, c.Port)
} else {
fmt.Fprintf(w, "Host\t%s\n", c.Host)
}
fmt.Fprintf(w, "Username\t%s\n", c.ClientConfig.Username)
fmt.Fprintf(w, "Database\t%s\n", c.Database)
fmt.Fprintf(w, "RetentionPolicy\t%s\n", c.RetentionPolicy)
fmt.Fprintf(w, "Pretty\t%v\n", c.Pretty)
fmt.Fprintf(w, "Format\t%s\n", c.Format)
fmt.Fprintf(w, "Write Consistency\t%s\n", c.ClientConfig.WriteConsistency)
fmt.Fprintf(w, "Chunked\t%v\n", c.Chunked)
fmt.Fprintf(w, "Chunk Size\t%d\n", c.ChunkSize)
fmt.Fprintln(w)
w.Flush()
}
func (c *CommandLine) help() {
fmt.Println(`Usage:
connect <host:port> connects to another node specified by host:port
auth prompts for username and password
pretty toggles pretty print for the json format
chunked turns on chunked responses from server
chunk size <size> sets the size of the chunked responses. Set to 0 to reset to the default chunked size
use <db_name> sets current database
format <format> specifies the format of the server responses: json, csv, or column
precision <format> specifies the format of the timestamp: rfc3339, h, m, s, ms, u or ns
consistency <level> sets write consistency level: any, one, quorum, or all
history displays command history
settings outputs the current settings for the shell
clear clears settings such as database or retention policy. run 'clear' for help
exit/quit/ctrl+d quits the influx shell
show databases show database names
show series show series information
show measurements show measurement information
show tag keys show tag key information
show field keys show field key information
A full list of influxql commands can be found at:
https://docs.influxdata.com/influxdb/latest/query_language/spec/`)
}
func (c *CommandLine) history() {
var buf bytes.Buffer
c.Line.WriteHistory(&buf)
fmt.Print(buf.String())
}
func (c *CommandLine) saveHistory() {
if c.historyFilePath == "" {
return
}
if historyFile, err := os.Create(c.historyFilePath); err != nil {
fmt.Printf("There was an error writing history file: %s\n", err)
} else {
c.Line.WriteHistory(historyFile)
historyFile.Close()
}
}
func (c *CommandLine) gopher() {
fmt.Println(`
.-::-::://:-::- .:/++/'
'://:-''/oo+//++o+/.://o- ./+:
.:-. '++- .o/ '+yydhy' o-
.:/. .h: :osoys .smMN- :/
-/:.' s- /MMMymh. '/y/ s'
-+s:'''' d -mMMms// '-/o:
-/++/++/////:. o: '... s- :s.
:+-+s-' ':/' 's- /+ 'o:
'+-'o: /ydhsh. '//. '-o- o-
.y. o: .MMMdm+y ':+++:::/+:.' s:
.-h/ y- 'sdmds'h -+ydds:::-.' 'h.
.//-.d' o: '.' 'dsNMMMNh:.:++' :y
+y. 'd 's. .s:mddds: ++ o/
'N- odd 'o/. './o-s-' .---+++' o-
'N' yNd .://:/:::::. -s -+/s/./s' 'o/'
so' .h '''' ////s: '+. .s +y'
os/-.y' 's' 'y::+ +d'
'.:o/ -+:-:.' so.---.'
o' 'd-.''/s'
.s' :y.''.y
-s mo:::'
:: yh
// '''' /M'
o+ .s///:/. 'N:
:+ /: -s' ho
's- -/s/:+/.+h' +h
ys' ':' '-. -d
oh .h
/o .s
s. .h
-y .d
m/ -h
+d /o
'N- y:
h: m.
s- -d
o- s+
+- 'm'
s/ oo--.
y- /s ':+'
s' 'od--' .d:
-+ ':o: ':+-/+
y- .:+- '
//o- '.:+/.
.-:+/' ''-/+/.
./:' ''.:o+/-'
.+o:/:/+-' ''.-+ooo/-'
o: -h///++////-.
/: .o/
//+ 'y
./sooy.`)
}
// Version prints the CLI version.
func (c *CommandLine) Version() {
fmt.Println("InfluxDB shell version:", c.ClientVersion)
}
func (c *CommandLine) exit() {
// write to history file
c.saveHistory()
// release line resources
c.Line.Close()
c.Line = nil
}
func (c *CommandLine) ExecuteFluxQuery(query string) error {
ctx := context.Background()
if !c.IgnoreSignals {
done := make(chan struct{})
defer close(done)
var cancel func()
ctx, cancel = context.WithCancel(ctx)
go func() {
select {
case <-done:
case <-c.osSignals:
cancel()
}
}()
}
repl, err := getFluxREPL(c.Host, c.Port, c.Ssl, c.ClientConfig.Username, c.ClientConfig.Password)
if err != nil {
return err
}
return repl.Input(query)
}
type QueryLanguage uint8
const (
QueryLanguageInfluxQL QueryLanguage = iota
QueryLanguageFlux
)
func (l *QueryLanguage) Set(s string) error {
switch s {
case "influxql":
*l = QueryLanguageInfluxQL
case "flux":
*l = QueryLanguageFlux
default:
return fmt.Errorf("%q not supported: specify influxql or flux", s)
}
return nil
}
func (l *QueryLanguage) String() string {
switch *l {
case QueryLanguageInfluxQL:
return "influxql"
case QueryLanguageFlux:
return "flux"
}
return fmt.Sprintf("QueryLanguage(%d)", uint8(*l))
}
| [
"\"INFLUX_USERNAME\"",
"\"INFLUX_PASSWORD\"",
"\"USERPROFILE\"",
"\"HOME\""
]
| []
| [
"INFLUX_USERNAME",
"HOME",
"USERPROFILE",
"INFLUX_PASSWORD"
]
| [] | ["INFLUX_USERNAME", "HOME", "USERPROFILE", "INFLUX_PASSWORD"] | go | 4 | 0 | |
src/cmd/internal/goobj/goobj_test.go | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package goobj
import (
"debug/elf"
"debug/macho"
"debug/pe"
"fmt"
"internal/testenv"
"internal/xcoff"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
)
var (
buildDir string
go1obj string
go2obj string
goarchive string
cgoarchive string
)
func TestMain(m *testing.M) {
if !testenv.HasGoBuild() {
return
}
if err := buildGoobj(); err != nil {
fmt.Println(err)
os.RemoveAll(buildDir)
os.Exit(1)
}
exit := m.Run()
os.RemoveAll(buildDir)
os.Exit(exit)
}
func copyDir(dst, src string) error {
err := os.MkdirAll(dst, 0777)
if err != nil {
return err
}
fis, err := ioutil.ReadDir(src)
if err != nil {
return err
}
for _, fi := range fis {
err = copyFile(filepath.Join(dst, fi.Name()), filepath.Join(src, fi.Name()))
if err != nil {
return err
}
}
return nil
}
func copyFile(dst, src string) (err error) {
var s, d *os.File
s, err = os.Open(src)
if err != nil {
return err
}
defer s.Close()
d, err = os.Create(dst)
if err != nil {
return err
}
defer func() {
e := d.Close()
if err == nil {
err = e
}
}()
_, err = io.Copy(d, s)
if err != nil {
return err
}
return nil
}
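// buildGoobj compiles the testdata packages into object files and a Go archive inside a
// temporary build directory; when cgo is available it also builds and locates a cgo archive
// for the parser tests.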
func buildGoobj() error {
var err error
buildDir, err = ioutil.TempDir("", "TestGoobj")
if err != nil {
return err
}
go1obj = filepath.Join(buildDir, "go1.o")
go2obj = filepath.Join(buildDir, "go2.o")
goarchive = filepath.Join(buildDir, "go.a")
gotool, err := testenv.GoTool()
if err != nil {
return err
}
go1src := filepath.Join("testdata", "go1.go")
go2src := filepath.Join("testdata", "go2.go")
out, err := exec.Command(gotool, "tool", "compile", "-o", go1obj, go1src).CombinedOutput()
if err != nil {
return fmt.Errorf("go tool compile -o %s %s: %v\n%s", go1obj, go1src, err, out)
}
out, err = exec.Command(gotool, "tool", "compile", "-o", go2obj, go2src).CombinedOutput()
if err != nil {
return fmt.Errorf("go tool compile -o %s %s: %v\n%s", go2obj, go2src, err, out)
}
out, err = exec.Command(gotool, "tool", "pack", "c", goarchive, go1obj, go2obj).CombinedOutput()
if err != nil {
return fmt.Errorf("go tool pack c %s %s %s: %v\n%s", goarchive, go1obj, go2obj, err, out)
}
if testenv.HasCGO() {
gopath := filepath.Join(buildDir, "gopath")
err = copyDir(filepath.Join(gopath, "src", "mycgo"), filepath.Join("testdata", "mycgo"))
if err == nil {
err = ioutil.WriteFile(filepath.Join(gopath, "src", "mycgo", "go.mod"), []byte("module mycgo\n"), 0666)
}
if err != nil {
return err
}
cmd := exec.Command(gotool, "install", "-gcflags=all="+os.Getenv("GO_GCFLAGS"), "mycgo")
cmd.Dir = filepath.Join(gopath, "src", "mycgo")
cmd.Env = append(os.Environ(), "GOPATH="+gopath)
out, err = cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("go install mycgo: %v\n%s", err, out)
}
pat := filepath.Join(gopath, "pkg", "*", "mycgo.a")
ms, err := filepath.Glob(pat)
if err != nil {
return err
}
if len(ms) == 0 {
return fmt.Errorf("cannot found paths for pattern %s", pat)
}
cgoarchive = ms[0]
}
return nil
}
func TestParseGoobj(t *testing.T) {
path := go1obj
f, err := os.Open(path)
if err != nil {
t.Fatal(err)
}
defer f.Close()
p, err := Parse(f, "mypkg")
if err != nil {
t.Fatal(err)
}
if p.Arch != runtime.GOARCH {
t.Errorf("%s: got %v, want %v", path, p.Arch, runtime.GOARCH)
}
var found bool
for _, s := range p.Syms {
if s.Name == "mypkg.go1" {
found = true
break
}
}
if !found {
t.Errorf(`%s: symbol "mypkg.go1" not found`, path)
}
}
func TestParseArchive(t *testing.T) {
path := goarchive
f, err := os.Open(path)
if err != nil {
t.Fatal(err)
}
defer f.Close()
p, err := Parse(f, "mypkg")
if err != nil {
t.Fatal(err)
}
if p.Arch != runtime.GOARCH {
t.Errorf("%s: got %v, want %v", path, p.Arch, runtime.GOARCH)
}
var found1 bool
var found2 bool
for _, s := range p.Syms {
if s.Name == "mypkg.go1" {
found1 = true
}
if s.Name == "mypkg.go2" {
found2 = true
}
}
if !found1 {
t.Errorf(`%s: symbol "mypkg.go1" not found`, path)
}
if !found2 {
t.Errorf(`%s: symbol "mypkg.go2" not found`, path)
}
}
func TestParseCGOArchive(t *testing.T) {
testenv.MustHaveCGO(t)
path := cgoarchive
f, err := os.Open(path)
if err != nil {
t.Fatal(err)
}
defer f.Close()
p, err := Parse(f, "mycgo")
if err != nil {
t.Fatal(err)
}
if p.Arch != runtime.GOARCH {
t.Errorf("%s: got %v, want %v", path, p.Arch, runtime.GOARCH)
}
var found1 bool
var found2 bool
for _, s := range p.Syms {
if s.Name == "mycgo.go1" {
found1 = true
}
if s.Name == "mycgo.go2" {
found2 = true
}
}
if !found1 {
t.Errorf(`%s: symbol "mycgo.go1" not found`, path)
}
if !found2 {
t.Errorf(`%s: symbol "mycgo.go2" not found`, path)
}
c1 := "c1"
c2 := "c2"
found1 = false
found2 = false
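// Native symbol names carry platform-specific decoration: Mach-O (and PE on 386) prepends an
// underscore, while XCOFF on AIX prepends a dot, so adjust the expected names before scanning
// the native objects.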
switch runtime.GOOS {
case "darwin":
c1 = "_" + c1
c2 = "_" + c2
for _, obj := range p.Native {
mf, err := macho.NewFile(obj)
if err != nil {
t.Fatal(err)
}
if mf.Symtab == nil {
continue
}
for _, s := range mf.Symtab.Syms {
switch s.Name {
case c1:
found1 = true
case c2:
found2 = true
}
}
}
case "windows":
if runtime.GOARCH == "386" {
c1 = "_" + c1
c2 = "_" + c2
}
for _, obj := range p.Native {
pf, err := pe.NewFile(obj)
if err != nil {
t.Fatal(err)
}
for _, s := range pf.Symbols {
switch s.Name {
case c1:
found1 = true
case c2:
found2 = true
}
}
}
case "aix":
c1 = "." + c1
c2 = "." + c2
for _, obj := range p.Native {
xf, err := xcoff.NewFile(obj)
if err != nil {
t.Fatal(err)
}
for _, s := range xf.Symbols {
switch s.Name {
case c1:
found1 = true
case c2:
found2 = true
}
}
}
default:
for _, obj := range p.Native {
ef, err := elf.NewFile(obj)
if err != nil {
t.Fatal(err)
}
syms, err := ef.Symbols()
if err != nil {
t.Fatal(err)
}
for _, s := range syms {
switch s.Name {
case c1:
found1 = true
case c2:
found2 = true
}
}
}
}
if !found1 {
t.Errorf(`%s: symbol %q not found`, path, c1)
}
if !found2 {
t.Errorf(`%s: symbol %q not found`, path, c2)
}
}
| [
"\"GO_GCFLAGS\""
]
| []
| [
"GO_GCFLAGS"
]
| [] | ["GO_GCFLAGS"] | go | 1 | 0 | |
rpc/namespaces/personal/api.go | package personal
import (
"bytes"
"context"
"fmt"
"os"
"time"
"github.com/tendermint/tendermint/libs/log"
sdkcrypto "github.com/cosmos/cosmos-sdk/crypto"
"github.com/cosmos/cosmos-sdk/crypto/keyring"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/cosmos/ethermint/crypto/ethsecp256k1"
"github.com/cosmos/ethermint/crypto/hd"
"github.com/cosmos/ethermint/rpc/namespaces/eth"
rpctypes "github.com/cosmos/ethermint/rpc/types"
)
// PrivateAccountAPI is the personal_ prefixed set of APIs in the Web3 JSON-RPC spec.
type PrivateAccountAPI struct {
ethAPI *eth.PublicEthereumAPI
logger log.Logger
keyInfos []keyring.Info // all keys, both locked and unlocked. unlocked keys are stored in ethAPI.keys
}
// NewAPI creates an instance of the public Personal Eth API.
func NewAPI(ethAPI *eth.PublicEthereumAPI) *PrivateAccountAPI {
api := &PrivateAccountAPI{
ethAPI: ethAPI,
logger: log.NewTMLogger(log.NewSyncWriter(os.Stdout)).With("module", "json-rpc", "namespace", "personal"),
}
err := api.ethAPI.GetKeyringInfo()
if err != nil {
return api
}
api.keyInfos, err = api.ethAPI.ClientCtx().Keyring.List()
if err != nil {
return api
}
return api
}
// ImportRawKey armors and encrypts a given raw hex encoded ECDSA key and stores it into the key directory.
// The name of the key will have the format "personal_<length-keys>", where <length-keys> is the total number of
// keys stored on the keyring.
// NOTE: The key will be both armored and encrypted using the same passphrase.
func (api *PrivateAccountAPI) ImportRawKey(privkey, password string) (common.Address, error) {
api.logger.Debug("personal_importRawKey")
priv, err := crypto.HexToECDSA(privkey)
if err != nil {
return common.Address{}, err
}
privKey := ðsecp256k1.PrivKey{Key: crypto.FromECDSA(priv)}
armor := sdkcrypto.EncryptArmorPrivKey(privKey, password, ethsecp256k1.KeyType)
// ignore error as we only care about the length of the list
list, _ := api.ethAPI.ClientCtx().Keyring.List()
privKeyName := fmt.Sprintf("personal_%d", len(list))
if err := api.ethAPI.ClientCtx().Keyring.ImportPrivKey(privKeyName, armor, password); err != nil {
return common.Address{}, err
}
addr := common.BytesToAddress(privKey.PubKey().Address().Bytes())
info, err := api.ethAPI.ClientCtx().Keyring.Key(privKeyName)
if err != nil {
return common.Address{}, err
}
// append key and info to be able to lock and list the account
//api.ethAPI.keys = append(api.ethAPI.keys, privKey)
api.keyInfos = append(api.keyInfos, info)
api.logger.Info("key successfully imported", "name", privKeyName, "address", addr.String())
return addr, nil
}
// ListAccounts will return a list of addresses for accounts this node manages.
func (api *PrivateAccountAPI) ListAccounts() ([]common.Address, error) {
api.logger.Debug("personal_listAccounts")
addrs := []common.Address{}
for _, info := range api.keyInfos {
addressBytes := info.GetPubKey().Address().Bytes()
addrs = append(addrs, common.BytesToAddress(addressBytes))
}
return addrs, nil
}
// LockAccount will lock the account associated with the given address when it's unlocked.
// It removes the key corresponding to the given address from the API's local keys.
func (api *PrivateAccountAPI) LockAccount(address common.Address) bool {
api.logger.Debug("personal_lockAccount", "address", address.String())
keys := api.ethAPI.GetKeys()
for i, key := range keys {
if !bytes.Equal(key.PubKey().Address().Bytes(), address.Bytes()) {
continue
}
tmp := make([]ethsecp256k1.PrivKey, len(keys)-1)
copy(tmp[:i], keys[:i])
copy(tmp[i:], keys[i+1:])
api.ethAPI.SetKeys(tmp)
api.logger.Debug("account unlocked", "address", address.String())
return true
}
return false
}
// NewAccount will create a new account and returns the address for the new account.
func (api *PrivateAccountAPI) NewAccount(password string) (common.Address, error) {
api.logger.Debug("personal_newAccount")
name := "key_" + time.Now().UTC().Format(time.RFC3339)
info, _, err := api.ethAPI.ClientCtx().Keyring.NewMnemonic(name, keyring.English, password, hd.EthSecp256k1)
if err != nil {
return common.Address{}, err
}
api.keyInfos = append(api.keyInfos, info)
addr := common.BytesToAddress(info.GetPubKey().Address().Bytes())
api.logger.Info("Your new key was generated", "address", addr.String())
api.logger.Info("Please backup your key file!", "path", os.Getenv("HOME")+"/.ethermintd/"+name)
api.logger.Info("Please remember your password!")
return addr, nil
}
// UnlockAccount will unlock the account associated with the given address with
// the given password for duration seconds. If duration is nil it will use a
// default of 300 seconds. It returns an indication if the account was unlocked.
// It exports the private key corresponding to the given address from the keyring and stores it in the API's local keys.
func (api *PrivateAccountAPI) UnlockAccount(_ context.Context, addr common.Address, password string, _ *uint64) (bool, error) { // nolint: interfacer
api.logger.Debug("personal_unlockAccount", "address", addr.String())
// TODO: use duration
var keyInfo keyring.Info
for _, info := range api.keyInfos {
addressBytes := info.GetPubKey().Address().Bytes()
if bytes.Equal(addressBytes, addr[:]) {
keyInfo = info
break
}
}
if keyInfo == nil {
return false, fmt.Errorf("cannot find key with given address %s", addr.String())
}
// exporting private key only works on local keys
if keyInfo.GetType() != keyring.TypeLocal {
return false, fmt.Errorf("key type must be %s, got %s", keyring.TypeLedger.String(), keyInfo.GetType().String())
}
armor, err := api.ethAPI.ClientCtx().Keyring.ExportPrivKeyArmor(keyInfo.GetName(), password)
if err != nil {
return false, err
}
privKey, algo, err := sdkcrypto.UnarmorDecryptPrivKey(armor, password)
if err != nil {
return false, err
}
if algo != ethsecp256k1.KeyType {
return false, fmt.Errorf("invalid key algorithm, got %s, expected %s", algo, ethsecp256k1.KeyType)
}
ethermintPrivKey, ok := privKey.(*ethsecp256k1.PrivKey)
if !ok {
return false, fmt.Errorf("invalid private key type %T, expected %T", privKey, ðsecp256k1.PrivKey{})
}
api.ethAPI.SetKeys(append(api.ethAPI.GetKeys(), *ethermintPrivKey))
api.logger.Debug("account unlocked", "address", addr.String())
return true, nil
}
// SendTransaction will create a transaction from the given arguments and
// tries to sign it with the key associated with args.From. If the given password isn't
// able to decrypt the key it fails.
func (api *PrivateAccountAPI) SendTransaction(_ context.Context, args rpctypes.SendTxArgs, _ string) (common.Hash, error) {
return api.ethAPI.SendTransaction(args)
}
// Sign calculates an Ethereum ECDSA signature for:
// keccak256("\x19Ethereum Signed Message:\n" + len(message) + message))
//
// Note, the produced signature conforms to the secp256k1 curve R, S and V values,
// where the V value will be 27 or 28 for legacy reasons.
//
// The key used to calculate the signature is decrypted with the given password.
//
// https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_sign
func (api *PrivateAccountAPI) Sign(_ context.Context, data hexutil.Bytes, addr common.Address, _ string) (hexutil.Bytes, error) {
api.logger.Debug("personal_sign", "data", data, "address", addr.String())
key, ok := rpctypes.GetKeyByAddress(api.ethAPI.GetKeys(), addr)
if !ok {
return nil, fmt.Errorf("cannot find key with address %s", addr.String())
}
sig, err := crypto.Sign(accounts.TextHash(data), key.ToECDSA())
if err != nil {
return nil, err
}
sig[crypto.RecoveryIDOffset] += 27 // transform V from 0/1 to 27/28
return sig, nil
}
// EcRecover returns the address for the account that was used to create the signature.
// Note, this function is compatible with eth_sign and personal_sign. As such it recovers
// the address of:
// hash = keccak256("\x19Ethereum Signed Message:\n"${message length}${message})
// addr = ecrecover(hash, signature)
//
// Note, the signature must conform to the secp256k1 curve R, S and V values, where
// the V value must be 27 or 28 for legacy reasons.
//
// https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_ecRecover
func (api *PrivateAccountAPI) EcRecover(_ context.Context, data, sig hexutil.Bytes) (common.Address, error) {
api.logger.Debug("personal_ecRecover", "data", data, "sig", sig)
if len(sig) != crypto.SignatureLength {
return common.Address{}, fmt.Errorf("signature must be %d bytes long", crypto.SignatureLength)
}
if sig[crypto.RecoveryIDOffset] != 27 && sig[crypto.RecoveryIDOffset] != 28 {
return common.Address{}, fmt.Errorf("invalid Ethereum signature (V is not 27 or 28)")
}
sig[crypto.RecoveryIDOffset] -= 27 // Transform yellow paper V from 27/28 to 0/1
pubkey, err := crypto.SigToPub(accounts.TextHash(data), sig)
if err != nil {
return common.Address{}, err
}
return crypto.PubkeyToAddress(*pubkey), nil
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
source/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fiblist.conf.settings.base")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
test_package/conanfile.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
from conans import ConanFile, CMake, tools, RunEnvironment
import os
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
def build(self):
cmake = CMake(self)
if self.settings.os == "Windows" and not self.options['pcre'].shared:
cmake.definitions['PCRE_STATIC'] = True
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self.settings):
with tools.environment_append(RunEnvironment(self).vars):
bin_path = os.path.join("bin", "test_package")
arguments = "%sw+ Bincrafters" % ("\\" if self.settings.os == "Windows" else "\\\\")
if self.settings.os == "Windows":
self.run("%s %s" % (bin_path, arguments))
elif self.settings.os == "Macos":
self.run("DYLD_LIBRARY_PATH=%s %s %s" % (os.environ.get('DYLD_LIBRARY_PATH', ''), bin_path, arguments))
else:
self.run("LD_LIBRARY_PATH=%s %s %s" % (os.environ.get('LD_LIBRARY_PATH', ''), bin_path, arguments))
| []
| []
| [
"LD_LIBRARY_PATH",
"DYLD_LIBRARY_PATH"
]
| [] | ["LD_LIBRARY_PATH", "DYLD_LIBRARY_PATH"] | python | 2 | 0 | |
vendor/github.com/Microsoft/hcsshim/test/functional/test.go | package functional
import (
"context"
"os"
"os/exec"
"strconv"
"time"
"github.com/Microsoft/hcsshim/internal/cow"
"github.com/Microsoft/hcsshim/internal/hcsoci"
"github.com/sirupsen/logrus"
)
var pauseDurationOnCreateContainerFailure time.Duration
func init() {
if len(os.Getenv("HCSSHIM_FUNCTIONAL_TESTS_DEBUG")) > 0 {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetFormatter(&logrus.TextFormatter{FullTimestamp: true})
}
// This allows for debugging a utility VM.
s := os.Getenv("HCSSHIM_FUNCTIONAL_TESTS_PAUSE_ON_CREATECONTAINER_FAIL_IN_MINUTES")
if s != "" {
if t, err := strconv.Atoi(s); err == nil {
pauseDurationOnCreateContainerFailure = time.Duration(t) * time.Minute
}
}
// Try to stop any pre-existing compute processes
cmd := exec.Command("powershell", `get-computeprocess | stop-computeprocess -force`)
cmd.Run()
}
func CreateContainerTestWrapper(ctx context.Context, options *hcsoci.CreateOptions) (cow.Container, *hcsoci.Resources, error) {
if pauseDurationOnCreateContainerFailure != 0 {
options.DoNotReleaseResourcesOnFailure = true
}
s, r, err := hcsoci.CreateContainer(ctx, options)
if err != nil {
logrus.Warnf("Test is pausing for %s for debugging CreateContainer failure", pauseDurationOnCreateContainerFailure)
time.Sleep(pauseDurationOnCreateContainerFailure)
hcsoci.ReleaseResources(ctx, r, options.HostingSystem, true)
}
return s, r, err
}
| [
"\"HCSSHIM_FUNCTIONAL_TESTS_DEBUG\"",
"\"HCSSHIM_FUNCTIONAL_TESTS_PAUSE_ON_CREATECONTAINER_FAIL_IN_MINUTES\""
]
| []
| [
"HCSSHIM_FUNCTIONAL_TESTS_DEBUG",
"HCSSHIM_FUNCTIONAL_TESTS_PAUSE_ON_CREATECONTAINER_FAIL_IN_MINUTES"
]
| [] | ["HCSSHIM_FUNCTIONAL_TESTS_DEBUG", "HCSSHIM_FUNCTIONAL_TESTS_PAUSE_ON_CREATECONTAINER_FAIL_IN_MINUTES"] | go | 2 | 0 | |
examples/chat2/main.go | package main
import (
"context"
"crypto/rand"
"encoding/hex"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
mrand "math/rand"
"net"
"net/http"
"os"
"time"
"github.com/ethereum/go-ethereum/crypto"
logging "github.com/ipfs/go-log"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/protocol"
wakuprotocol "github.com/status-im/go-waku/waku/v2/protocol"
"github.com/multiformats/go-multiaddr"
"github.com/status-im/go-waku/waku/v2/dnsdisc"
"github.com/status-im/go-waku/waku/v2/node"
"github.com/status-im/go-waku/waku/v2/protocol/filter"
"github.com/status-im/go-waku/waku/v2/protocol/lightpush"
"github.com/status-im/go-waku/waku/v2/protocol/store"
)
var DefaultContentTopic string = wakuprotocol.NewContentTopic("toy-chat", 2, "huilong", "proto").String()
func main() {
mrand.Seed(time.Now().UTC().UnixNano())
nickFlag := flag.String("nick", "", "nickname to use in chat. will be generated if empty")
fleetFlag := flag.String("fleet", "wakuv2.prod", "Select the fleet to connect to. (wakuv2.prod, wakuv2.test)")
contentTopicFlag := flag.String("contenttopic", DefaultContentTopic, "content topic to use for the chat")
nodeKeyFlag := flag.String("nodekey", "", "private key for this node. will be generated if empty")
staticNodeFlag := flag.String("staticnode", "", "connects to a node. will get a random node from fleets.status.im if empty")
relayFlag := flag.Bool("relay", true, "enable relay protocol")
storeNodeFlag := flag.String("storenode", "", "connects to a store node to retrieve messages. will get a random node from fleets.status.im if empty")
port := flag.Int("port", 0, "port. Will be random if 0")
payloadV1Flag := flag.Bool("payloadV1", false, "use Waku v1 payload encoding/encryption. default false")
filterFlag := flag.Bool("filter", false, "enable filter protocol")
filterNodeFlag := flag.String("filternode", "", "multiaddr of peer to to request content filtering of messages")
lightPushFlag := flag.Bool("lightpush", false, "enable lightpush protocol")
lightPushNodeFlag := flag.String("lightpushnode", "", "Multiaddr of peer to to request lightpush of published messages")
keepAliveFlag := flag.Int64("keep-alive", 20, "interval in seconds for pinging peers to keep the connection alive.")
dnsDiscoveryFlag := flag.Bool("dns-discovery", false, "enable dns discovery")
dnsDiscoveryUrlFlag := flag.String("dns-discovery-url", "", "URL for DNS node list in format 'enrtree://<key>@<fqdn>'")
dnsDiscoveryNameServerFlag := flag.String("dns-discovery-nameserver", "", "DNS name server IP to query (empty to use system default)")
flag.Parse()
hostAddr, _ := net.ResolveTCPAddr("tcp", fmt.Sprintf("0.0.0.0:%d", *port))
if *fleetFlag != "wakuv2.prod" && *fleetFlag != "wakuv2.test" {
fmt.Println("Invalid fleet. Valid values are wakuv2.prod and wakuv2.test")
return
}
// use the node key from the cli flag, or generate a random one if blank
nodekey := *nodeKeyFlag
if len(nodekey) == 0 {
var err error
nodekey, err = randomHex(32)
if err != nil {
fmt.Println("Could not generate random key")
return
}
}
prvKey, err := crypto.HexToECDSA(nodekey)
if err != nil {
fmt.Println("Could not parse the node private key:", err)
return
}
ctx := context.Background()
opts := []node.WakuNodeOption{
node.WithPrivateKey(prvKey),
node.WithHostAddress(hostAddr),
node.WithWakuStore(false, false),
node.WithKeepAlive(time.Duration(*keepAliveFlag) * time.Second),
}
if *relayFlag {
opts = append(opts, node.WithWakuRelay())
}
if *filterFlag {
opts = append(opts, node.WithWakuFilter(false))
}
if *lightPushFlag || *lightPushNodeFlag != "" {
*lightPushFlag = true // If a lightpushnode was set and lightpush flag was false
opts = append(opts, node.WithLightPush())
}
wakuNode, err := node.New(ctx, opts...)
if err != nil {
fmt.Print(err)
return
}
if *lightPushFlag {
addPeer(wakuNode, *lightPushNodeFlag, lightpush.LightPushID_v20beta1)
}
if *filterFlag {
addPeer(wakuNode, *filterNodeFlag, filter.FilterID_v20beta1)
}
if err := wakuNode.Start(); err != nil {
panic(err)
}
// use the nickname from the cli flag, or a default if blank
nick := *nickFlag
if len(nick) == 0 {
nick = defaultNick(wakuNode.Host().ID())
}
// join the chat
chat, err := NewChat(ctx, wakuNode, wakuNode.Host().ID(), *contentTopicFlag, *payloadV1Flag, *lightPushFlag, nick)
if err != nil {
panic(err)
}
// Display panic level to reduce log noise
lvl, err := logging.LevelFromString("panic")
if err != nil {
panic(err)
}
logging.SetAllLoggers(lvl)
ui := NewChatUI(ctx, chat)
// Connect to a static node or use random node from fleets.status.im
go func() {
time.Sleep(200 * time.Millisecond)
staticnode := *staticNodeFlag
storenode := *storeNodeFlag
var fleetData []byte
if len(staticnode) == 0 || len(storenode) == 0 {
fleetData = getFleetData()
}
if len(staticnode) == 0 {
ui.displayMessage(fmt.Sprintf("No static peers configured. Choosing one at random from %s fleet...", *fleetFlag))
staticnode = getRandomFleetNode(fleetData, *fleetFlag)
}
ctx, cancel := context.WithTimeout(ctx, time.Duration(5)*time.Second)
defer cancel()
err = wakuNode.DialPeer(ctx, staticnode)
if err != nil {
ui.displayMessage("Could not connect to peer: " + err.Error())
return
} else {
ui.displayMessage("Connected to peer: " + staticnode)
}
enableDiscovery := *dnsDiscoveryFlag
dnsDiscoveryUrl := *dnsDiscoveryUrlFlag
dnsDiscoveryNameServer := *dnsDiscoveryNameServerFlag
if enableDiscovery && dnsDiscoveryUrl != "" {
ui.displayMessage(fmt.Sprintf("attempting DNS discovery with %s", dnsDiscoveryUrl))
multiaddresses, err := dnsdisc.RetrieveNodes(ctx, dnsDiscoveryUrl, dnsdisc.WithNameserver(dnsDiscoveryNameServer))
if err != nil {
ui.displayMessage("DNS discovery error: " + err.Error())
} else {
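// Dial every discovered peer concurrently; each dial gets its own short timeout so a single slow peer cannot block the others.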
for _, m := range multiaddresses {
go func(ctx context.Context, m multiaddr.Multiaddr) {
ctx, cancel := context.WithTimeout(ctx, time.Duration(3)*time.Second)
defer cancel()
err = wakuNode.DialPeerWithMultiAddress(ctx, m)
if err != nil {
ui.displayMessage("error dialing peer: " + err.Error())
}
}(ctx, m)
}
}
}
if len(storenode) == 0 {
ui.displayMessage(fmt.Sprintf("No store node configured. Choosing one at random from %s fleet...", *fleetFlag))
storenode = getRandomFleetNode(fleetData, *fleetFlag)
}
storeNodeId, err := addPeer(wakuNode, storenode, store.StoreID_v20beta3)
if err != nil {
ui.displayMessage("Could not connect to storenode: " + err.Error())
return
} else {
ui.displayMessage("Connected to storenode: " + storenode)
}
time.Sleep(300 * time.Millisecond)
ui.displayMessage("Querying historic messages")
tCtx, tCancel := context.WithTimeout(ctx, 5*time.Second)
defer tCancel()
q := store.Query{
ContentTopics: []string{*contentTopicFlag},
}
response, err := wakuNode.Store().Query(tCtx, q,
store.WithAutomaticRequestId(),
store.WithPeer(*storeNodeId),
store.WithPaging(true, 0))
if err != nil {
ui.displayMessage("Could not query storenode: " + err.Error())
} else {
chat.displayMessages(response.Messages)
}
}()
//draw the UI
if err = ui.Run(); err != nil {
printErr("error running text UI: %s", err)
}
wakuNode.Stop()
// TODO: filter unsubscribeAll
}
// Generates a random hex string with a length of n
func randomHex(n int) (string, error) {
bytes := make([]byte, n)
if _, err := rand.Read(bytes); err != nil {
return "", err
}
return hex.EncodeToString(bytes), nil
}
// printErr is like fmt.Printf, but writes to stderr.
func printErr(m string, args ...interface{}) {
fmt.Fprintf(os.Stderr, m, args...)
}
// defaultNick generates a nickname based on the $USER environment variable and
// the last 8 chars of a peer ID.
func defaultNick(p peer.ID) string {
return fmt.Sprintf("%s-%s", os.Getenv("USER"), shortID(p))
}
// shortID returns the last 8 chars of a base58-encoded peer id.
func shortID(p peer.ID) string {
pretty := p.Pretty()
return pretty[len(pretty)-8:]
}
func getFleetData() []byte {
url := "https://fleets.status.im"
httpClient := http.Client{
Timeout: time.Second * 2,
}
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
log.Fatal(err)
}
res, getErr := httpClient.Do(req)
if getErr != nil {
log.Fatal(getErr)
}
if res.Body != nil {
defer res.Body.Close()
}
body, readErr := ioutil.ReadAll(res.Body)
if readErr != nil {
log.Fatal(readErr)
}
return body
}
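// getRandomFleetNode parses the fleets.status.im JSON and returns the multiaddress of one waku node from the requested fleet.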
func getRandomFleetNode(data []byte, fleetId string) string {
var result map[string]interface{}
json.Unmarshal(data, &result)
fleets := result["fleets"].(map[string]interface{})
fleet := fleets[fleetId].(map[string]interface{})
waku := fleet["waku"].(map[string]interface{})
var wakunodes []string
for v := range waku {
wakunodes = append(wakunodes, v)
}
randKey := wakunodes[mrand.Intn(len(wakunodes))]
return waku[randKey].(string)
}
func addPeer(wakuNode *node.WakuNode, addr string, protocol protocol.ID) (*peer.ID, error) {
if addr == "" {
return nil, errors.New("invalid multiaddress")
}
ma, err := multiaddr.NewMultiaddr(addr)
if err != nil {
return nil, err
}
return wakuNode.AddPeer(ma, protocol)
}
| [
"\"USER\""
]
| []
| [
"USER"
]
| [] | ["USER"] | go | 1 | 0 | |
core/settings.py | """
Django settings for core project.
Generated by 'django-admin startproject' using Django 3.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', '')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', 'False') == 'True'
ALLOWED_HOSTS = [
'localhost',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'graphene_django',
'quiz'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DB_NAME', 'postgres'),
'USER': os.environ.get('DB_USER', 'postgres'),
'PASSWORD': os.environ.get('DB_PASSWORD', 'postgres'),
'HOST': 'db',
'PORT': 5432,
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| []
| []
| [
"DB_PASSWORD",
"DB_NAME",
"SECRET_KEY",
"DEBUG",
"DB_USER"
]
| [] | ["DB_PASSWORD", "DB_NAME", "SECRET_KEY", "DEBUG", "DB_USER"] | python | 5 | 0 | |
subfunctions/ALE_multi_account.py | #// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#// SPDX-License-Identifier: Apache-2.0
# Assisted Log Enabler for AWS - Find resources that are not logging, and turn them on.
# Joshua "DozerCat" McKiddy - Customer Incident Response Team (CIRT) - AWS
import logging
import os
import json
import boto3
import time
import datetime
import argparse
import csv
import string
import random
from botocore.exceptions import ClientError
from datetime import timezone
current_date = datetime.datetime.now(tz=timezone.utc)
current_date_string = str(current_date)
timestamp_date = datetime.datetime.now(tz=timezone.utc).strftime("%Y-%m-%d-%H%M%S")
timestamp_date_string = str(timestamp_date)
sts = boto3.client('sts')
s3 = boto3.client('s3')
cloudtrail = boto3.client('cloudtrail')
organizations = boto3.client('organizations')
region = os.environ['AWS_REGION']
region_list = ['af-south-1', 'ap-east-1', 'ap-south-1', 'ap-northeast-1', 'ap-northeast-2', 'ap-northeast-3', 'ap-southeast-1', 'ap-southeast-2', 'ca-central-1', 'eu-central-1', 'eu-west-1', 'eu-west-2', 'eu-west-3', 'eu-north-1', 'eu-south-1', 'me-south-1', 'sa-east-1', 'us-east-1', 'us-east-2', 'us-west-1', 'us-west-2']
# 0. Define random string for S3 Bucket Name
def random_string_generator():
lower_letters = string.ascii_lowercase
numbers = string.digits
unique_end = (''.join(random.choice(lower_letters + numbers) for char in range(6)))
return unique_end
# 1. Obtain the AWS Accounts inside of AWS Organizations
def org_account_grab():
"""Function to list accounts inside of AWS Organizations"""
try:
OrgAccountIdList: list = []
org_account_list = organizations.list_accounts()
for accounts in org_account_list['Accounts']:
OrgAccountIdList.append(accounts['Id'])
get_organization_id = organizations.describe_organization()
organization_id = get_organization_id['Organization']['Id']
except Exception as exception_handle:
logging.error(exception_handle)
logging.error("Multi account mode is only for accounts using AWS Organizations.")
logging.error("Please run the Assisted Log Enabler in single account mode to turn on AWS Logs.")
exit()
return OrgAccountIdList, organization_id
# 2. Obtain the current AWS Account Number
def get_account_number():
"""Function to grab AWS Account number that Assisted Log Enabler runs from."""
sts = boto3.client('sts')
account_number = sts.get_caller_identity()["Account"]
return account_number
# 3. Create a Bucket and Lifecycle Policy
def create_bucket(organization_id, account_number, unique_end):
"""Function to create the bucket for storing logs"""
try:
logging.info("Creating bucket in %s" % account_number)
logging.info("CreateBucket API Call")
if region == 'us-east-1':
logging_bucket_dict = s3.create_bucket(
Bucket="aws-log-collection-" + account_number + "-" + region + "-" + unique_end
)
else:
logging_bucket_dict = s3.create_bucket(
Bucket="aws-log-collection-" + account_number + "-" + region + "-" + unique_end,
CreateBucketConfiguration={
'LocationConstraint': region
}
)
logging.info("Bucket Created.")
logging.info("Setting lifecycle policy.")
lifecycle_policy = s3.put_bucket_lifecycle_configuration(
Bucket="aws-log-collection-" + account_number + "-" + region + "-" + unique_end,
LifecycleConfiguration={
'Rules': [
{
'Expiration': {
'Days': 365
},
'Status': 'Enabled',
'Prefix': '',
'ID': 'LogStorage',
'Transitions': [
{
'Days': 90,
'StorageClass': 'INTELLIGENT_TIERING'
}
]
}
]
}
)
logging.info("Lifecycle Policy successfully set.")
create_ct_path = s3.put_object(
Bucket="aws-log-collection-" + account_number + "-" + region + "-" + unique_end,
Key='cloudtrail/AWSLogs/' + account_number + '/')
create_ct_path_vpc = s3.put_object(
Bucket="aws-log-collection-" + account_number + "-" + region + "-" + unique_end,
Key='vpcflowlogs/')
create_ct_path_r53 = s3.put_object(
Bucket="aws-log-collection-" + account_number + "-" + region + "-" + unique_end,
Key='r53querylogs/')
bucket_policy = s3.put_bucket_policy(
Bucket="aws-log-collection-" + account_number + "-" + region + "-" + unique_end,
Policy='{"Version": "2012-10-17", "Statement": [{"Sid": "AWSCloudTrailAclCheck20150319","Effect": "Allow","Principal": {"Service": "cloudtrail.amazonaws.com"},"Action": "s3:GetBucketAcl","Resource": "arn:aws:s3:::aws-log-collection-' + account_number + '-' + region + '-' + unique_end + '"},{"Sid": "AWSCloudTrailWrite20150319","Effect": "Allow","Principal": {"Service": "cloudtrail.amazonaws.com"},"Action": "s3:PutObject","Resource": "arn:aws:s3:::aws-log-collection-' + account_number + '-' + region + '-' + unique_end + '/cloudtrail/AWSLogs/' + account_number + '/*","Condition": {"StringEquals": {"s3:x-amz-acl": "bucket-owner-full-control"}}},{"Sid": "AWSLogDeliveryAclCheck","Effect": "Allow","Principal": {"Service": "delivery.logs.amazonaws.com"},"Action": "s3:GetBucketAcl","Resource": "arn:aws:s3:::aws-log-collection-' + account_number + '-' + region + '-' + unique_end + '"},{"Sid": "AWSLogDeliveryWriteVPC","Effect": "Allow","Principal": {"Service": "delivery.logs.amazonaws.com"},"Action": "s3:PutObject","Resource": "arn:aws:s3:::aws-log-collection-' + account_number + '-' + region + '-' + unique_end + '/vpcflowlogs/*","Condition": {"StringEquals": {"s3:x-amz-acl": "bucket-owner-full-control"}}},{"Sid": "AWSLogDeliveryWriteR53","Effect": "Allow","Principal": {"Service": "delivery.logs.amazonaws.com"},"Action": "s3:PutObject","Resource": "arn:aws:s3:::aws-log-collection-' + account_number + '-' + region + '-' + unique_end + '/r53querylogs/*","Condition": {"StringEquals": {"s3:x-amz-acl": "bucket-owner-full-control"}}}]}'
)
logging.info("Setting the S3 bucket Public Access to Blocked")
logging.info("PutPublicAccessBlock API Call")
bucket_private = s3.put_public_access_block(
Bucket="aws-log-collection-" + account_number + "-" + region + "-" + unique_end,
PublicAccessBlockConfiguration={
'BlockPublicAcls': True,
'IgnorePublicAcls': True,
'BlockPublicPolicy': True,
'RestrictPublicBuckets': True
},
)
except Exception as exception_handle:
logging.error(exception_handle)
return account_number
# 4. Find VPCs and turn flow logs on if not on already.
def flow_log_activator(account_number, OrgAccountIdList, region_list, unique_end):
"""Function to define the list of VPCs without logging turned on"""
logging.info("Creating a list of VPCs without Flow Logs on.")
for org_account in OrgAccountIdList:
for aws_region in region_list:
sts = boto3.client('sts')
RoleArn = 'arn:aws:iam::%s:role/Assisted_Log_Enabler_IAM_Role' % org_account
logging.info('Assuming Target Role %s for Assisted Log Enabler...' % RoleArn)
assisted_log_enabler_sts = sts.assume_role(
RoleArn=RoleArn,
RoleSessionName='assisted-log-enabler-activation',
DurationSeconds=3600,
)
ec2_ma = boto3.client(
'ec2',
aws_access_key_id=assisted_log_enabler_sts['Credentials']['AccessKeyId'],
aws_secret_access_key=assisted_log_enabler_sts['Credentials']['SecretAccessKey'],
aws_session_token=assisted_log_enabler_sts['Credentials']['SessionToken'],
region_name=aws_region
)
logging.info("Creating a list of VPCs without Flow Logs on in region " + aws_region + ".")
try:
VPCList: list = []
FlowLogList: list = []
logging.info("DescribeVpcs API Call")
vpcs = ec2_ma.describe_vpcs()
for vpc_id in vpcs["Vpcs"]:
VPCList.append(vpc_id["VpcId"])
logging.info("List of VPCs found within account " + org_account + ", region " + aws_region + ":")
print(VPCList)
vpcflowloglist = ec2_ma.describe_flow_logs()
logging.info("DescribeFlowLogs API Call")
for resource_id in vpcflowloglist["FlowLogs"]:
FlowLogList.append(resource_id["ResourceId"])
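# The set difference leaves only the VPCs that do not already have a flow log attached.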
working_list = (list(set(VPCList) - set(FlowLogList)))
logging.info("List of VPCs found within account " + org_account + ", region " + aws_region + " WITHOUT VPC Flow Logs:")
print(working_list)
for no_logs in working_list:
logging.info(no_logs + " does not have VPC Flow logging on. It will be turned on within this function.")
logging.info("Activating logs for VPCs that do not have them turned on.")
logging.info("If all VPCs have Flow Logs turned on, you will get an MissingParameter error. That is normal.")
logging.info("CreateFlowLogs API Call")
flow_log_on = ec2_ma.create_flow_logs(
ResourceIds=working_list,
ResourceType='VPC',
TrafficType='ALL',
LogDestinationType='s3',
LogDestination='arn:aws:s3:::aws-log-collection-' + account_number + '-' + region + '-' + unique_end + '/vpcflowlogs',
LogFormat='${version} ${account-id} ${interface-id} ${srcaddr} ${dstaddr} ${srcport} ${dstport} ${protocol} ${packets} ${bytes} ${start} ${end} ${action} ${log-status} ${vpc-id} ${type} ${tcp-flags} ${subnet-id} ${sublocation-type} ${sublocation-id} ${region} ${pkt-srcaddr} ${pkt-dstaddr} ${instance-id} ${az-id} ${pkt-src-aws-service} ${pkt-dst-aws-service} ${flow-direction} ${traffic-path}'
)
logging.info("VPC Flow Logs are turned on for account " + org_account + ".")
except Exception as exception_handle:
logging.error(exception_handle)
# 5. Turn on EKS audit and authenticator logs.
def eks_logging(region_list, OrgAccountIdList):
"""Function to turn on logging for EKS Clusters"""
for org_account in OrgAccountIdList:
for aws_region in region_list:
logging.info("Turning on audit and authenticator logging for EKS clusters in AWS account " + org_account + ", in region " + aws_region + ".")
sts = boto3.client('sts')
RoleArn = 'arn:aws:iam::%s:role/Assisted_Log_Enabler_IAM_Role' % org_account
logging.info('Assuming Target Role %s for Assisted Log Enabler...' % RoleArn)
assisted_log_enabler_sts = sts.assume_role(
RoleArn=RoleArn,
RoleSessionName='assisted-log-enabler-activation',
DurationSeconds=3600,
)
eks_ma = boto3.client(
'eks',
aws_access_key_id=assisted_log_enabler_sts['Credentials']['AccessKeyId'],
aws_secret_access_key=assisted_log_enabler_sts['Credentials']['SecretAccessKey'],
aws_session_token=assisted_log_enabler_sts['Credentials']['SessionToken'],
region_name=aws_region
)
try:
logging.info("ListClusters API Call")
eks_clusters = eks_ma.list_clusters()
eks_cluster_list = eks_clusters ['clusters']
logging.info("EKS Clusters found in " + aws_region + ":")
print(eks_cluster_list)
for cluster in eks_cluster_list:
logging.info("UpdateClusterConfig API Call")
eks_activate = eks_ma.update_cluster_config(
name=cluster,
logging={
'clusterLogging': [
{
'types': [
'audit',
],
'enabled': True
},
{
'types': [
'authenticator',
],
'enabled': True
},
]
}
)
if eks_activate['update']['status'] == 'InProgress':
logging.info(cluster + " EKS Cluster is currently updating. Status: InProgress")
elif eks_activate['update']['status'] == 'Failed':
logging.info(cluster + " EKS Cluster failed to turn on logs. Please check if you have permissions to update the logging configuration of EKS. Status: Failed")
elif eks_activate['update']['status'] == 'Cancelled':
logging.info(cluster + " EKS Cluster log update was cancelled. Status: Cancelled.")
else:
logging.info(cluster + " EKS Cluster has audit and authenticator logs turned on.")
except Exception as exception_handle:
logging.error(exception_handle)
# 6. Turn on Route 53 Query Logging.
def route_53_query_logs(region_list, account_number, OrgAccountIdList, unique_end):
"""Function to turn on Route 53 Query Logs for VPCs"""
for org_account in OrgAccountIdList:
for aws_region in region_list:
logging.info("Turning on Route 53 Query Logging on in AWS Account " + org_account + " VPCs, in region " + aws_region + ".")
sts = boto3.client('sts')
RoleArn = 'arn:aws:iam::%s:role/Assisted_Log_Enabler_IAM_Role' % org_account
logging.info('Assuming Target Role %s for Assisted Log Enabler...' % RoleArn)
assisted_log_enabler_sts = sts.assume_role(
RoleArn=RoleArn,
RoleSessionName='assisted-log-enabler-activation',
DurationSeconds=3600,
)
ec2_ma = boto3.client(
'ec2',
aws_access_key_id=assisted_log_enabler_sts['Credentials']['AccessKeyId'],
aws_secret_access_key=assisted_log_enabler_sts['Credentials']['SecretAccessKey'],
aws_session_token=assisted_log_enabler_sts['Credentials']['SessionToken'],
region_name=aws_region
)
route53resolver_ma = boto3.client(
'route53resolver',
aws_access_key_id=assisted_log_enabler_sts['Credentials']['AccessKeyId'],
aws_secret_access_key=assisted_log_enabler_sts['Credentials']['SecretAccessKey'],
aws_session_token=assisted_log_enabler_sts['Credentials']['SessionToken'],
region_name=aws_region
)
try:
VPCList: list = []
QueryLogList: list = []
logging.info("DescribeVpcs API Call")
vpcs = ec2_ma.describe_vpcs()
for vpc_id in vpcs["Vpcs"]:
VPCList.append(vpc_id["VpcId"])
logging.info("List of VPCs found within account " + org_account + ", region " + aws_region + ":")
print(VPCList)
logging.info("ListResolverQueryLogConfigAssociations API Call")
query_log_details = route53resolver_ma.list_resolver_query_log_config_associations()
for query_log_vpc_id in query_log_details['ResolverQueryLogConfigAssociations']:
QueryLogList.append(query_log_vpc_id['ResourceId'])
r53_working_list = (list(set(VPCList) - set(QueryLogList)))
logging.info("List of VPCs found within account " + org_account + ", region " + aws_region + " WITHOUT Route 53 Query Logs:")
print(r53_working_list)
for no_query_logs in r53_working_list:
logging.info(no_query_logs + " does not have Route 53 Query logging on. It will be turned on within this function.")
logging.info("Activating logs for VPCs that do not have Route 53 Query logging turned on.")
logging.info("CreateResolverQueryLogConfig API Call")
create_query_log = route53resolver_ma.create_resolver_query_log_config(
Name='Assisted_Log_Enabler_Query_Logs_' + aws_region,
DestinationArn='arn:aws:s3:::aws-log-collection-' + account_number + '-' + region + '-' + unique_end + '/r53querylogs',
CreatorRequestId=timestamp_date_string,
Tags=[
{
'Key': 'Workflow',
'Value': 'assisted-log-enabler'
},
]
)
r53_query_log_id = create_query_log['ResolverQueryLogConfig']['Id']
logging.info("Route 53 Query Logging Created. Resource ID:" + r53_query_log_id)
for vpc in r53_working_list:
logging.info("Associating " + vpc + " with the created Route 53 Query Logging.")
logging.info("AssocateResolverQueryLogConfig")
activate_r5_logs = route53resolver_ma.associate_resolver_query_log_config(
ResolverQueryLogConfigId=r53_query_log_id,
ResourceId=vpc
)
except Exception as exception_handle:
logging.error(exception_handle)
# 7. Turn on S3 Logging.
def s3_logs(region_list, account_number, OrgAccountIdList, unique_end):
"""Function to turn on Bucket Logs for Buckets"""
for org_account in OrgAccountIdList:
for aws_region in region_list:
logging.info("Turning on Bucket Logging on in AWS Account " + org_account + " Buckets, in region " + aws_region + ".")
sts = boto3.client('sts')
RoleArn = 'arn:aws:iam::%s:role/Assisted_Log_Enabler_IAM_Role' % org_account
logging.info('Assuming Target Role %s for Assisted Log Enabler...' % RoleArn)
assisted_log_enabler_sts = sts.assume_role(
RoleArn=RoleArn,
RoleSessionName='assisted-log-enabler-activation',
DurationSeconds=3600,
)
s3_ma = boto3.client(
's3',
aws_access_key_id=assisted_log_enabler_sts['Credentials']['AccessKeyId'],
aws_secret_access_key=assisted_log_enabler_sts['Credentials']['SecretAccessKey'],
aws_session_token=assisted_log_enabler_sts['Credentials']['SessionToken'],
region_name=aws_region
)
try:
S3List: list = []
S3LogList: list = []
logging.info("ListBuckets API Call")
buckets = s3_ma.list_buckets()
for bucket in buckets['Buckets']:
s3region = s3_ma.get_bucket_location(Bucket=bucket["Name"])['LocationConstraint']
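# get_bucket_location returns None for buckets that live in us-east-1, so treat a None LocationConstraint as us-east-1 when matching the target region.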
if s3region == aws_region:
S3List.append(bucket["Name"])
elif s3region is None and aws_region == 'us-east-1':
S3List.append(bucket["Name"])
if S3List != []:
logging.info("List of Buckets found within account " + org_account + ", region " + aws_region + ":")
print(S3List)
logging.info("Parsed out buckets created by Assisted Log Enabler for AWS in " + aws_region)
logging.info("Checking remaining buckets to see if logs were enabled by Assisted Log Enabler for AWS in " + aws_region)
logging.info("GetBucketLogging API Call")
for bucket in S3List:
if 'aws-s3-log-collection-' + org_account + '-' + aws_region not in str(bucket):
s3temp=s3_ma.get_bucket_logging(Bucket=bucket)
if 'TargetBucket' not in str(s3temp):
S3LogList.append(bucket)
if S3LogList != []:
logging.info("List of Buckets found within account " + org_account + ", region " + aws_region + " WITHOUT S3 Bucket Logs:")
print(S3LogList)
for bucket in S3LogList:
logging.info(bucket + " does not have S3 BUCKET logging on. It will be turned on within this function.")
logging.info("Creating S3 Logging Bucket")
"""Function to create the bucket for storing logs"""
account_number = sts.get_caller_identity()["Account"]
logging.info("Creating bucket in %s" % org_account)
logging.info("CreateBucket API Call")
if aws_region == 'us-east-1':
logging_bucket_dict = s3_ma.create_bucket(
Bucket="aws-s3-log-collection-" + org_account + "-" + aws_region + "-" + unique_end
)
else:
logging_bucket_dict = s3_ma.create_bucket(
Bucket="aws-s3-log-collection-" + org_account + "-" + aws_region + "-" + unique_end,
CreateBucketConfiguration={
'LocationConstraint': aws_region
}
)
logging.info("Bucket " + "aws-s3-log-collection-" + org_account + "-" + aws_region + "-" + unique_end + " Created.")
logging.info("Setting lifecycle policy.")
logging.info("PutBucketLifecycleConfiguration API Call")
lifecycle_policy = s3_ma.put_bucket_lifecycle_configuration(
Bucket="aws-s3-log-collection-" + org_account + "-" + aws_region + "-" + unique_end,
LifecycleConfiguration={
'Rules': [
{
'Expiration': {
'Days': 365
},
'Status': 'Enabled',
'Prefix': '',
'ID': 'LogStorage',
'Transitions': [
{
'Days': 90,
'StorageClass': 'INTELLIGENT_TIERING'
}
]
}
]
}
)
logging.info("Lifecycle Policy successfully set.")
logging.info("Setting the S3 bucket Public Access to Blocked")
logging.info("PutPublicAccessBlock API Call")
bucket_private = s3_ma.put_public_access_block(
Bucket="aws-s3-log-collection-" + org_account + "-" + aws_region + "-" + unique_end,
PublicAccessBlockConfiguration={
'BlockPublicAcls': True,
'IgnorePublicAcls': True,
'BlockPublicPolicy': True,
'RestrictPublicBuckets': True
},
)
logging.info("GetBucketAcl API Call")
owner_id = s3_ma.get_bucket_acl(Bucket="aws-s3-log-collection-" + org_account + "-" + aws_region + "-" + unique_end)['Owner']['ID']
logging.info("PutBucketAcl API Call")
s3_ma.put_bucket_acl(Bucket="aws-s3-log-collection-" + org_account + "-" + aws_region + "-" + unique_end,GrantReadACP='uri=http://acs.amazonaws.com/groups/s3/LogDelivery',GrantWrite='uri=http://acs.amazonaws.com/groups/s3/LogDelivery',GrantFullControl='id=' + owner_id)
for bucket in S3LogList:
logging.info("Activating logs for S3 Bucket " + bucket)
logging.info("PutBucketLogging API Call")
create_s3_log = s3_ma.put_bucket_logging(
Bucket=bucket,
BucketLoggingStatus={
'LoggingEnabled': {
'TargetBucket': 'aws-s3-log-collection-' + org_account + '-' + aws_region + '-' + unique_end,
'TargetGrants': [
{
'Permission': 'FULL_CONTROL',
'Grantee': {
'Type': 'Group',
'URI': 'http://acs.amazonaws.com/groups/s3/LogDelivery'
},
},
],
'TargetPrefix': 's3logs/' + bucket
}
}
)
else:
logging.info("No S3 Bucket WITHOUT Logging enabled on account " + org_account + " region " + aws_region)
else:
logging.info("No S3 Buckets found within account " + org_account + ", region " + aws_region + ":")
except Exception as exception_handle:
logging.error(exception_handle)
def run_eks():
"""Function that runs the defined EKS logging code"""
OrgAccountIdList, organization_id = org_account_grab()
eks_logging(region_list, OrgAccountIdList)
logging.info("This is the end of the script. Please feel free to validate that logs have been turned on.")
def run_vpc_flow_logs():
"""Function that runs the defined VPC Flow Log logging code"""
unique_end = random_string_generator()
account_number = get_account_number()
OrgAccountIdList, organization_id = org_account_grab()
create_bucket(organization_id, account_number, unique_end)
flow_log_activator(account_number, OrgAccountIdList, region_list, unique_end)
logging.info("This is the end of the script. Please feel free to validate that logs have been turned on.")
def run_r53_query_logs():
"""Function that runs the defined R53 Query Logging code"""
unique_end = random_string_generator()
account_number = get_account_number()
OrgAccountIdList, organization_id = org_account_grab()
create_bucket(organization_id, account_number, unique_end)
route_53_query_logs(region_list, account_number, OrgAccountIdList, unique_end)
logging.info("This is the end of the script. Please feel free to validate that logs have been turned on.")
def run_s3_logs():
"""Function that runs the defined Bucket Logging code"""
unique_end = random_string_generator()
account_number = get_account_number()
OrgAccountIdList, organization_id = org_account_grab()
create_bucket(organization_id, account_number, unique_end)
s3_logs(region_list, account_number, OrgAccountIdList, unique_end)
logging.info("This is the end of the script. Please feel free to validate that logs have been turned on.")
def lambda_handler(event, context):
"""Function that runs all of the previously defined functions"""
unique_end = random_string_generator()
account_number = get_account_number()
OrgAccountIdList, organization_id = org_account_grab()
create_bucket(organization_id, account_number, unique_end)
flow_log_activator(account_number, OrgAccountIdList, region_list, unique_end)
eks_logging(region_list, OrgAccountIdList)
route_53_query_logs(region_list, account_number, OrgAccountIdList, unique_end)
s3_logs(region_list, account_number, OrgAccountIdList, unique_end)
logging.info("This is the end of the script. Please feel free to validate that logs have been turned on.")
if __name__ == '__main__':
event = "event"
context = "context"
lambda_handler(event, context)
| []
| []
| [
"AWS_REGION"
]
| [] | ["AWS_REGION"] | python | 1 | 0 | |
synapse/server.py | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file provides some classes for setting up (partially-populated)
# homeservers; either as a full homeserver as a real application, or a small
# partial one for unit test mocking.
# Imports required for the default HomeServer() implementation
import abc
import logging
import os
from twisted.mail.smtp import sendmail
from twisted.web.client import BrowserLikePolicyForHTTPS
from synapse.api.auth import Auth
from synapse.api.filtering import Filtering
from synapse.api.ratelimiting import Ratelimiter
from synapse.appservice.api import ApplicationServiceApi
from synapse.appservice.scheduler import ApplicationServiceScheduler
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.crypto.keyring import Keyring
from synapse.events.builder import EventBuilderFactory
from synapse.events.spamcheck import SpamChecker
from synapse.events.third_party_rules import ThirdPartyEventRules
from synapse.events.utils import EventClientSerializer
from synapse.federation.federation_client import FederationClient
from synapse.federation.federation_server import (
FederationHandlerRegistry,
FederationServer,
ReplicationFederationHandlerRegistry,
)
from synapse.federation.send_queue import FederationRemoteSendQueue
from synapse.federation.sender import FederationSender
from synapse.federation.transport.client import TransportLayerClient
from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer
from synapse.groups.groups_server import GroupsServerHandler
from synapse.handlers import Handlers
from synapse.handlers.account_validity import AccountValidityHandler
from synapse.handlers.acme import AcmeHandler
from synapse.handlers.appservice import ApplicationServicesHandler
from synapse.handlers.auth import AuthHandler, MacaroonGenerator
from synapse.handlers.deactivate_account import DeactivateAccountHandler
from synapse.handlers.device import DeviceHandler, DeviceWorkerHandler
from synapse.handlers.devicemessage import DeviceMessageHandler
from synapse.handlers.e2e_keys import E2eKeysHandler
from synapse.handlers.e2e_room_keys import E2eRoomKeysHandler
from synapse.handlers.events import EventHandler, EventStreamHandler
from synapse.handlers.groups_local import GroupsLocalHandler
from synapse.handlers.initial_sync import InitialSyncHandler
from synapse.handlers.message import EventCreationHandler, MessageHandler
from synapse.handlers.pagination import PaginationHandler
from synapse.handlers.presence import PresenceHandler
from synapse.handlers.profile import BaseProfileHandler, MasterProfileHandler
from synapse.handlers.read_marker import ReadMarkerHandler
from synapse.handlers.receipts import ReceiptsHandler
from synapse.handlers.register import RegistrationHandler
from synapse.handlers.room import RoomContextHandler, RoomCreationHandler
from synapse.handlers.room_list import RoomListHandler
from synapse.handlers.room_member import RoomMemberMasterHandler
from synapse.handlers.room_member_worker import RoomMemberWorkerHandler
from synapse.handlers.set_password import SetPasswordHandler
from synapse.handlers.stats import StatsHandler
from synapse.handlers.sync import SyncHandler
from synapse.handlers.typing import TypingHandler
from synapse.handlers.user_directory import UserDirectoryHandler
from synapse.http.client import InsecureInterceptableContextFactory, SimpleHttpClient
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
from synapse.notifier import Notifier
from synapse.push.action_generator import ActionGenerator
from synapse.push.pusherpool import PusherPool
from synapse.rest.media.v1.media_repository import (
MediaRepository,
MediaRepositoryResource,
)
from synapse.secrets import Secrets
from synapse.server_notices.server_notices_manager import ServerNoticesManager
from synapse.server_notices.server_notices_sender import ServerNoticesSender
from synapse.server_notices.worker_server_notices_sender import (
WorkerServerNoticesSender,
)
from synapse.state import StateHandler, StateResolutionHandler
from synapse.storage import DataStores, Storage
from synapse.streams.events import EventSources
from synapse.util import Clock
from synapse.util.distributor import Distributor
logger = logging.getLogger(__name__)
class HomeServer(object):
"""A basic homeserver object without lazy component builders.
This will need all of the components it requires to either be passed as
constructor arguments, or the relevant methods overriding to create them.
Typically this would only be used for unit tests.
For every dependency in the DEPENDENCIES list below, this class creates one
method,
def get_DEPENDENCY(self)
which returns the value of that dependency. If no value has yet been set
nor was provided to the constructor, it will attempt to call a lazy builder
method called
def build_DEPENDENCY(self)
which must be implemented by the subclass. This code may call any of the
required "get" methods on the instance to obtain the sub-dependencies that
one requires.
Attributes:
config (synapse.config.homeserver.HomeserverConfig):
_listening_services (list[twisted.internet.tcp.Port]): TCP ports that
we are listening on to provide HTTP services.
"""
__metaclass__ = abc.ABCMeta
DEPENDENCIES = [
"http_client",
"federation_client",
"federation_server",
"handlers",
"auth",
"room_creation_handler",
"state_handler",
"state_resolution_handler",
"presence_handler",
"sync_handler",
"typing_handler",
"room_list_handler",
"acme_handler",
"auth_handler",
"device_handler",
"stats_handler",
"e2e_keys_handler",
"e2e_room_keys_handler",
"event_handler",
"event_stream_handler",
"initial_sync_handler",
"application_service_api",
"application_service_scheduler",
"application_service_handler",
"device_message_handler",
"profile_handler",
"event_creation_handler",
"deactivate_account_handler",
"set_password_handler",
"notifier",
"event_sources",
"keyring",
"pusherpool",
"event_builder_factory",
"filtering",
"http_client_context_factory",
"simple_http_client",
"proxied_http_client",
"media_repository",
"media_repository_resource",
"federation_transport_client",
"federation_sender",
"receipts_handler",
"macaroon_generator",
"tcp_replication",
"read_marker_handler",
"action_generator",
"user_directory_handler",
"groups_local_handler",
"groups_server_handler",
"groups_attestation_signing",
"groups_attestation_renewer",
"secrets",
"spam_checker",
"third_party_event_rules",
"room_member_handler",
"federation_registry",
"server_notices_manager",
"server_notices_sender",
"message_handler",
"pagination_handler",
"room_context_handler",
"sendmail",
"registration_handler",
"account_validity_handler",
"saml_handler",
"event_client_serializer",
"storage",
]
REQUIRED_ON_MASTER_STARTUP = ["user_directory_handler", "stats_handler"]
# This is overridden in derived application classes
# (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be
# instantiated during setup() for future return by get_datastore()
DATASTORE_CLASS = abc.abstractproperty()
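    # Hedged sketch of how a derived application class might satisfy this
    # abstract property (the concrete store name is an assumption used only
    # for illustration):
    #
    #     class SynapseHomeServer(HomeServer):
    #         DATASTORE_CLASS = DataStore  # later returned by get_datastore()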
def __init__(self, hostname: str, config: HomeServerConfig, reactor=None, **kwargs):
"""
Args:
hostname : The hostname for the server.
config: The full config for the homeserver.
"""
if not reactor:
from twisted.internet import reactor
self._reactor = reactor
self.hostname = hostname
self.config = config
self._building = {}
self._listening_services = []
self.start_time = None
self.clock = Clock(reactor)
self.distributor = Distributor()
self.ratelimiter = Ratelimiter()
self.admin_redaction_ratelimiter = Ratelimiter()
self.registration_ratelimiter = Ratelimiter()
self.datastores = None
# Other kwargs are explicit dependencies
for depname in kwargs:
setattr(self, depname, kwargs[depname])
def setup(self):
logger.info("Setting up.")
self.start_time = int(self.get_clock().time())
self.datastores = DataStores(self.DATASTORE_CLASS, self)
logger.info("Finished setting up.")
def setup_master(self):
"""
Some handlers have side effects on instantiation (like registering
background updates). This function causes them to be fetched, and
therefore instantiated, to run those side effects.
"""
for i in self.REQUIRED_ON_MASTER_STARTUP:
getattr(self, "get_" + i)()
def get_reactor(self):
"""
Fetch the Twisted reactor in use by this HomeServer.
"""
return self._reactor
def get_ip_from_request(self, request):
# X-Forwarded-For is handled by our custom request type.
return request.getClientIP()
def is_mine(self, domain_specific_string):
return domain_specific_string.domain == self.hostname
def is_mine_id(self, string):
return string.split(":", 1)[1] == self.hostname
def get_clock(self):
return self.clock
def get_datastore(self):
return self.datastores.main
def get_datastores(self):
return self.datastores
def get_config(self):
return self.config
def get_distributor(self):
return self.distributor
def get_ratelimiter(self):
return self.ratelimiter
def get_registration_ratelimiter(self):
return self.registration_ratelimiter
def get_admin_redaction_ratelimiter(self):
return self.admin_redaction_ratelimiter
def build_federation_client(self):
return FederationClient(self)
def build_federation_server(self):
return FederationServer(self)
def build_handlers(self):
return Handlers(self)
def build_notifier(self):
return Notifier(self)
def build_auth(self):
return Auth(self)
def build_http_client_context_factory(self):
return (
InsecureInterceptableContextFactory()
if self.config.use_insecure_ssl_client_just_for_testing_do_not_use
else BrowserLikePolicyForHTTPS()
)
def build_simple_http_client(self):
return SimpleHttpClient(self)
def build_proxied_http_client(self):
return SimpleHttpClient(
self,
http_proxy=os.getenvb(b"http_proxy"),
https_proxy=os.getenvb(b"HTTPS_PROXY"),
)
def build_room_creation_handler(self):
return RoomCreationHandler(self)
def build_sendmail(self):
return sendmail
def build_state_handler(self):
return StateHandler(self)
def build_state_resolution_handler(self):
return StateResolutionHandler(self)
def build_presence_handler(self):
return PresenceHandler(self)
def build_typing_handler(self):
return TypingHandler(self)
def build_sync_handler(self):
return SyncHandler(self)
def build_room_list_handler(self):
return RoomListHandler(self)
def build_auth_handler(self):
return AuthHandler(self)
def build_macaroon_generator(self):
return MacaroonGenerator(self)
def build_device_handler(self):
if self.config.worker_app:
return DeviceWorkerHandler(self)
else:
return DeviceHandler(self)
def build_device_message_handler(self):
return DeviceMessageHandler(self)
def build_e2e_keys_handler(self):
return E2eKeysHandler(self)
def build_e2e_room_keys_handler(self):
return E2eRoomKeysHandler(self)
def build_acme_handler(self):
return AcmeHandler(self)
def build_application_service_api(self):
return ApplicationServiceApi(self)
def build_application_service_scheduler(self):
return ApplicationServiceScheduler(self)
def build_application_service_handler(self):
return ApplicationServicesHandler(self)
def build_event_handler(self):
return EventHandler(self)
def build_event_stream_handler(self):
return EventStreamHandler(self)
def build_initial_sync_handler(self):
return InitialSyncHandler(self)
def build_profile_handler(self):
if self.config.worker_app:
return BaseProfileHandler(self)
else:
return MasterProfileHandler(self)
def build_event_creation_handler(self):
return EventCreationHandler(self)
def build_deactivate_account_handler(self):
return DeactivateAccountHandler(self)
def build_set_password_handler(self):
return SetPasswordHandler(self)
def build_event_sources(self):
return EventSources(self)
def build_keyring(self):
return Keyring(self)
def build_event_builder_factory(self):
return EventBuilderFactory(self)
def build_filtering(self):
return Filtering(self)
def build_pusherpool(self):
return PusherPool(self)
def build_http_client(self):
tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
self.config
)
return MatrixFederationHttpClient(self, tls_client_options_factory)
def build_media_repository_resource(self):
# build the media repo resource. This indirects through the HomeServer
# to ensure that we only have a single instance of
return MediaRepositoryResource(self)
def build_media_repository(self):
return MediaRepository(self)
def build_federation_transport_client(self):
return TransportLayerClient(self)
def build_federation_sender(self):
if self.should_send_federation():
return FederationSender(self)
elif not self.config.worker_app:
return FederationRemoteSendQueue(self)
else:
raise Exception("Workers cannot send federation traffic")
def build_receipts_handler(self):
return ReceiptsHandler(self)
def build_read_marker_handler(self):
return ReadMarkerHandler(self)
def build_tcp_replication(self):
raise NotImplementedError()
def build_action_generator(self):
return ActionGenerator(self)
def build_user_directory_handler(self):
return UserDirectoryHandler(self)
def build_groups_local_handler(self):
return GroupsLocalHandler(self)
def build_groups_server_handler(self):
return GroupsServerHandler(self)
def build_groups_attestation_signing(self):
return GroupAttestationSigning(self)
def build_groups_attestation_renewer(self):
return GroupAttestionRenewer(self)
def build_secrets(self):
return Secrets()
def build_stats_handler(self):
return StatsHandler(self)
def build_spam_checker(self):
return SpamChecker(self)
def build_third_party_event_rules(self):
return ThirdPartyEventRules(self)
def build_room_member_handler(self):
if self.config.worker_app:
return RoomMemberWorkerHandler(self)
return RoomMemberMasterHandler(self)
def build_federation_registry(self):
if self.config.worker_app:
return ReplicationFederationHandlerRegistry(self)
else:
return FederationHandlerRegistry()
def build_server_notices_manager(self):
if self.config.worker_app:
raise Exception("Workers cannot send server notices")
return ServerNoticesManager(self)
def build_server_notices_sender(self):
if self.config.worker_app:
return WorkerServerNoticesSender(self)
return ServerNoticesSender(self)
def build_message_handler(self):
return MessageHandler(self)
def build_pagination_handler(self):
return PaginationHandler(self)
def build_room_context_handler(self):
return RoomContextHandler(self)
def build_registration_handler(self):
return RegistrationHandler(self)
def build_account_validity_handler(self):
return AccountValidityHandler(self)
def build_saml_handler(self):
from synapse.handlers.saml_handler import SamlHandler
return SamlHandler(self)
def build_event_client_serializer(self):
return EventClientSerializer(self)
def build_storage(self) -> Storage:
return Storage(self, self.datastores)
def remove_pusher(self, app_id, push_key, user_id):
return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
def should_send_federation(self):
"Should this server be sending federation traffic directly?"
return self.config.send_federation and (
not self.config.worker_app
or self.config.worker_app == "synapse.app.federation_sender"
)
def _make_dependency_method(depname):
def _get(hs):
try:
return getattr(hs, depname)
except AttributeError:
pass
try:
builder = getattr(hs, "build_%s" % (depname))
except AttributeError:
builder = None
if builder:
# Prevent cyclic dependencies from deadlocking
if depname in hs._building:
raise ValueError("Cyclic dependency while building %s" % (depname,))
hs._building[depname] = 1
dep = builder()
setattr(hs, depname, dep)
del hs._building[depname]
return dep
raise NotImplementedError(
"%s has no %s nor a builder for it" % (type(hs).__name__, depname)
)
setattr(HomeServer, "get_%s" % (depname), _get)
# Build magic accessors for every dependency
for depname in HomeServer.DEPENDENCIES:
_make_dependency_method(depname)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
functions/fl_client/google/main.py | import sys
import json
import numpy as np
from bson.binary import Binary
import pickle
import requests
import tensorflow as tf
from tensorflow import keras
import os
import bson
from bson.json_util import dumps
np.random.seed(2)
class Client:
client_id = 0
def __init__(self, lr, optim, local_epochs, model, server_weights, data_client,
train_images_url,
train_labels_url,
test_images_url,
test_labels_url,
input_size,
hidden_size,
output_size,
input_shape_x,
input_shape_y,
batch_size,
):
self.proxies = {
"http": "http://proxy.in.tum.de:8080/",
"https": "http://proxy.in.tum.de:8080/",
"ftp": "ftp://proxy.in.tum.de:8080/",
"no_proxy": "172.24.65.16"
}
self.data_client = data_client
self.server_weights = server_weights
self.train_images_url = train_images_url
self.train_labels_url = train_labels_url
self.test_images_url = test_images_url
self.test_labels_url = test_labels_url
self.config = {}
self.config['input_size'] = input_size
self.config['hidden_size'] = hidden_size
self.config['output_size'] = output_size
self.config['input_shape'] = (input_shape_x, input_shape_y, 1)
self.config['batch_size'] = batch_size
self.config['model'] = model
self.config['lr'] = lr
self.config['optim'] = optim
self.config['local_epochs'] = local_epochs
train_images = pickle.loads(requests.get(self.train_images_url, allow_redirects=True).content)
train_labels = pickle.loads(requests.get(self.train_labels_url, allow_redirects=True).content)
self.all_samples = self.get_data_from_server()
self.X = train_images[self.all_samples]
self.y = train_labels[self.all_samples]
self.y = self.y.astype('int32')
self.X = self.X / 255.0
if self.config['model'] == "cnn":
self.X = self.X .reshape(self.X .shape[0], input_shape_x, input_shape_y, 1)
elif self.config['model'] == "mnistnn":
self.X = self.X.reshape(-1, input_shape_x * input_shape_y)
# self.datapoints = 2500
def create_model(self):
model = tf.keras.models.Sequential([
keras.layers.Dense(self.config['hidden_size'], activation='relu', input_shape=(self.config['input_size'],)),
keras.layers.Dense(self.config['output_size'])
])
optim = self.config['optim']
lr = float(self.config['lr'])
if optim == "adam":
opt = tf.keras.optimizers.Adam(lr=lr)
elif optim == "ndam":
opt = tf.keras.optimizers.Nadam(lr=lr)
elif optim == "sgd":
opt = tf.keras.optimizers.SGD(lr=lr)
model.compile(optimizer=opt,
loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
self.model = model
def get_data_from_server(self):
if(self.data_client):
indices = np.fromiter(self.data_client, np.int32)
return indices
else:
print("Data for the client, does not exist")
def create_model_cnn(self):
model = tf.keras.models.Sequential([
keras.layers.Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=self.config['input_shape']),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Conv2D(64, kernel_size=(5, 5), activation='relu'),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(512, activation='relu'),
keras.layers.Dense(self.config['output_size'])
])
optim = self.config['optim']
lr = float(self.config['lr'])
if optim == "adam":
opt = tf.keras.optimizers.Adam(lr=lr)
elif optim == "ndam":
opt = tf.keras.optimizers.Nadam(lr=lr)
elif optim == "sgd":
opt = tf.keras.optimizers.SGD(lr=lr)
model.compile(optimizer=opt,
loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
self.model = model
def get_model_weights_cardinality(self):
return self.model.get_weights(), self.cardinality
def set_model_weights(self, weights):
# self.model.set_weights(list(weights.values()))
self.model.set_weights(weights)
def create_datasetObject(self):
# dataset_train = tf.data.Dataset.from_tensor_slices((self.X[self.all_samples], self.y[self.all_samples]))
dataset_train = tf.data.Dataset.from_tensor_slices((self.X, self.y))
dataset_train = dataset_train.shuffle(len(self.y))
dataset_train = dataset_train.batch(self.config['batch_size'])
# dataset_val = tf.data.Dataset.from_tensor_slices((self.X_val, self.y_val))
# dataset_val = dataset_val.shuffle(len(self.y))
self.dataset_train = dataset_train
self.cardinality = tf.data.experimental.cardinality(self.dataset_train).numpy()*self.config['batch_size']
print("Cardinality Client {0} is {1}".format(self.client_id, tf.data.experimental.cardinality(self.dataset_train).numpy()))
return self.dataset_train
def request_update(self):
# X = self.X[self.all_samples]
# y = self.y[self.all_samples]
dataset_train = self.create_datasetObject()
# X_val =
# y_val = self.y_val
epochs = int(self.config['local_epochs'])
self.model.fit(dataset_train,
epochs=epochs,
batch_size=self.config['batch_size']
)
def get_weights_from_server(self):
weights = []
if(self.server_weights):
weights = pickle.loads(self.server_weights)
else:
print("No weights for Server exist, initialize it first")
return weights
def write_updated_weights_client(self, weights, cardinality):
weights_serialized = Binary(pickle.dumps(weights, protocol=2), subtype=128)
new_values = {'weights':weights_serialized, 'cardinality': int(cardinality)}
#print("Data updated with id {0} for Client {1}".format(new_values, client_id))
return new_values
def main(request):
request_json = bson.BSON(request.data).decode()
try:
client_obj = Client(request_json["lr"],
request_json["optim"],
request_json["local_epochs"],
request_json["model"],
request_json["server"],
request_json["client"],
request_json["train_images_url"],
request_json["train_labels_url"],
request_json["test_images_url"],
request_json["test_labels_url"],
request_json["data_sampling"]["input_size"],
request_json["data_sampling"]["hidden_size"],
request_json["data_sampling"]["output_size"],
request_json["data_sampling"]["input_shape_x"],
request_json["data_sampling"]["input_shape_y"],
request_json["data_sampling"]["batch_size"])
    except Exception:
        return {'Error': 'Invalid request: could not build the federated learning client from the supplied parameters.'}
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# client.create_model(config['hidden_size'], config['input_size'], config['output_size'])
if client_obj.config['model'] == "cnn":
client_obj.create_model_cnn()
else:
client_obj.create_model()
server_weights_updated = client_obj.get_weights_from_server()
client_obj.set_model_weights(server_weights_updated)
client_obj.request_update()
updated_weights, cardinality = client_obj.get_model_weights_cardinality()
new_weights = client_obj.write_updated_weights_client(updated_weights, cardinality)
new_weights = bson.BSON.encode(new_weights)
return dumps(new_weights)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
pkg/exchange/max/exchange.go | package max
import (
"context"
"fmt"
"math"
"os"
"sort"
"strconv"
"time"
"github.com/google/uuid"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/time/rate"
"github.com/ycdesu/spreaddog/pkg/datatype"
maxapi "github.com/ycdesu/spreaddog/pkg/exchange/max/maxapi"
"github.com/ycdesu/spreaddog/pkg/fixedpoint"
"github.com/ycdesu/spreaddog/pkg/types"
"github.com/ycdesu/spreaddog/pkg/util"
)
var closedOrderQueryLimiter = rate.NewLimiter(rate.Every(6*time.Second), 1)
var tradeQueryLimiter = rate.NewLimiter(rate.Every(4*time.Second), 1)
var accountQueryLimiter = rate.NewLimiter(rate.Every(5*time.Second), 1)
var marketDataLimiter = rate.NewLimiter(rate.Every(5*time.Second), 1)
var log = logrus.WithField("exchange", "max")
type Exchange struct {
client *maxapi.RestClient
key, secret string
}
func New(key, secret string) *Exchange {
baseURL := maxapi.ProductionAPIURL
if override := os.Getenv("MAX_API_BASE_URL"); len(override) > 0 {
baseURL = override
}
client := maxapi.NewRestClient(baseURL)
client.Auth(key, secret)
return &Exchange{
client: client,
key: key,
secret: secret,
}
}
func (e *Exchange) Name() types.ExchangeName {
return types.ExchangeMax
}
func (e *Exchange) QueryTicker(ctx context.Context, symbol string) (*types.Ticker, error) {
ticker, err := e.client.PublicService.Ticker(toLocalSymbol(symbol))
if err != nil {
return nil, err
}
return &types.Ticker{
Time: ticker.Time,
Volume: util.MustParseFloat(ticker.Volume),
Last: util.MustParseFloat(ticker.Last),
Open: util.MustParseFloat(ticker.Open),
High: util.MustParseFloat(ticker.High),
Low: util.MustParseFloat(ticker.Low),
Buy: util.MustParseFloat(ticker.Buy),
Sell: util.MustParseFloat(ticker.Sell),
}, nil
}
func (e *Exchange) QueryTickers(ctx context.Context, symbol ...string) (map[string]types.Ticker, error) {
if err := marketDataLimiter.Wait(ctx); err != nil {
return nil, err
}
var tickers = make(map[string]types.Ticker)
if len(symbol) == 1 {
ticker, err := e.QueryTicker(ctx, symbol[0])
if err != nil {
return nil, err
}
tickers[toGlobalSymbol(symbol[0])] = *ticker
} else {
maxTickers, err := e.client.PublicService.Tickers()
if err != nil {
return nil, err
}
m := make(map[string]struct{})
exists := struct{}{}
for _, s := range symbol {
m[toGlobalSymbol(s)] = exists
}
for k, v := range maxTickers {
if _, ok := m[toGlobalSymbol(k)]; len(symbol) != 0 && !ok {
continue
}
tickers[toGlobalSymbol(k)] = types.Ticker{
Time: v.Time,
Volume: util.MustParseFloat(v.Volume),
Last: util.MustParseFloat(v.Last),
Open: util.MustParseFloat(v.Open),
High: util.MustParseFloat(v.High),
Low: util.MustParseFloat(v.Low),
Buy: util.MustParseFloat(v.Buy),
Sell: util.MustParseFloat(v.Sell),
}
}
}
return tickers, nil
}
func (e *Exchange) QueryMarkets(ctx context.Context) (types.MarketMap, error) {
log.Info("querying market info...")
remoteMarkets, err := e.client.PublicService.Markets()
if err != nil {
return nil, err
}
markets := types.MarketMap{}
for _, m := range remoteMarkets {
symbol := toGlobalSymbol(m.ID)
market := types.Market{
Symbol: symbol,
PricePrecision: m.QuoteUnitPrecision,
VolumePrecision: m.BaseUnitPrecision,
QuoteCurrency: toGlobalCurrency(m.QuoteUnit),
BaseCurrency: toGlobalCurrency(m.BaseUnit),
MinNotional: m.MinQuoteAmount,
MinAmount: m.MinQuoteAmount,
MinQuantity: m.MinBaseAmount,
MaxQuantity: 10000.0,
StepSize: 1.0 / math.Pow10(m.BaseUnitPrecision), // make it like 0.0001
MinPrice: 1.0 / math.Pow10(m.QuoteUnitPrecision), // used in the price formatter
MaxPrice: 10000.0,
TickSize: 1.0 / math.Pow10(m.QuoteUnitPrecision),
}
markets[symbol] = market
}
return markets, nil
}
func (e *Exchange) NewStream() types.Stream {
return NewStream(e.key, e.secret)
}
func (e *Exchange) QueryOpenOrders(ctx context.Context, symbol string) (orders []types.Order, err error) {
maxOrders, err := e.client.OrderService.Open(toLocalSymbol(symbol), maxapi.QueryOrderOptions{})
if err != nil {
return orders, err
}
for _, maxOrder := range maxOrders {
order, err := toGlobalOrder(maxOrder)
if err != nil {
return orders, err
}
orders = append(orders, *order)
}
return orders, err
}
// lastOrderID is not supported on MAX
func (e *Exchange) QueryClosedOrders(ctx context.Context, symbol string, since, until time.Time, lastOrderID uint64) (orders []types.Order, err error) {
if err := closedOrderQueryLimiter.Wait(ctx); err != nil {
return nil, err
}
numBatches := 3
limit := 1000 // max limit = 1000
offset := limit * numBatches
orderIDs := make(map[uint64]struct{}, limit*2)
for ; offset > 0; offset -= limit {
log.Infof("querying %s closed orders offset %d ~ ", symbol, offset)
maxOrders, err := e.client.OrderService.Closed(toLocalSymbol(symbol), maxapi.QueryOrderOptions{
Offset: offset,
Limit: limit,
})
if err != nil {
return orders, err
}
if len(maxOrders) == 0 {
break
}
for _, maxOrder := range maxOrders {
if maxOrder.CreatedAt.Before(since) {
continue
}
if maxOrder.CreatedAt.After(until) {
return orders, err
}
order, err := toGlobalOrder(maxOrder)
if err != nil {
return orders, err
}
if _, ok := orderIDs[order.OrderID]; ok {
log.Infof("skipping duplicated order: %d", order.OrderID)
}
orderIDs[order.OrderID] = struct{}{}
orders = append(orders, *order)
}
}
return orders, err
}
func (e *Exchange) CancelAllOrders(ctx context.Context) ([]types.Order, error) {
var req = e.client.OrderService.NewOrderCancelAllRequest()
var maxOrders, err = req.Do(ctx)
if err != nil {
return nil, err
}
return toGlobalOrders(maxOrders)
}
func (e *Exchange) CancelOrdersBySymbol(ctx context.Context, symbol string) ([]types.Order, error) {
var req = e.client.OrderService.NewOrderCancelAllRequest()
req.Market(toLocalSymbol(symbol))
var maxOrders, err = req.Do(ctx)
if err != nil {
return nil, err
}
return toGlobalOrders(maxOrders)
}
func (e *Exchange) CancelOrdersByGroupID(ctx context.Context, groupID uint32) ([]types.Order, error) {
var req = e.client.OrderService.NewOrderCancelAllRequest()
req.GroupID(groupID)
var maxOrders, err = req.Do(ctx)
if err != nil {
return nil, err
}
return toGlobalOrders(maxOrders)
}
func (e *Exchange) CancelOrders(ctx context.Context, orders ...types.Order) (err2 error) {
var groupIDs = make(map[uint32]struct{})
var orphanOrders []types.Order
for _, o := range orders {
if o.GroupID > 0 {
groupIDs[o.GroupID] = struct{}{}
} else {
orphanOrders = append(orphanOrders, o)
}
}
if len(groupIDs) > 0 {
for groupID := range groupIDs {
var req = e.client.OrderService.NewOrderCancelAllRequest()
req.GroupID(groupID)
if _, err := req.Do(ctx); err != nil {
log.WithError(err).Errorf("group id order cancel error")
err2 = err
}
}
}
for _, o := range orphanOrders {
var req = e.client.OrderService.NewOrderCancelRequest()
if o.OrderID > 0 {
req.ID(o.OrderID)
} else if len(o.ClientOrderID) > 0 {
req.ClientOrderID(o.ClientOrderID)
} else {
return fmt.Errorf("order id or client order id is not defined, order=%+v", o)
}
if err := req.Do(ctx); err != nil {
log.WithError(err).Errorf("order cancel error")
err2 = err
}
}
return err2
}
func toMaxSubmitOrder(o types.SubmitOrder) (*maxapi.Order, error) {
symbol := toLocalSymbol(o.Symbol)
orderType, err := toLocalOrderType(o.Type)
if err != nil {
return nil, err
}
clientOrderID := o.ClientOrderID
if len(clientOrderID) == 0 {
clientOrderID = uuid.New().String()
}
volumeInString := o.QuantityString
if len(volumeInString) == 0 {
if o.Market.Symbol != "" {
volumeInString = o.Market.FormatQuantity(o.Quantity)
} else {
volumeInString = strconv.FormatFloat(o.Quantity, 'f', 8, 64)
}
}
maxOrder := maxapi.Order{
Market: symbol,
Side: toLocalSideType(o.Side),
OrderType: orderType,
// Price: priceInString,
Volume: volumeInString,
GroupID: o.GroupID,
ClientOID: clientOrderID,
}
switch o.Type {
case types.OrderTypeStopLimit, types.OrderTypeLimit, types.OrderTypeLimitMaker:
priceInString := o.PriceString
if len(priceInString) == 0 {
if o.Market.Symbol != "" {
priceInString = o.Market.FormatPrice(o.Price)
} else {
priceInString = strconv.FormatFloat(o.Price, 'f', 8, 64)
}
}
maxOrder.Price = priceInString
}
	// set stop price field for stop orders
switch o.Type {
case types.OrderTypeStopLimit, types.OrderTypeStopMarket:
if len(o.StopPriceString) == 0 {
return nil, fmt.Errorf("stop price string can not be empty")
}
priceInString := o.StopPriceString
if len(priceInString) == 0 {
if o.Market.Symbol != "" {
priceInString = o.Market.FormatPrice(o.StopPrice)
} else {
priceInString = strconv.FormatFloat(o.StopPrice, 'f', 8, 64)
}
}
maxOrder.StopPrice = priceInString
}
return &maxOrder, nil
}
func (e *Exchange) SubmitOrders(ctx context.Context, orders ...types.SubmitOrder) (createdOrders types.OrderSlice, err error) {
if len(orders) <= 10 {
var ordersBySymbol = map[string][]maxapi.Order{}
for _, o := range orders {
maxOrder, err := toMaxSubmitOrder(o)
if err != nil {
return nil, err
}
ordersBySymbol[maxOrder.Market] = append(ordersBySymbol[maxOrder.Market], *maxOrder)
}
for symbol, orders := range ordersBySymbol {
req := e.client.OrderService.NewCreateMultiOrderRequest()
req.Market(symbol)
req.AddOrders(orders...)
orderResponses, err := req.Do(ctx)
if err != nil {
return createdOrders, err
}
for _, resp := range *orderResponses {
if len(resp.Error) > 0 {
log.Errorf("multi-order submit error: %s", resp.Error)
continue
}
o, err := toGlobalOrder(resp.Order)
if err != nil {
return createdOrders, err
}
createdOrders = append(createdOrders, *o)
}
}
return createdOrders, nil
}
for _, order := range orders {
maxOrder, err := toMaxSubmitOrder(order)
if err != nil {
return createdOrders, err
}
// TODO: replace OrderType string type
req := e.client.OrderService.NewCreateOrderRequest().
Market(maxOrder.Market).
Side(maxOrder.Side).
OrderType(string(maxOrder.OrderType)).
ClientOrderID(maxOrder.ClientOID)
if len(maxOrder.Volume) > 0 {
req.Volume(maxOrder.Volume)
}
if len(maxOrder.Price) > 0 {
req.Price(maxOrder.Price)
}
if len(maxOrder.StopPrice) > 0 {
req.StopPrice(maxOrder.StopPrice)
}
retOrder, err := req.Do(ctx)
if err != nil {
return createdOrders, err
}
if retOrder == nil {
return createdOrders, errors.New("returned nil order")
}
createdOrder, err := toGlobalOrder(*retOrder)
if err != nil {
return createdOrders, err
}
createdOrders = append(createdOrders, *createdOrder)
}
return createdOrders, err
}
// PlatformFeeCurrency
func (e *Exchange) PlatformFeeCurrency() string {
return toGlobalCurrency("max")
}
func (e *Exchange) getLaunchDate() (time.Time, error) {
	// MAX launch date: June 21st, 2018
loc, err := time.LoadLocation("Asia/Taipei")
if err != nil {
return time.Time{}, err
}
return time.Date(2018, time.June, 21, 0, 0, 0, 0, loc), nil
}
func (e *Exchange) QueryAccount(ctx context.Context) (*types.Account, error) {
if err := accountQueryLimiter.Wait(ctx); err != nil {
return nil, err
}
userInfo, err := e.client.AccountService.Me()
if err != nil {
return nil, err
}
var balances = make(types.BalanceMap)
for _, a := range userInfo.Accounts {
balances[toGlobalCurrency(a.Currency)] = types.Balance{
Currency: toGlobalCurrency(a.Currency),
Available: fixedpoint.Must(fixedpoint.NewFromString(a.Balance)),
Locked: fixedpoint.Must(fixedpoint.NewFromString(a.Locked)),
}
}
vipLevel, err := e.client.AccountService.VipLevel()
if err != nil {
return nil, err
}
// MAX returns the fee rate in the following format:
// "maker_fee": 0.0005 -> 0.05%
// "taker_fee": 0.0015 -> 0.15%
a := &types.Account{
MakerCommission: fixedpoint.NewFromFloat(vipLevel.Current.MakerFee), // 0.15% = 0.0015
TakerCommission: fixedpoint.NewFromFloat(vipLevel.Current.TakerFee), // 0.15% = 0.0015
}
a.UpdateBalances(balances)
return a, nil
}
func (e *Exchange) QueryWithdrawHistory(ctx context.Context, asset string, since, until time.Time) (allWithdraws []types.Withdraw, err error) {
startTime := since
limit := 1000
txIDs := map[string]struct{}{}
emptyTime := time.Time{}
if startTime == emptyTime {
startTime, err = e.getLaunchDate()
if err != nil {
return nil, err
}
}
for startTime.Before(until) {
		// startTime ~ endTime must be within 60 days
endTime := startTime.AddDate(0, 0, 60)
if endTime.After(until) {
endTime = until
}
log.Infof("querying withdraw %s: %s <=> %s", asset, startTime, endTime)
req := e.client.AccountService.NewGetWithdrawalHistoryRequest()
if len(asset) > 0 {
req.Currency(toLocalCurrency(asset))
}
withdraws, err := req.
From(startTime.Unix()).
To(endTime.Unix()).
Limit(limit).
Do(ctx)
if err != nil {
return allWithdraws, err
}
if len(withdraws) == 0 {
startTime = endTime
continue
}
for i := len(withdraws) - 1; i >= 0; i-- {
d := withdraws[i]
if _, ok := txIDs[d.TxID]; ok {
continue
}
// we can convert this later
status := d.State
switch d.State {
case "confirmed":
status = "completed" // make it compatible with binance
case "submitting", "submitted", "accepted",
"rejected", "suspect", "approved", "delisted_processing",
"processing", "retryable", "sent", "canceled",
"failed", "pending",
"kgi_manually_processing", "kgi_manually_confirmed", "kgi_possible_failed",
"sygna_verifying":
default:
status = d.State
}
txIDs[d.TxID] = struct{}{}
withdraw := types.Withdraw{
Exchange: types.ExchangeMax,
ApplyTime: datatype.Time(time.Unix(d.CreatedAt, 0)),
Asset: toGlobalCurrency(d.Currency),
Amount: util.MustParseFloat(d.Amount),
Address: "",
AddressTag: "",
TransactionID: d.TxID,
TransactionFee: util.MustParseFloat(d.Fee),
TransactionFeeCurrency: d.FeeCurrency,
// WithdrawOrderID: d.WithdrawOrderID,
// Network: d.Network,
Status: status,
}
allWithdraws = append(allWithdraws, withdraw)
}
// go next time frame
if len(withdraws) < limit {
startTime = endTime
} else {
// its in descending order, so we get the first record
startTime = time.Unix(withdraws[0].CreatedAt, 0)
}
}
return allWithdraws, nil
}
func (e *Exchange) QueryDepositHistory(ctx context.Context, asset string, since, until time.Time) (allDeposits []types.Deposit, err error) {
startTime := since
limit := 1000
txIDs := map[string]struct{}{}
emptyTime := time.Time{}
if startTime == emptyTime {
startTime, err = e.getLaunchDate()
if err != nil {
return nil, err
}
}
for startTime.Before(until) {
		// startTime ~ endTime must be within 90 days; step by 60 days to stay well within the limit
endTime := startTime.AddDate(0, 0, 60)
if endTime.After(until) {
endTime = until
}
log.Infof("querying deposit history %s: %s <=> %s", asset, startTime, endTime)
req := e.client.AccountService.NewGetDepositHistoryRequest()
if len(asset) > 0 {
req.Currency(toLocalCurrency(asset))
}
deposits, err := req.
From(startTime.Unix()).
To(endTime.Unix()).
Limit(limit).
Do(ctx)
if err != nil {
return nil, err
}
for i := len(deposits) - 1; i >= 0; i-- {
d := deposits[i]
if _, ok := txIDs[d.TxID]; ok {
continue
}
allDeposits = append(allDeposits, types.Deposit{
Exchange: types.ExchangeMax,
Time: datatype.Time(time.Unix(d.CreatedAt, 0)),
Amount: util.MustParseFloat(d.Amount),
Asset: toGlobalCurrency(d.Currency),
Address: "", // not supported
AddressTag: "", // not supported
TransactionID: d.TxID,
Status: toGlobalDepositStatus(d.State),
})
}
if len(deposits) < limit {
startTime = endTime
} else {
startTime = time.Unix(deposits[0].CreatedAt, 0)
}
}
return allDeposits, err
}
func (e *Exchange) QueryAccountBalances(ctx context.Context) (types.BalanceMap, error) {
if err := accountQueryLimiter.Wait(ctx); err != nil {
return nil, err
}
accounts, err := e.client.AccountService.Accounts()
if err != nil {
return nil, err
}
var balances = make(types.BalanceMap)
for _, a := range accounts {
balances[toGlobalCurrency(a.Currency)] = types.Balance{
Currency: toGlobalCurrency(a.Currency),
Available: fixedpoint.Must(fixedpoint.NewFromString(a.Balance)),
Locked: fixedpoint.Must(fixedpoint.NewFromString(a.Locked)),
}
}
return balances, nil
}
func (e *Exchange) QueryTrades(ctx context.Context, symbol string, options *types.TradeQueryOptions) (trades []types.Trade, err error) {
if err := tradeQueryLimiter.Wait(ctx); err != nil {
return nil, err
}
req := e.client.TradeService.NewPrivateTradeRequest()
req.Market(toLocalSymbol(symbol))
if options.Limit > 0 {
req.Limit(options.Limit)
} else {
req.Limit(1000)
}
// MAX uses exclusive last trade ID
if options.LastTradeID > 0 {
req.From(options.LastTradeID)
}
// make it compatible with binance, we need the last trade id for the next page.
req.OrderBy("asc")
remoteTrades, err := req.Do(ctx)
if err != nil {
return nil, err
}
for _, t := range remoteTrades {
localTrade, err := toGlobalTrade(t)
if err != nil {
			log.WithError(err).Errorf("can not convert trade: %+v", t)
continue
}
trades = append(trades, *localTrade)
}
return trades, nil
}
func (e *Exchange) QueryRewards(ctx context.Context, startTime time.Time) ([]types.Reward, error) {
var from = startTime
var emptyTime = time.Time{}
if from == emptyTime {
from = time.Unix(maxapi.TimestampSince, 0)
}
var now = time.Now()
for {
if from.After(now) {
return nil, nil
}
// scan by 30 days
		// a user might get at most 14 commission records per currency per day
// limit 1000 / 14 = 71 days
to := from.Add(time.Hour * 24 * 30)
req := e.client.RewardService.NewRewardsRequest()
req.From(from.Unix())
req.To(to.Unix())
req.Limit(1000)
maxRewards, err := req.Do(ctx)
if err != nil {
return nil, err
}
if len(maxRewards) == 0 {
// next page
from = to
continue
}
rewards, err := toGlobalRewards(maxRewards)
if err != nil {
return nil, err
}
// sort them in the ascending order
sort.Sort(types.RewardSliceByCreationTime(rewards))
return rewards, nil
}
return nil, errors.New("unknown error")
}
func (e *Exchange) QueryKLines(ctx context.Context, symbol string, interval types.Interval, options types.KLineQueryOptions) ([]types.KLine, error) {
if err := marketDataLimiter.Wait(ctx); err != nil {
return nil, err
}
var limit = 5000
if options.Limit > 0 {
// default limit == 500
limit = options.Limit
}
// workaround for the kline query, because MAX does not support query by end time
// so we need to use the given end time and the limit number to calculate the start time
if options.EndTime != nil && options.StartTime == nil {
startTime := options.EndTime.Add(-time.Duration(limit) * interval.Duration())
options.StartTime = &startTime
}
if options.StartTime == nil {
return nil, errors.New("start time can not be empty")
}
log.Infof("querying kline %s %s %+v", symbol, interval, options)
localKLines, err := e.client.PublicService.KLines(toLocalSymbol(symbol), string(interval), *options.StartTime, limit)
if err != nil {
return nil, err
}
var kLines []types.KLine
for _, k := range localKLines {
kLines = append(kLines, k.KLine())
}
return kLines, nil
}
func (e *Exchange) QueryAveragePrice(ctx context.Context, symbol string) (float64, error) {
ticker, err := e.client.PublicService.Ticker(toLocalSymbol(symbol))
if err != nil {
return 0, err
}
return (util.MustParseFloat(ticker.Sell) + util.MustParseFloat(ticker.Buy)) / 2, nil
}
| [
"\"MAX_API_BASE_URL\""
]
| []
| [
"MAX_API_BASE_URL"
]
| [] | ["MAX_API_BASE_URL"] | go | 1 | 0 | |
interp/interp_test.go | // Copyright (c) 2017, Daniel Martí <[email protected]>
// See LICENSE for licensing information
package interp
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"mvdan.cc/sh/v3/expand"
"mvdan.cc/sh/v3/syntax"
)
func parse(tb testing.TB, parser *syntax.Parser, src string) *syntax.File {
if parser == nil {
parser = syntax.NewParser()
}
file, err := parser.Parse(strings.NewReader(src), "")
if err != nil {
tb.Fatal(err)
}
return file
}
func BenchmarkRun(b *testing.B) {
b.ReportAllocs()
b.StopTimer()
src := `
echo a b c d
echo ./$foo/etc $(echo foo bar)
foo="bar"
x=y :
fn() {
local a=b
for i in 1 2 3; do
echo $i | cat
done
}
[[ $foo == bar ]] && fn
echo a{b,c}d *.go
let i=(2 + 3)
`
file := parse(b, nil, src)
r, _ := New()
ctx := context.Background()
b.StartTimer()
for i := 0; i < b.N; i++ {
r.Reset()
if err := r.Run(ctx, file); err != nil {
b.Fatal(err)
}
}
}
var hasBash50 bool
func TestMain(m *testing.M) {
if os.Getenv("GOSH_PROG") != "" {
r := strings.NewReader(os.Args[1])
file, err := syntax.NewParser().Parse(r, "")
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
runner, _ := New(
StdIO(os.Stdin, os.Stdout, os.Stderr),
OpenHandler(testOpenHandler),
ExecHandler(testExecHandler),
)
ctx := context.Background()
if err := runner.Run(ctx, file); err != nil {
if status, ok := IsExitStatus(err); ok {
os.Exit(int(status))
}
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
os.Exit(0)
}
prog, err := os.Executable()
if err != nil {
panic(err)
}
os.Setenv("GOSH_PROG", prog)
os.Setenv("LANGUAGE", "en_US.UTF8")
os.Setenv("LC_ALL", "en_US.UTF8")
os.Unsetenv("CDPATH")
hasBash50 = checkBash()
os.Setenv("INTERP_GLOBAL", "value")
os.Setenv("MULTILINE_INTERP_GLOBAL", "\nwith\nnewlines\n\n")
// Double check that env vars on Windows are case insensitive.
if runtime.GOOS == "windows" {
os.Setenv("mixedCase_INTERP_GLOBAL", "value")
} else {
os.Setenv("MIXEDCASE_INTERP_GLOBAL", "value")
}
// Some program which should be in $PATH.
if runtime.GOOS == "windows" {
os.Setenv("PATH_PROG", "cmd")
} else {
os.Setenv("PATH_PROG", "sh")
}
// To print env vars. Only a builtin on Windows.
if runtime.GOOS == "windows" {
os.Setenv("ENV_PROG", "cmd /c set")
} else {
os.Setenv("ENV_PROG", "env")
}
for _, s := range []string{"a", "b", "c", "d", "foo", "bar"} {
os.Unsetenv(s)
}
exit := m.Run()
os.Exit(exit)
}
func checkBash() bool {
out, err := exec.Command("bash", "-c", "echo -n $BASH_VERSION").Output()
if err != nil {
return false
}
return strings.HasPrefix(string(out), "5.0")
}
// concBuffer wraps a bytes.Buffer in a mutex so that concurrent writes
// to it don't upset the race detector.
type concBuffer struct {
buf bytes.Buffer
sync.Mutex
}
func absPath(dir, path string) string {
if !filepath.IsAbs(path) {
path = filepath.Join(dir, path)
}
return filepath.Clean(path)
}
func (c *concBuffer) Write(p []byte) (int, error) {
c.Lock()
n, err := c.buf.Write(p)
c.Unlock()
return n, err
}
func (c *concBuffer) WriteString(s string) (int, error) {
c.Lock()
n, err := c.buf.WriteString(s)
c.Unlock()
return n, err
}
func (c *concBuffer) String() string {
c.Lock()
s := c.buf.String()
c.Unlock()
return s
}
func (c *concBuffer) Reset() {
c.Lock()
c.buf.Reset()
c.Unlock()
}
type runTest struct {
in, want string
}
var runTests = []runTest{
// no-op programs
{"", ""},
{"true", ""},
{":", ""},
{"exit", ""},
{"exit 0", ""},
{"{ :; }", ""},
{"(:)", ""},
// exit status codes
{"exit 1", "exit status 1"},
{"exit -1", "exit status 255"},
{"exit 300", "exit status 44"},
{"false", "exit status 1"},
{"false foo", "exit status 1"},
{"! false", ""},
{"true foo", ""},
{": foo", ""},
{"! true", "exit status 1"},
{"false; true", ""},
{"false; exit", "exit status 1"},
{"exit; echo foo", ""},
{"exit 0; echo foo", ""},
{"printf", "usage: printf format [arguments]\nexit status 2 #JUSTERR"},
{"break", "break is only useful in a loop #JUSTERR"},
{"continue", "continue is only useful in a loop #JUSTERR"},
{"cd a b", "usage: cd [dir]\nexit status 2 #JUSTERR"},
{"shift a", "usage: shift [n]\nexit status 2 #JUSTERR"},
{
"shouldnotexist",
"\"shouldnotexist\": executable file not found in $PATH\nexit status 127 #JUSTERR",
},
{
"for i in 1; do continue a; done",
"usage: continue [n]\nexit status 2 #JUSTERR",
},
{
"for i in 1; do break a; done",
"usage: break [n]\nexit status 2 #JUSTERR",
},
// we don't need to follow bash error strings
{"exit a", "invalid exit status code: \"a\"\nexit status 2 #JUSTERR"},
{"exit 1 2", "exit cannot take multiple arguments\nexit status 1 #JUSTERR"},
// echo
{"echo", "\n"},
{"echo a b c", "a b c\n"},
{"echo -n foo", "foo"},
{`echo -e '\t'`, "\t\n"},
{`echo -E '\t'`, "\\t\n"},
{"echo -x foo", "-x foo\n"},
{"echo -e -x -e foo", "-x -e foo\n"},
// printf
{"printf foo", "foo"},
{"printf %%", "%"},
{"printf %", "missing format char\nexit status 1 #JUSTERR"},
{"printf %; echo foo", "missing format char\nfoo\n #IGNORE"},
{"printf %1", "missing format char\nexit status 1 #JUSTERR"},
{"printf %+", "missing format char\nexit status 1 #JUSTERR"},
{"printf %B foo", "invalid format char: B\nexit status 1 #JUSTERR"},
{"printf %12-s foo", "invalid format char: -\nexit status 1 #JUSTERR"},
{"printf ' %s \n' bar", " bar \n"},
{"printf '\\A'", "\\A"},
{"printf %s foo", "foo"},
{"printf %s", ""},
{"printf %d,%i 3 4", "3,4"},
{"printf %d", "0"},
{"printf %d,%d 010 0x10", "8,16"},
{"printf %i,%u -3 -3", "-3,18446744073709551613"},
{"printf %o -3", "1777777777777777777775"},
{"printf %x -3", "fffffffffffffffd"},
{"printf %c,%c,%c foo àa", "f,\xc3,\x00"}, // TODO: use a rune?
{"printf %3s a", " a"},
{"printf %3i 1", " 1"},
{"printf %+i%+d 1 -3", "+1-3"},
{"printf %-5x 10", "a "},
{"printf %02x 1", "01"},
{"printf 'a% 5s' a", "a a"},
{"printf 'nofmt' 1 2 3", "nofmt"},
{"printf '%d_' 1 2 3", "1_2_3_"},
{"printf '%02d %02d\n' 1 2 3", "01 02\n03 00\n"},
// words and quotes
{"echo foo ", "foo\n"},
{"echo ' foo '", " foo \n"},
{`echo " foo "`, " foo \n"},
{`echo a'b'c"d"e`, "abcde\n"},
{`a=" b c "; echo $a`, "b c\n"},
{`a=" b c "; echo "$a"`, " b c \n"},
{`echo "$(echo ' b c ')"`, " b c \n"},
{"echo ''", "\n"},
{`$(echo)`, ""},
{`echo -n '\\'`, `\\`},
{`echo -n "\\"`, `\`},
{`set -- a b c; x="$@"; echo "$x"`, "a b c\n"},
{`set -- b c; echo a"$@"d`, "ab cd\n"},
{`count() { echo $#; }; set --; count "$@"`, "0\n"},
{`count() { echo $#; }; set -- ""; count "$@"`, "1\n"},
{`count() { echo $#; }; a=(); count "${a[@]}"`, "0\n"},
{`count() { echo $#; }; a=(""); count "${a[@]}"`, "1\n"},
{`echo $1 $3; set -- a b c; echo $1 $3`, "\na c\n"},
{`[[ $0 == "bash" || $0 == "gosh" ]]`, ""},
// dollar quotes
{`echo $'foo\nbar'`, "foo\nbar\n"},
{`echo $'\r\t\\'`, "\r\t\\\n"},
{`echo $"foo\nbar"`, "foo\\nbar\n"},
{`echo $'%s'`, "%s\n"},
{`a=$'\r\t\\'; echo "$a"`, "\r\t\\\n"},
{`a=$"foo\nbar"; echo "$a"`, "foo\\nbar\n"},
{`echo $'\a\b\e\E\f\v'`, "\a\b\x1b\x1b\f\v\n"},
{`echo $'\\\'\"\?'`, "\\'\"?\n"},
{`echo $'\1\45\12345\777\9'`, "\x01%S45\xff\\9\n"},
{`echo $'\x\xf\x09\xAB'`, "\\x\x0f\x09\xab\n"},
{`echo $'\u\uf\u09\uABCD\u00051234'`, "\\u\u000f\u0009\uabcd\u00051234\n"},
{`echo $'\U\Uf\U09\UABCD\U00051234'`, "\\U\u000f\u0009\uabcd\U00051234\n"},
// escaped chars
{"echo a\\b", "ab\n"},
{"echo a\\ b", "a b\n"},
{"echo \\$a", "$a\n"},
{"echo \"a\\b\"", "a\\b\n"},
{"echo 'a\\b'", "a\\b\n"},
{"echo \"a\\\nb\"", "ab\n"},
{"echo 'a\\\nb'", "a\\\nb\n"},
{`echo "\""`, "\"\n"},
{`echo \\`, "\\\n"},
{`echo \\\\`, "\\\\\n"},
// vars
{"foo=bar; echo $foo", "bar\n"},
{"foo=bar foo=etc; echo $foo", "etc\n"},
{"foo=bar; foo=etc; echo $foo", "etc\n"},
{"foo=bar; foo=; echo $foo", "\n"},
{"unset foo; echo $foo", "\n"},
{"foo=bar; unset foo; echo $foo", "\n"},
{"echo $INTERP_GLOBAL", "value\n"},
{"INTERP_GLOBAL=; echo $INTERP_GLOBAL", "\n"},
{"unset INTERP_GLOBAL; echo $INTERP_GLOBAL", "\n"},
{"echo $MIXEDCASE_INTERP_GLOBAL", "value\n"},
{"foo=bar; foo=x true; echo $foo", "bar\n"},
{"foo=bar; foo=x true; echo $foo", "bar\n"},
{"foo=bar; $ENV_PROG | grep '^foo='", "exit status 1"},
{"foo=bar $ENV_PROG | grep '^foo='", "foo=bar\n"},
{"foo=a foo=b $ENV_PROG | grep '^foo='", "foo=b\n"},
{"$ENV_PROG | grep '^INTERP_GLOBAL='", "INTERP_GLOBAL=value\n"},
{"INTERP_GLOBAL=new; $ENV_PROG | grep '^INTERP_GLOBAL='", "INTERP_GLOBAL=new\n"},
{"INTERP_GLOBAL=; $ENV_PROG | grep '^INTERP_GLOBAL='", "INTERP_GLOBAL=\n"},
{"a=b; a+=c x+=y; echo $a $x", "bc y\n"},
{`a=" x y"; b=$a c="$a"; echo $b; echo $c`, "x y\nx y\n"},
{`a=" x y"; b=$a c="$a"; echo "$b"; echo "$c"`, " x y\n x y\n"},
// TODO: reenable once we figure out the broken pipe error
//{`$ENV_PROG | while read line; do if test -z "$line"; then echo empty; fi; break; done`, ""}, // never begin with an empty element
// special vars
{"echo $?; false; echo $?", "0\n1\n"},
{"for i in 1 2; do\necho $LINENO\necho $LINENO\ndone", "2\n3\n2\n3\n"},
{"[[ -n $$ && $$ -gt 0 ]]", ""},
{"[[ $$ -eq $PPID ]]", "exit status 1"},
// var manipulation
{"echo ${#a} ${#a[@]}", "0 0\n"},
{"a=bar; echo ${#a} ${#a[@]}", "3 1\n"},
{"a=世界; echo ${#a}", "2\n"},
{"a=(a bcd); echo ${#a} ${#a[@]} ${#a[*]} ${#a[1]}", "1 2 2 3\n"},
{"set -- a bc; echo ${#@} ${#*} $#", "2 2 2\n"},
{
"echo ${!a}; echo more",
"invalid indirect expansion\nexit status 1 #JUSTERR",
},
{
"a=b; echo ${!a}; b=c; echo ${!a}",
"\nc\n",
},
{
"a=foo; echo ${a:1}; echo ${a: -1}; echo ${a: -10}; echo ${a:5}",
"oo\no\n\n\n",
},
{
"a=foo; echo ${a::2}; echo ${a::-1}; echo ${a: -10}; echo ${a::5}",
"fo\nfo\n\nfoo\n",
},
{
"a=abc; echo ${a:1:1}",
"b\n",
},
{
"a=foo; echo ${a/no/x} ${a/o/i} ${a//o/i} ${a/fo/}",
"foo fio fii o\n",
},
{
"a=foo; echo ${a/*/xx} ${a//?/na} ${a/o*}",
"xx nanana f\n",
},
{
"a=12345; echo ${a//[42]} ${a//[^42]} ${a//[!42]}",
"135 24 24\n",
},
{"a=0123456789; echo ${a//[1-35-8]}", "049\n"},
{"a=]abc]; echo ${a//[]b]}", "ac\n"},
{"a=-abc-; echo ${a//[-b]}", "ac\n"},
{`a='x\y'; echo ${a//\\}`, "xy\n"},
{"a=']'; echo ${a//[}", "]\n"},
{"a=']'; echo ${a//[]}", "]\n"},
{"a=']'; echo ${a//[]]}", "\n"},
{"a='['; echo ${a//[[]}", "\n"},
{"a=']'; echo ${a//[xy}", "]\n"},
{"a='abc123'; echo ${a//[[:digit:]]}", "abc\n"},
{"a='[[:wrong:]]'; echo ${a//[[:wrong:]]}", "[[:wrong:]]\n"},
{"a='[[:wrong:]]'; echo ${a//[[:}", "[[:wrong:]]\n"},
{"a='abcx1y'; echo ${a//x[[:digit:]]y}", "abc\n"},
{`a=xyz; echo "${a/y/a b}"`, "xa bz\n"},
{"a='foo/bar'; echo ${a//o*a/}", "fr\n"},
{
"echo ${a:-b}; echo $a; a=; echo ${a:-b}; a=c; echo ${a:-b}",
"b\n\nb\nc\n",
},
{
"echo ${#:-never} ${?:-never} ${LINENO:-never}",
"0 0 1\n",
},
{
"echo ${a-b}; echo $a; a=; echo ${a-b}; a=c; echo ${a-b}",
"b\n\n\nc\n",
},
{
"echo ${a:=b}; echo $a; a=; echo ${a:=b}; a=c; echo ${a:=b}",
"b\nb\nb\nc\n",
},
{
"echo ${a=b}; echo $a; a=; echo ${a=b}; a=c; echo ${a=b}",
"b\nb\n\nc\n",
},
{
"echo ${a:+b}; echo $a; a=; echo ${a:+b}; a=c; echo ${a:+b}",
"\n\n\nb\n",
},
{
"echo ${a+b}; echo $a; a=; echo ${a+b}; a=c; echo ${a+b}",
"\n\nb\nb\n",
},
{
"a=b; echo ${a:?err1}; a=; echo ${a:?err2}; unset a; echo ${a:?err3}",
"b\nerr2\nexit status 1 #JUSTERR",
},
{
"a=b; echo ${a?err1}; a=; echo ${a?err2}; unset a; echo ${a?err3}",
"b\n\nerr3\nexit status 1 #JUSTERR",
},
{
"echo ${a:?%s}",
"%s\nexit status 1 #JUSTERR",
},
{
"x=aaabccc; echo ${x#*a}; echo ${x##*a}",
"aabccc\nbccc\n",
},
{
"x=(__a _b c_); echo ${x[@]#_}",
"_a b c_\n",
},
{
"x=(a__ b_ _c); echo ${x[@]%%_}",
"a_ b _c\n",
},
{
"x=aaabccc; echo ${x%c*}; echo ${x%%c*}",
"aaabcc\naaab\n",
},
{
"x=aaabccc; echo ${x%%[bc}",
"aaabccc\n",
},
{
"a='àÉñ bAr'; echo ${a^}; echo ${a^^}",
"ÀÉñ bAr\nÀÉÑ BAR\n",
},
{
"a='àÉñ bAr'; echo ${a,}; echo ${a,,}",
"àÉñ bAr\nàéñ bar\n",
},
{
"a='àÉñ bAr'; echo ${a^?}; echo ${a^^[br]}",
"ÀÉñ bAr\nàÉñ BAR\n",
},
{
"a='àÉñ bAr'; echo ${a,?}; echo ${a,,[br]}",
"àÉñ bAr\nàÉñ bAr\n",
},
{
"a=(àÉñ bAr); echo ${a[@]^}; echo ${a[*],,}",
"ÀÉñ BAr\nàéñ bar\n",
},
{
"INTERP_X_1=a INTERP_X_2=b; echo ${!INTERP_X_*}",
"INTERP_X_1 INTERP_X_2\n",
},
{
"INTERP_X_2=b INTERP_X_1=a; echo ${!INTERP_*}",
"INTERP_GLOBAL INTERP_X_1 INTERP_X_2\n",
},
{
`a='b c'; eval "echo -n ${a} ${a@Q}"`,
`b c b c`,
},
{
`a='"\n'; printf "%s %s" "${a}" "${a@E}"`,
"\"\\n \"\n",
},
// if
{
"if true; then echo foo; fi",
"foo\n",
},
{
"if false; then echo foo; fi",
"",
},
{
"if false; then echo foo; fi",
"",
},
{
"if true; then echo foo; else echo bar; fi",
"foo\n",
},
{
"if false; then echo foo; else echo bar; fi",
"bar\n",
},
{
"if true; then false; fi",
"exit status 1",
},
{
"if false; then :; else false; fi",
"exit status 1",
},
{
"if false; then :; elif true; then echo foo; fi",
"foo\n",
},
{
"if false; then :; elif false; then :; elif true; then echo foo; fi",
"foo\n",
},
{
"if false; then :; elif false; then :; else echo foo; fi",
"foo\n",
},
// while
{
"while false; do echo foo; done",
"",
},
{
"while true; do exit 1; done",
"exit status 1",
},
{
"while true; do break; done",
"",
},
{
"while true; do while true; do break 2; done; done",
"",
},
// until
{
"until true; do echo foo; done",
"",
},
{
"until false; do exit 1; done",
"exit status 1",
},
{
"until false; do break; done",
"",
},
// for
{
"for i in 1 2 3; do echo $i; done",
"1\n2\n3\n",
},
{
"for i in 1 2 3; do echo $i; exit; done",
"1\n",
},
{
"for i in 1 2 3; do echo $i; false; done",
"1\n2\n3\nexit status 1",
},
{
"for i in 1 2 3; do echo $i; break; done",
"1\n",
},
{
"for i in 1 2 3; do echo $i; continue; echo foo; done",
"1\n2\n3\n",
},
{
"for i in 1 2; do for j in a b; do echo $i $j; continue 2; done; done",
"1 a\n2 a\n",
},
{
"for ((i=0; i<3; i++)); do echo $i; done",
"0\n1\n2\n",
},
// TODO: uncomment once expandEnv.Set starts returning errors
// {
// "readonly i; for ((i=0; i<3; i++)); do echo $i; done",
// "0\n1\n2\n",
// },
{
"for ((i=5; i>0; i--)); do echo $i; break; done",
"5\n",
},
{
"for i in 1 2; do for j in a b; do echo $i $j; done; break; done",
"1 a\n1 b\n",
},
{
"for i in 1 2 3; do :; done; echo $i",
"3\n",
},
{
"for ((i=0; i<3; i++)); do :; done; echo $i",
"3\n",
},
{
"set -- a 'b c'; for i in; do echo $i; done",
"",
},
{
"set -- a 'b c'; for i; do echo $i; done",
"a\nb c\n",
},
// block
{
"{ echo foo; }",
"foo\n",
},
{
"{ false; }",
"exit status 1",
},
// subshell
{
"(echo foo)",
"foo\n",
},
{
"(false)",
"exit status 1",
},
{
"(exit 1)",
"exit status 1",
},
{
"(foo=bar; echo $foo); echo $foo",
"bar\n\n",
},
{
"(echo() { printf 'bar\n'; }; echo); echo",
"bar\n\n",
},
{
"unset INTERP_GLOBAL & echo $INTERP_GLOBAL",
"value\n",
},
{
"(fn() { :; }) & pwd >/dev/null",
"",
},
{
"x[0]=x; (echo ${x[0]}; x[0]=y; echo ${x[0]}); echo ${x[0]}",
"x\ny\nx\n",
},
{
`x[3]=x; (x[3]=y); echo ${x[3]}`,
"x\n",
},
{
"shopt -s expand_aliases; alias f='echo x'\nf\n(f\nalias f='echo y'\neval f\n)\nf\n",
"x\nx\ny\nx\n",
},
{
"set -- a; echo $1; (echo $1; set -- b; echo $1); echo $1",
"a\na\nb\na\n",
},
// cd/pwd
{"[[ fo~ == 'fo~' ]]", ""},
{`[[ 'ab\c' == *\\* ]]`, ""},
{`[[ foo/bar == foo* ]]`, ""},
{"[[ a == [ab ]]", "exit status 1"},
{`HOME='/*'; echo ~; echo "$HOME"`, "/*\n/*\n"},
{`test -d ~`, ""},
{`foo=~; test -d $foo`, ""},
{`foo=~; test -d "$foo"`, ""},
{`foo='~'; test -d $foo`, "exit status 1"},
{`foo='~'; [ $foo == '~' ]`, ""},
{
`[[ ~ == "$HOME" ]] && [[ ~/foo == "$HOME/foo" ]]`,
"",
},
{
"[[ ~noexist == '~noexist' ]]",
"",
},
{
`w="$HOME"; cd; [[ $PWD == "$w" ]]`,
"",
},
{
`HOME=/foo; echo $HOME`,
"/foo\n",
},
{
"cd noexist",
"exit status 1 #JUSTERR",
},
{
"mkdir -p a/b && cd a && cd b && cd ../..",
"",
},
{
">a && cd a",
"exit status 1 #JUSTERR",
},
{
`[[ $PWD == "$(pwd)" ]]`,
"",
},
{
"PWD=changed; [[ $PWD == changed ]]",
"",
},
{
"PWD=changed; mkdir a; cd a; [[ $PWD == changed ]]",
"exit status 1",
},
{
`mkdir %s; old="$PWD"; cd %s; [[ $old == "$PWD" ]]`,
"exit status 1",
},
{
`old="$PWD"; mkdir a; cd a; cd ..; [[ $old == "$PWD" ]]`,
"",
},
{
`[[ $PWD == "$OLDPWD" ]]`,
"exit status 1",
},
{
`old="$PWD"; mkdir a; cd a; [[ $old == "$OLDPWD" ]]`,
"",
},
{
`mkdir a; ln -s a b; [[ $(cd a && pwd) == "$(cd b && pwd)" ]]; echo $?`,
"1\n",
},
// dirs/pushd/popd
{"set -- $(dirs); echo $# ${#DIRSTACK[@]}", "1 1\n"},
{"pushd", "pushd: no other directory\nexit status 1 #JUSTERR"},
{"pushd -n", ""},
{"pushd foo bar", "pushd: too many arguments\nexit status 2 #JUSTERR"},
{"pushd does-not-exist; set -- $(dirs); echo $#", "1\n #IGNORE"},
{"mkdir a; pushd a >/dev/null; set -- $(dirs); echo $#", "2\n"},
{"mkdir a; set -- $(pushd a); echo $#", "2\n"},
{
`mkdir a; pushd a >/dev/null; set -- $(dirs); [[ $1 == "$HOME" ]]`,
"exit status 1",
},
{
`mkdir a; pushd a >/dev/null; [[ ${DIRSTACK[0]} == "$HOME" ]]`,
"exit status 1",
},
{
`old=$(dirs); mkdir a; pushd a >/dev/null; pushd >/dev/null; set -- $(dirs); [[ $1 == "$old" ]]`,
"",
},
{
`old=$(dirs); mkdir a; pushd a >/dev/null; pushd -n >/dev/null; set -- $(dirs); [[ $1 == "$old" ]]`,
"exit status 1",
},
{
"mkdir a; pushd a >/dev/null; pushd >/dev/null; rm -r a; pushd",
"exit status 1 #JUSTERR",
},
{
`old=$(dirs); mkdir a; pushd -n a >/dev/null; set -- $(dirs); [[ $1 == "$old" ]]`,
"",
},
{
`old=$(dirs); mkdir a; pushd -n a >/dev/null; pushd >/dev/null; set -- $(dirs); [[ $1 == "$old" ]]`,
"exit status 1",
},
{"popd", "popd: directory stack empty\nexit status 1 #JUSTERR"},
{"popd -n", "popd: directory stack empty\nexit status 1 #JUSTERR"},
{"popd foo", "popd: invalid argument\nexit status 2 #JUSTERR"},
{"old=$(dirs); mkdir a; pushd a >/dev/null; set -- $(popd); echo $#", "1\n"},
{
`old=$(dirs); mkdir a; pushd a >/dev/null; popd >/dev/null; [[ $(dirs) == "$old" ]]`,
"",
},
{"old=$(dirs); mkdir a; pushd a >/dev/null; set -- $(popd -n); echo $#", "1\n"},
{
`old=$(dirs); mkdir a; pushd a >/dev/null; popd -n >/dev/null; [[ $(dirs) == "$old" ]]`,
"exit status 1",
},
{
"mkdir a; pushd a >/dev/null; pushd >/dev/null; rm -r a; popd",
"exit status 1 #JUSTERR",
},
// binary cmd
{
"true && echo foo || echo bar",
"foo\n",
},
{
"false && echo foo || echo bar",
"bar\n",
},
// func
{
"foo() { echo bar; }; foo",
"bar\n",
},
{
"foo() { echo $1; }; foo",
"\n",
},
{
"foo() { echo $1; }; foo a b",
"a\n",
},
{
"foo() { echo $1; bar c d; echo $2; }; bar() { echo $2; }; foo a b",
"a\nd\nb\n",
},
{
`foo() { echo $#; }; foo; foo 1 2 3; foo "a b"; echo $#`,
"0\n3\n1\n0\n",
},
{
`foo() { for a in $*; do echo "$a"; done }; foo 'a 1' 'b 2'`,
"a\n1\nb\n2\n",
},
{
`foo() { for a in "$*"; do echo "$a"; done }; foo 'a 1' 'b 2'`,
"a 1 b 2\n",
},
{
`foo() { for a in "foo$*"; do echo "$a"; done }; foo 'a 1' 'b 2'`,
"fooa 1 b 2\n",
},
{
`foo() { for a in $@; do echo "$a"; done }; foo 'a 1' 'b 2'`,
"a\n1\nb\n2\n",
},
{
`foo() { for a in "$@"; do echo "$a"; done }; foo 'a 1' 'b 2'`,
"a 1\nb 2\n",
},
// alias (note the input newlines)
{
"alias foo; alias foo=echo; alias foo; alias foo=; alias foo",
"alias: \"foo\" not found\nalias foo='echo'\nalias foo=''\n #IGNORE",
},
{
"shopt -s expand_aliases; alias foo=echo\nfoo foo; foo bar",
"foo\nbar\n",
},
{
"shopt -s expand_aliases; alias true=echo\ntrue foo; unalias true\ntrue bar",
"foo\n",
},
{
"shopt -s expand_aliases; alias echo='echo a'\necho b c",
"a b c\n",
},
{
"shopt -s expand_aliases; alias foo='echo '\nfoo foo; foo bar",
"echo\nbar\n",
},
// case
{
"case b in x) echo foo ;; a|b) echo bar ;; esac",
"bar\n",
},
{
"case b in x) echo foo ;; y|z) echo bar ;; esac",
"",
},
{
"case foo in bar) echo foo ;; *) echo bar ;; esac",
"bar\n",
},
{
"case foo in *o*) echo bar ;; esac",
"bar\n",
},
{
"case foo in '*') echo x ;; f*) echo y ;; esac",
"y\n",
},
// exec
{
"$GOSH_PROG 'echo foo'",
"foo\n",
},
{
"$GOSH_PROG 'echo foo >&2' >/dev/null",
"foo\n",
},
{
"echo foo | $GOSH_PROG 'cat >&2' >/dev/null",
"foo\n",
},
{
"$GOSH_PROG 'exit 1'",
"exit status 1",
},
{
"exec >/dev/null; echo foo",
"",
},
// return
{"return", "return: can only be done from a func or sourced script\nexit status 1 #JUSTERR"},
{"f() { return; }; f", ""},
{"f() { return 2; }; f", "exit status 2"},
{"f() { echo foo; return; echo bar; }; f", "foo\n"},
{"f1() { :; }; f2() { f1; return; }; f2", ""},
{"echo 'return' >a; source a", ""},
{"echo 'return' >a; source a; return", "return: can only be done from a func or sourced script\nexit status 1 #JUSTERR"},
{"echo 'return 2' >a; source a", "exit status 2"},
{"echo 'echo foo; return; echo bar' >a; source a", "foo\n"},
// command
{"command", ""},
{"command -o echo", "command: invalid option -o\nexit status 2 #JUSTERR"},
{"echo() { :; }; echo foo", ""},
{"echo() { :; }; command echo foo", "foo\n"},
{"command -v does-not-exist", "exit status 1"},
{"foo() { :; }; command -v foo", "foo\n"},
{"foo() { :; }; command -v does-not-exist foo", "foo\n"},
{"command -v echo", "echo\n"},
{"[[ $(command -v $PATH_PROG) == $PATH_PROG ]]", "exit status 1"},
// cmd substitution
{
"echo foo $(printf bar)",
"foo bar\n",
},
{
"echo foo $(echo bar)",
"foo bar\n",
},
{
"$(echo echo foo bar)",
"foo bar\n",
},
{
"for i in 1 $(echo 2 3) 4; do echo $i; done",
"1\n2\n3\n4\n",
},
{
"echo 1$(echo 2 3)4",
"12 34\n",
},
{
`mkdir d; [[ $(cd d && pwd) == "$(pwd)" ]]`,
"exit status 1",
},
{
"a=sub true & { a=main $ENV_PROG | grep '^a='; }",
"a=main\n",
},
{
"echo foo >f; echo $(cat f); echo $(<f)",
"foo\nfoo\n",
},
{
"echo foo >f; echo $(<f; echo bar)",
"bar\n",
},
// pipes
{
"echo foo | sed 's/o/a/g'",
"faa\n",
},
{
"echo foo | false | true",
"",
},
{
"true $(true) | true", // used to panic
"",
},
// redirects
{
"echo foo >&1 | sed 's/o/a/g'",
"faa\n",
},
{
"echo foo >&2 | sed 's/o/a/g'",
"foo\n",
},
{
// TODO: why does bash need a block here?
"{ echo foo >&2; } |& sed 's/o/a/g'",
"faa\n",
},
{
"echo foo >/dev/null; echo bar",
"bar\n",
},
{
">a; echo foo >>b; wc -c <a >>b; cat b",
"foo\n0\n",
},
{
"echo foo >a; <a",
"",
},
{
"echo foo >a; wc -c <a",
"4\n",
},
{
"echo foo >>a; echo bar &>>a; wc -c <a",
"8\n",
},
{
"{ echo a; echo b >&2; } &>/dev/null",
"",
},
{
"sed 's/o/a/g' <<EOF\nfoo$foo\nEOF",
"faa\n",
},
{
"sed 's/o/a/g' <<'EOF'\nfoo$foo\nEOF",
"faa$faa\n",
},
{
"sed 's/o/a/g' <<EOF\n\tfoo\nEOF",
"\tfaa\n",
},
{
"sed 's/o/a/g' <<EOF\nfoo\nEOF",
"faa\n",
},
{
"cat <<EOF\n~/foo\nEOF",
"~/foo\n",
},
{
"sed 's/o/a/g' <<<foo$foo",
"faa\n",
},
{
"cat <<-EOF\n\tfoo\nEOF",
"foo\n",
},
{
"cat <<-EOF\n\tfoo\n\nEOF",
"foo\n\n",
},
{
"cat <<EOF\nfoo\\\nbar\nEOF",
"foobar\n",
},
{
"cat <<'EOF'\nfoo\\\nbar\nEOF",
"foo\\\nbar\n",
},
{
"mkdir a; echo foo >a |& grep -q 'is a directory'",
" #IGNORE",
},
{
"echo foo 1>&1 | sed 's/o/a/g'",
"faa\n",
},
{
"echo foo 2>&2 |& sed 's/o/a/g'",
"faa\n",
},
{
"printf 2>&1 | sed 's/.*usage.*/foo/'",
"foo\n",
},
{
"mkdir a && cd a && echo foo >b && cd .. && cat a/b",
"foo\n",
},
// background/wait
{"wait", ""},
{"{ true; } & wait", ""},
{"{ exit 1; } & wait", ""},
{
"{ echo foo; } & wait; echo bar",
"foo\nbar\n",
},
{
"{ echo foo & wait; } & wait; echo bar",
"foo\nbar\n",
},
{`mkdir d; old=$PWD; cd d & wait; [[ $old == "$PWD" ]]`, ""},
{
"f() { echo 1; }; { sleep 0.01s; f; } & f() { echo 2; }; wait",
"1\n",
},
// bash test
{
"[[ a ]]",
"",
},
{
"[[ '' ]]",
"exit status 1",
},
{
"[[ '' ]]; [[ a ]]",
"",
},
{
"[[ ! (a == b) ]]",
"",
},
{
"[[ a != b ]]",
"",
},
{
"[[ a && '' ]]",
"exit status 1",
},
{
"[[ a || '' ]]",
"",
},
{
"[[ a > 3 ]]",
"",
},
{
"[[ a < 3 ]]",
"exit status 1",
},
{
"[[ 3 == 03 ]]",
"exit status 1",
},
{
"[[ a -eq b ]]",
"",
},
{
"[[ 3 -eq 03 ]]",
"",
},
{
"[[ 3 -ne 4 ]]",
"",
},
{
"[[ 3 -le 4 ]]",
"",
},
{
"[[ 3 -ge 4 ]]",
"exit status 1",
},
{
"[[ 3 -ge 3 ]]",
"",
},
{
"[[ 3 -lt 4 ]]",
"",
},
{
"[[ 3 -gt 4 ]]",
"exit status 1",
},
{
"[[ 3 -gt 3 ]]",
"exit status 1",
},
{
"[[ a -nt a || a -ot a ]]",
"exit status 1",
},
{
"touch -d @1 a b; [[ a -nt b || a -ot b ]]",
"exit status 1",
},
{
"touch -d @1 a; touch -d @2 b; [[ a -nt b ]]",
"exit status 1",
},
{
"touch -d @1 a; touch -d @2 b; [[ a -ot b ]]",
"",
},
{
"[[ a -ef b ]]",
"exit status 1",
},
{
">a >b; [[ a -ef b ]]",
"exit status 1",
},
{
">a; [[ a -ef a ]]",
"",
},
{
">a; ln a b; [[ a -ef b ]]",
"",
},
{
">a; ln -s a b; [[ a -ef b ]]",
"",
},
{
"[[ -z 'foo' || -n '' ]]",
"exit status 1",
},
{
"[[ -z '' && -n 'foo' ]]",
"",
},
{
"a=x b=''; [[ -v a && -v b && ! -v c ]]",
"",
},
{
"[[ abc == *b* ]]",
"",
},
{
"[[ abc != *b* ]]",
"exit status 1",
},
{
"[[ *b = '*b' ]]",
"",
},
{
"[[ ab == a. ]]",
"exit status 1",
},
{
`x='*b*'; [[ abc == $x ]]`,
"",
},
{
`x='*b*'; [[ abc == "$x" ]]`,
"exit status 1",
},
{
`[[ abc == \a\bc ]]`,
"",
},
{
"[[ abc != *b'*' ]]",
"",
},
{
"[[ a =~ b ]]",
"exit status 1",
},
{
"[[ foo =~ foo && foo =~ .* && foo =~ f.o ]]",
"",
},
{
"[[ foo =~ oo ]] && echo foo; [[ foo =~ ^oo$ ]] && echo bar || true",
"foo\n",
},
{
"[[ a =~ [ ]]",
"exit status 2",
},
{
"[[ -e a ]] && echo x; >a; [[ -e a ]] && echo y",
"y\n",
},
{
"ln -s b a; [[ -e a ]] && echo x; >b; [[ -e a ]] && echo y",
"y\n",
},
{
"[[ -f a ]] && echo x; >a; [[ -f a ]] && echo y",
"y\n",
},
{
"[[ -e a ]] && echo x; mkdir a; [[ -e a ]] && echo y",
"y\n",
},
{
"[[ -d a ]] && echo x; mkdir a; [[ -d a ]] && echo y",
"y\n",
},
{
"[[ -r a ]] && echo x; >a; [[ -r a ]] && echo y",
"y\n",
},
{
"[[ -w a ]] && echo x; >a; [[ -w a ]] && echo y",
"y\n",
},
{
"[[ -s a ]] && echo x; echo body >a; [[ -s a ]] && echo y",
"y\n",
},
{
"[[ -L a ]] && echo x; ln -s b a; [[ -L a ]] && echo y;",
"y\n",
},
{
"mkdir a; cd a; test -f b && echo x; >b; test -f b && echo y",
"y\n",
},
{
">a; [[ -b a ]] && echo block; [[ -c a ]] && echo char; true",
"",
},
{
"[[ -e /dev/sda ]] || { echo block; exit; }; [[ -b /dev/sda ]] && echo block; [[ -c /dev/sda ]] && echo char; true",
"block\n",
},
{
"[[ -e /dev/tty ]] || { echo char; exit; }; [[ -b /dev/tty ]] && echo block; [[ -c /dev/tty ]] && echo char; true",
"char\n",
},
{"[[ -t 1234 ]]", "exit status 1"}, // TODO: reliable way to test a positive?
{"[[ -o wrong ]]", "exit status 1"},
{"[[ -o errexit ]]", "exit status 1"},
{"set -e; [[ -o errexit ]]", ""},
{"[[ -o noglob ]]", "exit status 1"},
{"set -f; [[ -o noglob ]]", ""},
{"[[ -o allexport ]]", "exit status 1"},
{"set -a; [[ -o allexport ]]", ""},
{"[[ -o nounset ]]", "exit status 1"},
{"set -u; [[ -o nounset ]]", ""},
{"[[ -o noexec ]]", "exit status 1"},
{"set -n; [[ -o noexec ]]", ""}, // actually does nothing, but oh well
{"[[ -o pipefail ]]", "exit status 1"},
{"set -o pipefail; [[ -o pipefail ]]", ""},
// classic test
{
"[",
"1:1: [: missing matching ]\nexit status 2 #JUSTERR",
},
{
"[ a",
"1:1: [: missing matching ]\nexit status 2 #JUSTERR",
},
{
"[ a b c ]",
"1:1: not a valid test operator: b\nexit status 2 #JUSTERR",
},
{
"[ a -a ]",
"1:1: -a must be followed by an expression\nexit status 2 #JUSTERR",
},
{"[ a ]", ""},
{"[ -n ]", ""},
{"[ '-n' ]", ""},
{"[ -z ]", ""},
{"[ ! ]", ""},
{"[ a != b ]", ""},
{"[ ! a '==' a ]", "exit status 1"},
{"[ a -a 0 -gt 1 ]", "exit status 1"},
{"[ 0 -gt 1 -o 1 -gt 0 ]", ""},
{"[ 3 -gt 4 ]", "exit status 1"},
{"[ 3 -lt 4 ]", ""},
{
"[ -e a ] && echo x; >a; [ -e a ] && echo y",
"y\n",
},
{
"test 3 -gt 4",
"exit status 1",
},
{
"test 3 -lt 4",
"",
},
{
"test 3 -lt",
"1:1: -lt must be followed by a word\nexit status 2 #JUSTERR",
},
{
"touch -d @1 a; touch -d @2 b; [ a -nt b ]",
"exit status 1",
},
{
"touch -d @1 a; touch -d @2 b; [ a -ot b ]",
"",
},
{
">a; [ a -ef a ]",
"",
},
{"[ 3 -eq 04 ]", "exit status 1"},
{"[ 3 -eq 03 ]", ""},
{"[ 3 -ne 03 ]", "exit status 1"},
{"[ 3 -le 4 ]", ""},
{"[ 3 -ge 4 ]", "exit status 1"},
{
"[ -d a ] && echo x; mkdir a; [ -d a ] && echo y",
"y\n",
},
{
"[ -r a ] && echo x; >a; [ -r a ] && echo y",
"y\n",
},
{
"[ -w a ] && echo x; >a; [ -w a ] && echo y",
"y\n",
},
{
"[ -s a ] && echo x; echo body >a; [ -s a ] && echo y",
"y\n",
},
{
"[ -L a ] && echo x; ln -s b a; [ -L a ] && echo y;",
"y\n",
},
{
">a; [ -b a ] && echo block; [ -c a ] && echo char; true",
"",
},
{"[ -t 1234 ]", "exit status 1"}, // TODO: reliable way to test a positive?
{"[ -o wrong ]", "exit status 1"},
{"[ -o errexit ]", "exit status 1"},
{"set -e; [ -o errexit ]", ""},
{"a=x b=''; [ -v a -a -v b -a ! -v c ]", ""},
{"[ a = a ]", ""},
{"[ a != a ]", "exit status 1"},
{"[ abc = ab* ]", "exit status 1"},
{"[ abc != ab* ]", ""},
// arithm
{
"echo $((1 == +1))",
"1\n",
},
{
"echo $((!0))",
"1\n",
},
{
"echo $((!3))",
"0\n",
},
{
"echo $((~0))",
"-1\n",
},
{
"echo $((~3))",
"-4\n",
},
{
"echo $((1 + 2 - 3))",
"0\n",
},
{
"echo $((-1 * 6 / 2))",
"-3\n",
},
{
"a=2; echo $(( a + $a + c ))",
"4\n",
},
{
"a=b; b=c; c=5; echo $((a % 3))",
"2\n",
},
{
"echo $((2 > 2 || 2 < 2))",
"0\n",
},
{
"echo $((2 >= 2 && 2 <= 2))",
"1\n",
},
{
"echo $(((1 & 2) != (1 | 2)))",
"1\n",
},
{
"echo $a; echo $((a = 3 ^ 2)); echo $a",
"\n1\n1\n",
},
{
"echo $((a += 1, a *= 2, a <<= 2, a >> 1))",
"4\n",
},
{
"echo $((a -= 10, a /= 2, a >>= 1, a << 1))",
"-6\n",
},
{
"echo $((a |= 3, a &= 1, a ^= 8, a %= 5, a))",
"4\n",
},
{
"echo $((a = 3, ++a, a--))",
"4\n",
},
{
"echo $((2 ** 3)) $((1234 ** 4567))",
"8 0\n",
},
{
"echo $((1 ? 2 : 3)) $((0 ? 2 : 3))",
"2 3\n",
},
{
"((1))",
"",
},
{
"((3 == 4))",
"exit status 1",
},
{
"let i=(3+4); let i++; echo $i; let i--; echo $i",
"8\n7\n",
},
{
"let 3==4",
"exit status 1",
},
{
"a=1; let a++; echo $a",
"2\n",
},
{
"a=$((1 + 2)); echo $a",
"3\n",
},
{
"x=3; echo $(($x)) $((x))",
"3 3\n",
},
{
"set -- 1; echo $(($@))",
"1\n",
},
{
"a=b b=a; echo $(($a))",
"0\n #IGNORE",
},
// set/shift
{
"echo $#; set foo bar; echo $#",
"0\n2\n",
},
{
"shift; set a b c; shift; echo $@",
"b c\n",
},
{
"shift 2; set a b c; shift 2; echo $@",
"c\n",
},
{
`echo $#; set '' ""; echo $#`,
"0\n2\n",
},
{
"set -- a b; echo $#",
"2\n",
},
{
"set -U",
"set: invalid option: \"-U\"\nexit status 2 #JUSTERR",
},
{
"set -e; false; echo foo",
"exit status 1",
},
{
"set -e; set +e; false; echo foo",
"foo\n",
},
{
"set -e; ! false; echo foo",
"foo\n",
},
{
"set -e; ! true; echo foo",
"foo\n",
},
{
"set -e; if false; then echo foo; fi",
"",
},
{
"set -e; while false; do echo foo; done",
"",
},
{
"set -e; false || true",
"",
},
{
"set -e; false && true; true",
"",
},
{
"false | :",
"",
},
{
"set -o pipefail; false | :",
"exit status 1",
},
{
"set -o pipefail; true | false | true | :",
"exit status 1",
},
{
"set -o pipefail; set -M 2>/dev/null | false",
"exit status 1",
},
{
"set -f; >a.x; echo *.x;",
"*.x\n",
},
{
"set -f; set +f; >a.x; echo *.x;",
"a.x\n",
},
{
"set -a; foo=bar; $ENV_PROG | grep ^foo=",
"foo=bar\n",
},
{
"set -a; foo=(b a r); $ENV_PROG | grep ^foo=",
"exit status 1",
},
{
"foo=bar; set -a; $ENV_PROG | grep ^foo=",
"exit status 1",
},
{
"a=b; echo $a; set -u; echo $a",
"b\nb\n",
},
{
"echo $a; set -u; echo $a; echo extra",
"\na: unbound variable\nexit status 1 #JUSTERR",
},
{"set -n; echo foo", ""},
{"set -n; [ wrong", ""},
{"set -n; set +n; echo foo", ""},
{
"set -o foobar",
"set: invalid option: \"-o\"\nexit status 2 #JUSTERR",
},
{"set -o noexec; echo foo", ""},
{"set +o noexec; echo foo", "foo\n"},
{"set -e; set -o | grep -E 'errexit|noexec' | wc -l", "2\n"},
{"set -e; set -o | grep -E 'errexit|noexec' | grep 'on$' | wc -l", "1\n"},
{
"set -a; set +o",
`set -o allexport
set +o errexit
set +o noexec
set +o noglob
set +o nounset
set +o pipefail
#IGNORE`,
},
// unset
{
"a=1; echo $a; unset a; echo $a",
"1\n\n",
},
{
"notinpath() { echo func; }; notinpath; unset -f notinpath; notinpath",
"func\n\"notinpath\": executable file not found in $PATH\nexit status 127 #JUSTERR",
},
{
"a=1; a() { echo func; }; unset -f a; echo $a",
"1\n",
},
{
"a=1; a() { echo func; }; unset -v a; a; echo $a",
"func\n\n",
},
{
"notinpath=1; notinpath() { echo func; }; notinpath; echo $notinpath; unset notinpath; notinpath; echo $notinpath; unset notinpath; notinpath",
"func\n1\nfunc\n\n\"notinpath\": executable file not found in $PATH\nexit status 127 #JUSTERR",
},
{
"unset PATH; [[ $PATH == '' ]]",
"",
},
{
"readonly a=1; echo $a; unset a; echo $a",
"1\na: readonly variable\n1\n #IGNORE",
},
{
"f() { local a=1; echo $a; unset a; echo $a; }; f",
"1\n\n",
},
{
`a=b eval 'echo $a; unset a; echo $a'`,
"b\n\n",
},
{
`$(unset INTERP_GLOBAL); echo $INTERP_GLOBAL; unset INTERP_GLOBAL; echo $INTERP_GLOBAL`,
"value\n\n",
},
{
`x=orig; f() { local x=local; unset x; x=still_local; }; f; echo $x`,
"orig\n",
},
{
`x=orig; f() { local x=local; unset x; [[ -v x ]] && echo set || echo unset; }; f`,
"unset\n",
},
// shopt
{"set -e; shopt -o | grep -E 'errexit|noexec' | wc -l", "2\n"},
{"set -e; shopt -o | grep -E 'errexit|noexec' | grep 'on$' | wc -l", "1\n"},
{"shopt -s -o noexec; echo foo", ""},
{"shopt -u -o noexec; echo foo", "foo\n"},
{"shopt -u globstar; shopt globstar | grep 'off$' | wc -l", "1\n"},
{"shopt -s globstar; shopt globstar | grep 'off$' | wc -l", "0\n"},
// IFS
{`echo -n "$IFS"`, " \t\n"},
{`a="x:y:z"; IFS=:; echo $a`, "x y z\n"},
{`a=(x y z); IFS=-; echo "${a[*]}"`, "x-y-z\n"},
{`a=(x y z); IFS=-; echo "${a[@]}"`, "x y z\n"},
{`a=" x y z"; IFS=; echo $a`, " x y z\n"},
{`a=(x y z); IFS=; echo "${a[*]}"`, "xyz\n"},
{`a=(x y z); IFS=-; echo "${!a[@]}"`, "0 1 2\n"},
// builtin
{"builtin", ""},
{"builtin noexist", "exit status 1 #JUSTERR"},
{"builtin echo foo", "foo\n"},
{
"echo() { printf 'bar\n'; }; echo foo; builtin echo foo",
"bar\nfoo\n",
},
// type
{"type", ""},
{"type echo", "echo is a shell builtin\n"},
{"echo() { :; }; type echo | grep 'is a function'", "echo is a function\n"},
{"type $PATH_PROG | grep -q -E ' is (/|[A-Z]:).*'", ""},
{"type noexist", "type: noexist: not found\nexit status 1 #JUSTERR"},
// eval
{"eval", ""},
{"eval ''", ""},
{"eval echo foo", "foo\n"},
{"eval 'echo foo'", "foo\n"},
{"eval 'exit 1'", "exit status 1"},
{"eval '('", "eval: 1:1: reached EOF without matching ( with )\nexit status 1 #JUSTERR"},
{"set a b; eval 'echo $@'", "a b\n"},
{"eval 'a=foo'; echo $a", "foo\n"},
{`a=b eval "echo $a"`, "\n"},
{`a=b eval 'echo $a'`, "b\n"},
{`eval 'echo "\$a"'`, "$a\n"},
{`a=b eval 'x=y eval "echo \$a \$x"'`, "b y\n"},
{`a=b eval 'a=y eval "echo $a \$a"'`, "b y\n"},
{"a=b eval '(echo $a)'", "b\n"},
// source
{
"source",
"1:1: source: need filename\nexit status 2 #JUSTERR",
},
{
"echo 'echo foo' >a; source a; . a",
"foo\nfoo\n",
},
{
"echo 'echo $@' >a; source a; source a b c; echo $@",
"\nb c\n\n",
},
{
"echo 'foo=bar' >a; source a; echo $foo",
"bar\n",
},
// indexed arrays
{
"a=foo; echo ${a[0]} ${a[@]} ${a[x]}; echo ${a[1]}",
"foo foo foo\n\n",
},
{
"a=(); echo ${a[0]} ${a[@]} ${a[x]} ${a[1]}",
"\n",
},
{
"a=(b c); echo $a; echo ${a[0]}; echo ${a[1]}; echo ${a[x]}",
"b\nb\nc\nb\n",
},
{
"a=(b c); echo ${a[@]}; echo ${a[*]}",
"b c\nb c\n",
},
{
"a=(1 2 3); echo ${a[2-1]}; echo $((a[1+1]))",
"2\n3\n",
},
{
"a=(1 2) x=(); a+=b x+=c; echo ${a[@]}; echo ${x[@]}",
"1b 2\nc\n",
},
{
"a=(1 2) x=(); a+=(b c) x+=(d e); echo ${a[@]}; echo ${x[@]}",
"1 2 b c\nd e\n",
},
{
"a=bbb; a+=(c d); echo ${a[@]}",
"bbb c d\n",
},
{
`a=('a 1' 'b 2'); for e in ${a[@]}; do echo "$e"; done`,
"a\n1\nb\n2\n",
},
{
`a=('a 1' 'b 2'); for e in "${a[*]}"; do echo "$e"; done`,
"a 1 b 2\n",
},
{
`a=('a 1' 'b 2'); for e in "${a[@]}"; do echo "$e"; done`,
"a 1\nb 2\n",
},
{
`a=([1]=y [0]=x); echo ${a[0]}`,
"x\n",
},
{
`a=(y); a[2]=x; echo ${a[2]}`,
"x\n",
},
{
`a="y"; a[2]=x; echo ${a[2]}`,
"x\n",
},
{
`declare -a a=(x y); echo ${a[1]}`,
"y\n",
},
{
`a=b; echo "${a[@]}"`,
"b\n",
},
// associative arrays
{
`a=foo; echo ${a[""]} ${a["x"]}`,
"foo foo\n",
},
{
`declare -A a=(); echo ${a[0]} ${a[@]} ${a[1]} ${a["x"]}`,
"\n",
},
{
`declare -A a=([x]=b [y]=c); echo $a; echo ${a[0]}; echo ${a["x"]}; echo ${a["_"]}`,
"\n\nb\n\n",
},
{
`declare -A a=([x]=b [y]=c); for e in ${a[@]}; do echo $e; done | sort`,
"b\nc\n",
},
{
`declare -A a=([y]=b [x]=c); for e in ${a[*]}; do echo $e; done | sort`,
"b\nc\n",
},
{
`declare -A a=([x]=a); a["y"]=d; a["x"]=c; for e in ${a[@]}; do echo $e; done | sort`,
"c\nd\n",
},
{
`declare -A a=([x]=a); a[y]=d; a[x]=c; for e in ${a[@]}; do echo $e; done | sort`,
"c\nd\n",
},
{
// cheating a little; bash just did a=c
`a=(["x"]=b ["y"]=c); echo ${a["y"]}`,
"c\n",
},
{
`declare -A a=(['x']=b); echo ${a['x']} ${a[$'x']} ${a[$"x"]}`,
"b b b\n",
},
{
`a=(['x']=b); echo ${a['y']}`,
"\n #IGNORE bash requires -A",
},
// weird assignments
{"a=b; a=(c d); echo ${a[@]}", "c d\n"},
{"a=(b c); a=d; echo ${a[@]}", "d c\n"},
{"declare -A a=([x]=b [y]=c); a=d; for e in ${a[@]}; do echo $e; done | sort", "b\nc\nd\n"},
{"i=3; a=b; a[i]=x; echo ${a[@]}", "b x\n"},
{"i=3; declare a=(b); a[i]=x; echo ${!a[@]}", "0 3\n"},
{"i=3; declare -A a=(['x']=b); a[i]=x; for e in ${!a[@]}; do echo $e; done | sort", "i\nx\n"},
// declare
{"declare -B foo", "declare: invalid option \"-B\"\nexit status 2 #JUSTERR"},
{"a=b; declare a; echo $a; declare a=; echo $a", "b\n\n"},
{"a=b; declare a; echo $a", "b\n"},
{
"declare a=b c=(1 2); echo $a; echo ${c[@]}",
"b\n1 2\n",
},
{"a=x; declare $a; echo $a $x", "x\n"},
{"a=x=y; declare $a; echo $a $x", "x=y y\n"},
{"a='x=(y)'; declare $a; echo $a $x", "x=(y) (y)\n"},
{"a='x=b y=c'; declare $a; echo $x $y", "b c\n"},
{"declare =bar", "declare: invalid name \"\"\nexit status 1 #JUSTERR"},
{"declare $unset=$unset", "declare: invalid name \"\"\nexit status 1 #JUSTERR"},
// export
{"declare foo=bar; $ENV_PROG | grep '^foo='", "exit status 1"},
{"declare -x foo=bar; $ENV_PROG | grep '^foo='", "foo=bar\n"},
{"export foo=bar; $ENV_PROG | grep '^foo='", "foo=bar\n"},
{"foo=bar; export foo; $ENV_PROG | grep '^foo='", "foo=bar\n"},
{"export foo=bar; foo=baz; $ENV_PROG | grep '^foo='", "foo=baz\n"},
{"export foo=bar; readonly foo=baz; $ENV_PROG | grep '^foo='", "foo=baz\n"},
{"export foo=(1 2); $ENV_PROG | grep '^foo='", "exit status 1"},
{"declare -A foo=([a]=b); export foo; $ENV_PROG | grep '^foo='", "exit status 1"},
{"export foo=(b c); foo=x; $ENV_PROG | grep '^foo='", "exit status 1"},
// local
{
"local a=b",
"local: can only be used in a function\nexit status 1 #JUSTERR",
},
{
"local a=b 2>/dev/null; echo $a",
"\n",
},
{
"{ local a=b; }",
"local: can only be used in a function\nexit status 1 #JUSTERR",
},
{
"echo 'local a=b' >a; source a",
"local: can only be used in a function\nexit status 1 #JUSTERR",
},
{
"echo 'local a=b' >a; f() { source a; }; f; echo $a",
"\n",
},
{
"f() { local a=b; }; f; echo $a",
"\n",
},
{
"a=x; f() { local a=b; }; f; echo $a",
"x\n",
},
{
"a=x; f() { echo $a; local a=b; echo $a; }; f",
"x\nb\n",
},
{
"f1() { local a=b; }; f2() { f1; echo $a; }; f2",
"\n",
},
{
"f() { a=1; declare b=2; export c=3; readonly d=4; declare -g e=5; }; f; echo $a $b $c $d $e",
"1 3 4 5\n",
},
{
`f() { local x; [[ -v x ]] && echo set || echo unset; }; f`,
"unset\n",
},
{
`f() { local x=; [[ -v x ]] && echo set || echo unset; }; f`,
"set\n",
},
{
`export x=before; f() { local x; export x=after; $ENV_PROG | grep '^x='; }; f; echo $x`,
"x=after\nbefore\n",
},
// name references
{"declare -n foo=bar; bar=etc; [[ -R foo ]]", ""},
{"declare -n foo=bar; bar=etc; [ -R foo ]", ""},
{"nameref foo=bar; bar=etc; [[ -R foo ]]", " #IGNORE"},
{"declare foo=bar; bar=etc; [[ -R foo ]]", "exit status 1"},
{
"declare -n foo=bar; bar=etc; echo $foo; bar=zzz; echo $foo",
"etc\nzzz\n",
},
{
"declare -n foo=bar; bar=(x y); echo ${foo[1]}; bar=(a b); echo ${foo[1]}",
"y\nb\n",
},
{
"declare -n foo=bar; bar=etc; echo $foo; unset bar; echo $foo",
"etc\n\n",
},
{
"declare -n a1=a2 a2=a3 a3=a4; a4=x; echo $a1 $a3",
"x x\n",
},
{
"declare -n foo=bar bar=foo; echo $foo",
"\n #IGNORE",
},
{
"declare -n foo=bar; echo $foo",
"\n",
},
{
"declare -n foo=bar; echo ${!foo}",
"bar\n",
},
{
"declare -n foo=bar; bar=etc; echo $foo; echo ${!foo}",
"etc\nbar\n",
},
{
"declare -n foo=bar; bar=etc; foo=xxx; echo $foo $bar",
"xxx xxx\n",
},
{
"declare -n foo=bar; foo=xxx; echo $foo $bar",
"xxx xxx\n",
},
// TODO: figure this one out
//{
// "declare -n foo=bar bar=baz; foo=xxx; echo $foo $bar; echo $baz",
// "xxx xxx\nxxx\n",
//},
// read-only vars
{"declare -r foo=bar; echo $foo", "bar\n"},
{"readonly foo=bar; echo $foo", "bar\n"},
{
"a=b; a=c; echo $a; readonly a; a=d",
"c\na: readonly variable\nexit status 1 #JUSTERR",
},
{
"declare -r foo=bar; foo=etc",
"foo: readonly variable\nexit status 1 #JUSTERR",
},
{
"readonly foo=bar; foo=etc",
"foo: readonly variable\nexit status 1 #JUSTERR",
},
// multiple var modes at once
{
"declare -r -x foo=bar; $ENV_PROG | grep '^foo='",
"foo=bar\n",
},
{
"declare -r -x foo=bar; foo=x",
"foo: readonly variable\nexit status 1 #JUSTERR",
},
// globbing
{"echo .", ".\n"},
{"echo ..", "..\n"},
{"echo ./.", "./.\n"},
{
">a.x >b.x >c.x; echo *.x; rm a.x b.x c.x",
"a.x b.x c.x\n",
},
{
`>a.x; echo '*.x' "*.x"; rm a.x`,
"*.x *.x\n",
},
{
`>a.x >b.y; echo *'.'x; rm a.x`,
"a.x\n",
},
{
`>a.x; echo *'.x' "a."* '*'.x; rm a.x`,
"a.x a.x *.x\n",
},
{
"echo *.x; echo foo *.y bar",
"*.x\nfoo *.y bar\n",
},
{
"mkdir a; >a/b.x; echo */*.x | sed 's@\\\\@/@g'; cd a; echo *.x",
"a/b.x\nb.x\n",
},
{
"mkdir -p a/b/c; echo a/* | sed 's@\\\\@/@g'",
"a/b\n",
},
{
">.hidden >a; echo *; echo .h*; rm .hidden a",
"a\n.hidden\n",
},
{
`mkdir d; >d/.hidden >d/a; set -- "$(echo d/*)" "$(echo d/.h*)"; echo ${#1} ${#2}; rm -r d`,
"3 9\n",
},
{
"mkdir -p a/b/c; echo a/** | sed 's@\\\\@/@g'",
"a/b\n",
},
{
"shopt -s globstar; mkdir -p a/b/c; echo a/** | sed 's@\\\\@/@g'",
"a/ a/b a/b/c\n",
},
{
"shopt -s globstar; mkdir -p a/b/c; echo **/c | sed 's@\\\\@/@g'",
"a/b/c\n",
},
{
"cat <<EOF\n{foo,bar}\nEOF",
"{foo,bar}\n",
},
{
"cat <<EOF\n*.go\nEOF",
"*.go\n",
},
{
"mkdir -p a/b a/c; echo ./a/* | sed 's@\\\\@/@g'",
"./a/b ./a/c\n",
},
{
"mkdir -p a/b a/c d; cd d; echo ../a/* | sed 's@\\\\@/@g'",
"../a/b ../a/c\n",
},
{
"mkdir x-d1 x-d2; >x-f; echo x-*/ | sed 's@\\\\@/@g'",
"x-d1/ x-d2/\n",
},
{
"mkdir x-d1 x-d2; >x-f; echo ././x-*/// | sed 's@\\\\@/@g'",
"././x-d1/ ././x-d2/\n",
},
{
"mkdir -p x-d1/a x-d2/b; >x-f; echo x-*/* | sed 's@\\\\@/@g'",
"x-d1/a x-d2/b\n",
},
{
"mkdir -p foo/bar; ln -s foo sym; echo sy*/; echo sym/b*",
"sym/\nsym/bar\n",
},
{
">foo; ln -s foo sym; echo sy*; echo sy*/",
"sym\nsy*/\n",
},
// brace expansion; more exhaustive tests in the syntax package
{"echo a}b", "a}b\n"},
{"echo {a,b{c,d}", "{a,bc {a,bd\n"},
{"echo a{b}", "a{b}\n"},
{"echo a{à,世界}", "aà a世界\n"},
{"echo a{b,c}d{e,f}g", "abdeg abdfg acdeg acdfg\n"},
{"echo a{b{x,y},c}d", "abxd abyd acd\n"},
{"echo a{1..", "a{1..\n"},
{"echo a{1..2}b{4..5}c", "a1b4c a1b5c a2b4c a2b5c\n"},
{"echo a{c..f}", "ac ad ae af\n"},
{"echo a{4..1..1}", "a4 a3 a2 a1\n"},
// tilde expansion
{
"[[ '~/foo' == ~/foo ]] || [[ ~/foo == '~/foo' ]]",
"exit status 1",
},
{
"case '~/foo' in ~/foo) echo match ;; esac",
"",
},
{
"a=~/foo; [[ $a == '~/foo' ]]",
"exit status 1",
},
{
`a=$(echo "~/foo"); [[ $a == '~/foo' ]]`,
"",
},
// /dev/null
{"echo foo >/dev/null", ""},
{"cat </dev/null", ""},
// time - real would be slow and flaky; see TestElapsedString
{"{ time; } |& wc", " 4 6 42\n"},
{"{ time echo -n; } |& wc", " 4 6 42\n"},
{"{ time -p; } |& wc", " 3 6 29\n"},
{"{ time -p echo -n; } |& wc", " 3 6 29\n"},
// exec
{"exec", ""},
{
"exec builtin echo foo",
"\"builtin\": executable file not found in $PATH\nexit status 127 #JUSTERR",
},
{
"exec $GOSH_PROG 'echo foo'; echo bar",
"foo\n",
},
// read
{
"read </dev/null",
"exit status 1",
},
{
"read -X",
"read: invalid option \"-X\"\nexit status 2 #JUSTERR",
},
{
"read 0ab",
"read: invalid identifier \"0ab\"\nexit status 2 #JUSTERR",
},
{
"read <<< foo; echo $REPLY",
"foo\n",
},
{
"read <<<' a b c '; echo \"$REPLY\"",
" a b c \n",
},
{
"read <<< 'y\nn\n'; echo $REPLY",
"y\n",
},
{
"read a_0 <<< foo; echo $a_0",
"foo\n",
},
{
"read a b <<< 'foo bar baz '; echo \"$a\"; echo \"$b\"",
"foo\nbar baz\n",
},
{
"while read a; do echo $a; done <<< 'a\nb\nc'",
"a\nb\nc\n",
},
{
"while read a b; do echo -e \"$a\n$b\"; done <<< '1 2\n3'",
"1\n2\n3\n\n",
},
{
`read a <<< '\\'; echo "$a"`,
"\\\n",
},
{
`read a <<< '\a\b\c'; echo "$a"`,
"abc\n",
},
{
"read -r a b <<< '1\\\t2'; echo $a; echo $b;",
"1\\\n2\n",
},
{
"echo line\\\ncontinuation | while read a; do echo $a; done",
"linecontinuation\n",
},
{
`read -r a <<< '\\'; echo "$a"`,
"\\\\\n",
},
{
"read -r a <<< '\\a\\b\\c'; echo $a",
"\\a\\b\\c\n",
},
{
"IFS=: read a b c <<< '1:2:3'; echo $a; echo $b; echo $c",
"1\n2\n3\n",
},
{
"IFS=: read a b c <<< '1\\:2:3'; echo \"$a\"; echo $b; echo $c",
"1:2\n3\n\n",
},
// getopts
{
"getopts",
"getopts: usage: getopts optstring name [arg]\nexit status 2",
},
{
"getopts a a:b",
"getopts: invalid identifier: \"a:b\"\nexit status 2 #JUSTERR",
},
{
"getopts abc opt -a; echo $opt; $optarg",
"a\n",
},
{
"getopts abc opt -z",
"getopts: illegal option -- \"z\"\n #IGNORE",
},
{
"getopts a: opt -a",
"getopts: option requires an argument -- \"a\"\n #IGNORE",
},
{
"getopts :abc opt -z; echo $opt; echo $OPTARG",
"?\nz\n",
},
{
"getopts :a: opt -a; echo $opt; echo $OPTARG",
":\na\n",
},
{
"getopts abc opt foo -a; echo $opt; echo $OPTIND",
"?\n1\n",
},
{
"getopts abc opt -a foo; echo $opt; echo $OPTIND",
"a\n2\n",
},
{
"OPTIND=3; getopts abc opt -a -b -c; echo $opt;",
"c\n",
},
{
"OPTIND=100; getopts abc opt -a -b -c; echo $opt;",
"?\n",
},
{
"OPTIND=foo; getopts abc opt -a -b -c; echo $opt;",
"a\n",
},
{
"while getopts ab:c opt -c -b arg -a foo; do echo $opt $OPTARG $OPTIND; done",
"c 2\nb arg 4\na 5\n",
},
{
"while getopts abc opt -ba -c foo; do echo $opt $OPTARG $OPTIND; done",
"b 1\na 2\nc 3\n",
},
{
"a() { while getopts abc: opt; do echo $opt $OPTARG; done }; a -a -b -c arg",
"a\nb\nc arg\n",
},
}
var runTestsUnix = []runTest{
{"[[ -n $PPID && $PPID -gt 0 ]]", ""},
{
// no root user on windows
"[[ ~root == '~root' ]]",
"exit status 1",
},
// windows does not support paths with '*'
{
"mkdir -p '*/a.z' 'b/a.z'; cd '*'; set -- *.z; echo $#",
"1\n",
},
{
"mkdir -p 'a-*/d'; test -d $PWD/a-*/*",
"",
},
// no fifos on windows
{
"[ -p a ] && echo x; mkfifo a; [ -p a ] && echo y",
"y\n",
},
{
"[[ -p a ]] && echo x; mkfifo a; [[ -p a ]] && echo y",
"y\n",
},
{"sh() { :; }; sh -c 'echo foo'", ""},
{"sh() { :; }; command sh -c 'echo foo'", "foo\n"},
// chmod is practically useless on Windows
{
"[ -x a ] && echo x; >a; chmod 0755 a; [ -x a ] && echo y",
"y\n",
},
{
"[[ -x a ]] && echo x; >a; chmod 0755 a; [[ -x a ]] && echo y",
"y\n",
},
{
">a; [ -k a ] && echo x; chmod +t a; [ -k a ] && echo y",
"y\n",
},
{
">a; [ -u a ] && echo x; chmod u+s a; [ -u a ] && echo y",
"y\n",
},
{
">a; [ -g a ] && echo x; chmod g+s a; [ -g a ] && echo y",
"y\n",
},
{
">a; [[ -k a ]] && echo x; chmod +t a; [[ -k a ]] && echo y",
"y\n",
},
{
">a; [[ -u a ]] && echo x; chmod u+s a; [[ -u a ]] && echo y",
"y\n",
},
{
">a; [[ -g a ]] && echo x; chmod g+s a; [[ -g a ]] && echo y",
"y\n",
},
{
`mkdir a; chmod 0100 a; cd a`,
"",
},
// Note that these will succeed if we're root.
{
`mkdir a; chmod 0000 a; cd a && test $UID -ne 0`,
"exit status 1 #JUSTERR",
},
{
`mkdir a; chmod 0222 a; cd a && test $UID -ne 0`,
"exit status 1 #JUSTERR",
},
{
`mkdir a; chmod 0444 a; cd a && test $UID -ne 0`,
"exit status 1 #JUSTERR",
},
{
`mkdir a; chmod 0010 a; cd a && test $UID -ne 0`,
"exit status 1 #JUSTERR",
},
{
`mkdir a; chmod 0001 a; cd a && test $UID -ne 0`,
"exit status 1 #JUSTERR",
},
// Unix-y PATH
{
"PATH=; bash -c 'echo foo'",
"\"bash\": executable file not found in $PATH\nexit status 127 #JUSTERR",
},
{
"cd /; sure/is/missing",
"stat /sure/is/missing: no such file or directory\nexit status 127 #JUSTERR",
},
{
"echo '#!/bin/sh\necho b' >a; chmod 0755 a; PATH=; a",
"b\n",
},
{
"mkdir c; cd c; echo '#!/bin/sh\necho b' >a; chmod 0755 a; PATH=; a",
"b\n",
},
{
"mkdir c; echo '#!/bin/sh\necho b' >c/a; chmod 0755 c/a; c/a",
"b\n",
},
// TODO: move back to the main tests list once
// https://github.community/t5/GitHub-Actions/TEMP-is-broken-on-Windows/m-p/30432#M427
// is fixed.
{
"mkdir x-d; >x-f; test -d $PWD/x-*/",
"",
},
// process substitution; named pipes (fifos) are a TODO for windows
{
"sed 's/o/e/g' <(echo foo bar)",
"fee bar\n",
},
{
"cat <(echo foo) <(echo bar) <(echo baz)",
"foo\nbar\nbaz\n",
},
{
"cat <(cat <(echo nested))",
"nested\n",
},
{
"echo foo bar > >(sed 's/o/e/g')",
"fee bar\n",
},
{
"echo foo bar | tee >(sed 's/o/e/g') >/dev/null",
"fee bar\n",
},
{
"echo nested > >(cat > >(cat))",
"nested\n",
},
}
var runTestsWindows = []runTest{
{"[[ -n $PPID || $PPID -gt 0 ]]", ""}, // os.Getppid can be 0 on windows
{"cmd() { :; }; cmd /c 'echo foo'", ""},
{"cmd() { :; }; command cmd /c 'echo foo'", "foo\r\n"},
}
func init() {
if runtime.GOOS == "windows" {
runTests = append(runTests, runTestsWindows...)
} else {
runTests = append(runTests, runTestsUnix...)
}
}
// ln -s: wine doesn't implement symlinks; see https://bugs.winehq.org/show_bug.cgi?id=44948
var skipOnWindows = regexp.MustCompile(`ln -s`)
func skipIfUnsupported(tb testing.TB, src string) {
switch {
case runtime.GOOS == "windows" && skipOnWindows.MatchString(src):
tb.Skipf("skipping non-portable test on windows")
}
}
func TestRunnerRun(t *testing.T) {
p := syntax.NewParser()
for i := range runTests {
t.Run(fmt.Sprintf("%03d", i), func(t *testing.T) {
c := runTests[i]
skipIfUnsupported(t, c.in)
file := parse(t, p, c.in)
t.Parallel()
dir, err := ioutil.TempDir("", "interp-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
var cb concBuffer
r, err := New(Dir(dir), StdIO(nil, &cb, &cb),
OpenHandler(testOpenHandler),
ExecHandler(testExecHandler),
)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
if err := r.Run(ctx, file); err != nil {
cb.WriteString(err.Error())
}
want := c.want
if i := strings.Index(want, " #"); i >= 0 {
want = want[:i]
}
if got := cb.String(); got != want {
t.Fatalf("wrong output in %q:\nwant: %q\ngot: %q",
c.in, want, got)
}
})
}
}
func readLines(hc HandlerContext) ([][]byte, error) {
bs, err := ioutil.ReadAll(hc.Stdin)
if err != nil {
return nil, err
}
if runtime.GOOS == "windows" {
bs = bytes.Replace(bs, []byte("\r\n"), []byte("\n"), -1)
}
bs = bytes.TrimSuffix(bs, []byte("\n"))
return bytes.Split(bs, []byte("\n")), nil
}
var testBuiltinsMap = map[string]func(HandlerContext, []string) error{
"cat": func(hc HandlerContext, args []string) error {
if len(args) == 0 {
if hc.Stdin == nil || hc.Stdout == nil {
return nil
}
_, err := io.Copy(hc.Stdout, hc.Stdin)
return err
}
for _, arg := range args {
path := absPath(hc.Dir, arg)
f, err := os.Open(path)
if err != nil {
return err
}
_, err = io.Copy(hc.Stdout, f)
f.Close()
if err != nil {
return err
}
}
return nil
},
"wc": func(hc HandlerContext, args []string) error {
bs, err := ioutil.ReadAll(hc.Stdin)
if err != nil {
return err
}
if len(args) == 0 {
fmt.Fprintf(hc.Stdout, "%7d", bytes.Count(bs, []byte("\n")))
fmt.Fprintf(hc.Stdout, "%8d", len(bytes.Fields(bs)))
fmt.Fprintf(hc.Stdout, "%8d\n", len(bs))
} else if args[0] == "-c" {
fmt.Fprintln(hc.Stdout, len(bs))
} else if args[0] == "-l" {
fmt.Fprintln(hc.Stdout, bytes.Count(bs, []byte("\n")))
}
return nil
},
"sort": func(hc HandlerContext, args []string) error {
lines, err := readLines(hc)
if err != nil {
return err
}
sort.Slice(lines, func(i, j int) bool {
return bytes.Compare(lines[i], lines[j]) < 0
})
for _, line := range lines {
fmt.Fprintf(hc.Stdout, "%s\n", line)
}
return nil
},
"grep": func(hc HandlerContext, args []string) error {
var rx *regexp.Regexp
quiet := false
for _, arg := range args {
if arg == "-q" {
quiet = true
} else if arg == "-E" {
} else if rx == nil {
rx = regexp.MustCompile(arg)
}
}
lines, err := readLines(hc)
if err != nil {
return err
}
any := false
for _, line := range lines {
if rx.Match(line) {
if quiet {
return nil
}
any = true
fmt.Fprintf(hc.Stdout, "%s\n", line)
}
}
if !any {
return NewExitStatus(1)
}
return nil
},
"sed": func(hc HandlerContext, args []string) error {
f := hc.Stdin
switch len(args) {
case 1:
case 2:
var err error
f, err = os.Open(absPath(hc.Dir, args[1]))
if err != nil {
return err
}
default:
return fmt.Errorf("usage: sed pattern [file]")
}
expr := args[0]
if expr == "" || expr[0] != 's' {
return fmt.Errorf("unimplemented")
}
sep := expr[1]
expr = expr[2:]
from := expr[:strings.IndexByte(expr, sep)]
expr = expr[len(from)+1:]
to := expr[:strings.IndexByte(expr, sep)]
bs, err := ioutil.ReadAll(f)
if err != nil {
return err
}
rx := regexp.MustCompile(from)
bs = rx.ReplaceAllLiteral(bs, []byte(to))
_, err = hc.Stdout.Write(bs)
return err
},
"mkdir": func(hc HandlerContext, args []string) error {
for _, arg := range args {
if arg == "-p" {
continue
}
path := absPath(hc.Dir, arg)
if err := os.MkdirAll(path, 0777); err != nil {
return err
}
}
return nil
},
"rm": func(hc HandlerContext, args []string) error {
for _, arg := range args {
if arg == "-r" {
continue
}
path := absPath(hc.Dir, arg)
if err := os.RemoveAll(path); err != nil {
return err
}
}
return nil
},
"ln": func(hc HandlerContext, args []string) error {
symbolic := args[0] == "-s"
if symbolic {
args = args[1:]
}
oldname := absPath(hc.Dir, args[0])
newname := absPath(hc.Dir, args[1])
if symbolic {
return os.Symlink(oldname, newname)
}
return os.Link(oldname, newname)
},
"touch": func(hc HandlerContext, args []string) error {
newTime := time.Now()
if args[0] == "-d" {
if !strings.HasPrefix(args[1], "@") {
return fmt.Errorf("unimplemented")
}
sec, err := strconv.ParseInt(args[1][1:], 10, 64)
if err != nil {
return err
}
newTime = time.Unix(sec, 0)
args = args[2:]
}
for _, arg := range args {
path := absPath(hc.Dir, arg)
// create the file if it does not exist
f, err := os.OpenFile(path, os.O_CREATE, 0666)
if err != nil {
return err
}
f.Close()
// change the modification and access time
if err := os.Chtimes(path, newTime, newTime); err != nil {
return err
}
}
return nil
},
"sleep": func(hc HandlerContext, args []string) error {
// Note that, unlike GNU sleep, we don't assume a default unit
// of seconds.
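// For example, "sleep 0.01s" is accepted by time.ParseDuration below,
// while a bare "sleep 1" (GNU-style seconds) would fail to parse here.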
for _, arg := range args {
d, err := time.ParseDuration(arg)
if err != nil {
return err
}
time.Sleep(d)
}
return nil
},
}
func testExecHandler(ctx context.Context, args []string) error {
if fn := testBuiltinsMap[args[0]]; fn != nil {
return fn(HandlerCtx(ctx), args[1:])
}
return DefaultExecHandler(2*time.Second)(ctx, args)
}
func testOpenHandler(ctx context.Context, path string, flag int, perm os.FileMode) (io.ReadWriteCloser, error) {
if runtime.GOOS == "windows" && path == "/dev/null" {
path = "NUL"
}
return DefaultOpenHandler()(ctx, path, flag, perm)
}
func TestRunnerRunConfirm(t *testing.T) {
if testing.Short() {
t.Skip("calling bash is slow")
}
if !hasBash50 {
t.Skip("bash 5.0 required to run")
}
if runtime.GOOS == "windows" {
// For example, bash on Windows seems to treat environment variables as
// case-sensitive, which isn't how Windows works.
t.Skip("bash on Windows emulates Unix-y behavior")
}
for i := range runTests {
t.Run(fmt.Sprintf("%03d", i), func(t *testing.T) {
c := runTests[i]
if strings.Contains(c.want, " #IGNORE") {
return
}
skipIfUnsupported(t, c.in)
t.Parallel()
dir, err := ioutil.TempDir("", "interp-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
cmd := exec.Command("bash")
cmd.Dir = dir
cmd.Stdin = strings.NewReader(c.in)
out, err := cmd.CombinedOutput()
if strings.Contains(c.want, " #JUSTERR") {
// bash sometimes exits with status code 0 and
// stderr "bash: ..." for an error
fauxErr := bytes.HasPrefix(out, []byte("bash:"))
if err == nil && !fauxErr {
t.Fatalf("wanted bash to error in %q", c.in)
}
return
}
got := string(out)
if err != nil {
got += err.Error()
}
if got != c.want {
t.Fatalf("wrong bash output in %q:\nwant: %q\ngot: %q",
c.in, c.want, got)
}
})
}
}
func TestRunnerOpts(t *testing.T) {
t.Parallel()
withPath := func(strs ...string) func(*Runner) error {
prefix := []string{
"PATH=" + os.Getenv("PATH"),
"ENV_PROG=" + os.Getenv("ENV_PROG"),
}
return Env(expand.ListEnviron(append(prefix, strs...)...))
}
opts := func(list ...RunnerOption) []RunnerOption {
return list
}
cases := []struct {
opts []RunnerOption
in, want string
}{
{
nil,
"$ENV_PROG | grep '^INTERP_GLOBAL='",
"INTERP_GLOBAL=value\n",
},
{
opts(withPath()),
"$ENV_PROG | grep '^INTERP_GLOBAL='",
"exit status 1",
},
{
opts(withPath("INTERP_GLOBAL=bar")),
"$ENV_PROG | grep '^INTERP_GLOBAL='",
"INTERP_GLOBAL=bar\n",
},
{
opts(withPath("a=b")),
"echo $a",
"b\n",
},
{
opts(withPath("A=b")),
"$ENV_PROG | grep '^A='; echo $A",
"A=b\nb\n",
},
{
opts(withPath("A=b", "A=c")),
"$ENV_PROG | grep '^A='; echo $A",
"A=c\nc\n",
},
{
opts(withPath("HOME=")),
"echo $HOME",
"\n",
},
{
opts(withPath("PWD=foo")),
"[[ $PWD == foo ]]",
"exit status 1",
},
{
opts(Params("foo")),
"echo $@",
"foo\n",
},
{
opts(Params("-u", "--", "foo")),
"echo $@; echo $unset",
"foo\nunset: unbound variable\nexit status 1",
},
{
opts(Params("foo")),
"set >/dev/null; echo $@",
"foo\n",
},
{
opts(Params("foo")),
"set -e; echo $@",
"foo\n",
},
{
opts(Params("foo")),
"set --; echo $@",
"\n",
},
{
opts(Params("foo")),
"set bar; echo $@",
"bar\n",
},
}
p := syntax.NewParser()
for i, c := range cases {
t.Run(fmt.Sprintf("%03d", i), func(t *testing.T) {
skipIfUnsupported(t, c.in)
file := parse(t, p, c.in)
var cb concBuffer
r, err := New(append(c.opts,
StdIO(nil, &cb, &cb),
OpenHandler(testOpenHandler),
ExecHandler(testExecHandler),
)...)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
if err := r.Run(ctx, file); err != nil {
cb.WriteString(err.Error())
}
if got := cb.String(); got != c.want {
t.Fatalf("wrong output in %q:\nwant: %q\ngot: %q",
c.in, c.want, got)
}
})
}
}
func TestRunnerContext(t *testing.T) {
t.Parallel()
cases := []string{
"",
"while true; do true; done",
"until false; do true; done",
"sleep 1000",
"while true; do true; done & wait",
"sleep 1000 & wait",
"(while true; do true; done)",
"$(while true; do true; done)",
"while true; do true; done | while true; do true; done",
}
p := syntax.NewParser()
for i, in := range cases {
t.Run(fmt.Sprintf("%03d", i), func(t *testing.T) {
file := parse(t, p, in)
ctx, cancel := context.WithCancel(context.Background())
cancel()
r, _ := New()
errChan := make(chan error)
go func() {
errChan <- r.Run(ctx, file)
}()
select {
case err := <-errChan:
if err != nil && err != ctx.Err() {
t.Fatal("Runner did not use ctx.Err()")
}
case <-time.After(time.Millisecond * 100):
t.Fatal("program was not killed in 0.1s")
}
})
}
}
func TestRunnerAltNodes(t *testing.T) {
t.Parallel()
in := "echo foo"
file := parse(t, nil, in)
want := "foo\n"
nodes := []syntax.Node{
file,
file.Stmts[0],
file.Stmts[0].Cmd,
}
for _, node := range nodes {
var cb concBuffer
r, _ := New(StdIO(nil, &cb, &cb))
ctx := context.Background()
if err := r.Run(ctx, node); err != nil {
cb.WriteString(err.Error())
}
if got := cb.String(); got != want {
t.Fatalf("wrong output in %q:\nwant: %q\ngot: %q",
in, want, got)
}
}
}
func TestElapsedString(t *testing.T) {
t.Parallel()
tests := []struct {
in time.Duration
posix bool
want string
}{
{time.Nanosecond, false, "0m0.000s"},
{time.Millisecond, false, "0m0.001s"},
{time.Millisecond, true, "0.00"},
{2500 * time.Millisecond, false, "0m2.500s"},
{2500 * time.Millisecond, true, "2.50"},
{
10*time.Minute + 10*time.Second,
false,
"10m10.000s",
},
{
10*time.Minute + 10*time.Second,
true,
"610.00",
},
}
for _, tc := range tests {
t.Run(tc.in.String(), func(t *testing.T) {
got := elapsedString(tc.in, tc.posix)
if got != tc.want {
t.Fatalf("wanted %q, got %q", tc.want, got)
}
})
}
}
func TestRunnerDir(t *testing.T) {
t.Parallel()
wd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
t.Run("Missing", func(t *testing.T) {
_, err := New(Dir("missing"))
if err == nil {
t.Fatal("expected New to error when Dir is missing")
}
})
t.Run("NoDir", func(t *testing.T) {
_, err := New(Dir("interp_test.go"))
if err == nil {
t.Fatal("expected New to error when Dir is not a dir")
}
})
t.Run("NoDirAbs", func(t *testing.T) {
_, err := New(Dir(filepath.Join(wd, "interp_test.go")))
if err == nil {
t.Fatal("expected New to error when Dir is not a dir")
}
})
t.Run("Relative", func(t *testing.T) {
// On Windows, it's impossible to make a relative path from one
// drive to another. Use the parent directory, as that is certainly
// on the same drive as the current directory.
rel := ".." + string(filepath.Separator)
r, err := New(Dir(rel))
if err != nil {
t.Fatal(err)
}
if !filepath.IsAbs(r.Dir) {
t.Errorf("Runner.Dir is not absolute")
}
})
}
func TestRunnerIncremental(t *testing.T) {
t.Parallel()
file := parse(t, nil, "echo foo; false; echo bar; exit 0; echo baz")
want := "foo\nbar\n"
var b bytes.Buffer
r, _ := New(StdIO(nil, &b, &b))
ctx := context.Background()
for _, stmt := range file.Stmts {
err := r.Run(ctx, stmt)
if _, ok := IsExitStatus(err); !ok && err != nil {
// Keep track of unexpected errors.
b.WriteString(err.Error())
}
if r.Exited() {
break
}
}
if got := b.String(); got != want {
t.Fatalf("\nwant: %q\ngot: %q", want, got)
}
}
func TestRunnerResetFields(t *testing.T) {
t.Parallel()
dir, err := ioutil.TempDir("", "interp")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
logPath := filepath.Join(dir, "log")
logFile, err := os.Create(logPath)
if err != nil {
t.Fatal(err)
}
defer logFile.Close()
r, _ := New(
Params("-f", "--", "first", dir, logPath),
Dir(dir),
OpenHandler(testOpenHandler),
ExecHandler(testExecHandler),
)
// Check that state set via option funcs and via Runner fields directly
// is still preserved after Reset.
StdIO(nil, logFile, os.Stderr)(r)
r.Env = expand.ListEnviron(append(os.Environ(), "GLOBAL=foo")...)
file := parse(t, nil, `
# Params set 3 arguments
[[ $# -eq 3 ]] || exit 10
[[ $1 == "first" ]] || exit 11
# Params set the -f option (noglob)
[[ -o noglob ]] || exit 12
# $PWD was set via Dir, and should be equal to $2
[[ "$PWD" == "$2" ]] || exit 13
# stdout should go into the log file, which is at $3
echo line1
echo line2
[[ "$(wc -l <$3)" == "2" ]] || exit 14
# $GLOBAL was set directly via the Env field
[[ "$GLOBAL" == "foo" ]] || exit 15
# Change all of the above within the script. Reset should undo this.
set +f -- newargs
cd
exec >/dev/null 2>/dev/null
GLOBAL=
export GLOBAL=
`)
ctx := context.Background()
for i := 0; i < 3; i++ {
if err := r.Run(ctx, file); err != nil {
t.Fatalf("run number %d: %v", i, err)
}
r.Reset()
// empty the log file too
logFile.Truncate(0)
logFile.Seek(0, io.SeekStart)
}
}
func TestRunnerManyResets(t *testing.T) {
t.Parallel()
r, _ := New()
for i := 0; i < 5; i++ {
r.Reset()
}
}
func TestRunnerFilename(t *testing.T) {
t.Parallel()
want := "f.sh\n"
file, _ := syntax.NewParser().Parse(strings.NewReader("echo $0"), "f.sh")
var b bytes.Buffer
r, _ := New(StdIO(nil, &b, &b))
ctx := context.Background()
if err := r.Run(ctx, file); err != nil {
t.Fatal(err)
}
if got := b.String(); got != want {
t.Fatalf("\nwant: %q\ngot: %q", want, got)
}
}
func TestRunnerEnvNoModify(t *testing.T) {
t.Parallel()
env := expand.ListEnviron("one=1", "two=2")
file := parse(t, nil, `echo -n "$one $two; "; one=x; unset two`)
var b bytes.Buffer
r, _ := New(Env(env), StdIO(nil, &b, &b))
ctx := context.Background()
for i := 0; i < 3; i++ {
r.Reset()
err := r.Run(ctx, file)
if err != nil {
t.Fatal(err)
}
}
want := "1 2; 1 2; 1 2; "
if got := b.String(); got != want {
t.Fatalf("\nwant: %q\ngot: %q", want, got)
}
}
func TestMalformedPathOnWindows(t *testing.T) {
if runtime.GOOS != "windows" {
t.Skip("Skipping windows test on non-windows GOOS")
}
dir, err := ioutil.TempDir("", "interp-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
path := filepath.Join(dir, "test.cmd")
script := []byte("@echo foo")
if err := ioutil.WriteFile(path, script, 0777); err != nil {
t.Fatal(err)
}
// set PATH to c:\tmp\dir instead of C:\tmp\dir
volume := filepath.VolumeName(dir)
pathList := strings.ToLower(volume) + dir[len(volume):]
file := parse(t, nil, "test.cmd")
var cb concBuffer
r, _ := New(Env(expand.ListEnviron("PATH="+pathList)), StdIO(nil, &cb, &cb))
if err := r.Run(context.Background(), file); err != nil {
t.Fatal(err)
}
want := "foo\r\n"
if got := cb.String(); got != want {
t.Fatalf("wrong output:\nwant: %q\ngot: %q", want, got)
}
}
| [
"\"GOSH_PROG\"",
"\"PATH\"",
"\"ENV_PROG\""
]
| []
| [
"GOSH_PROG",
"ENV_PROG",
"PATH"
]
| [] | ["GOSH_PROG", "ENV_PROG", "PATH"] | go | 3 | 0 | |
datasets/extract_frames_and_masks_3dhp.py | #!/usr/bin/env python3
import glob
import multiprocessing
import os
import pathlib
import sys
import imageio
import numpy as np
def main():
if 'DATA_ROOT' not in os.environ:
print('Set the DATA_ROOT environment variable to the parent dir of the 3dhp directory.')
sys.exit(1)
pool = multiprocessing.Pool()
data_root = os.environ['DATA_ROOT']
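# Example layout (illustrative): DATA_ROOT=/path/to/data means the videos are
# expected under /path/to/data/3dhp/**/imageSequence/*.avi and **/FGmasks/*.avi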
video_paths = glob.glob(f'{data_root}/3dhp/**/imageSequence/*.avi', recursive=True)
pool.map(extract_frames, video_paths)
chroma_paths = glob.glob(f'{data_root}/3dhp/**/FGmasks/*.avi', recursive=True)
pool.map(extract_chroma_masks, chroma_paths)
def extract_chroma_masks(chroma_path):
"""Save a thresholded version of everyt 5th foreground mask."""
print('Processing', chroma_path)
video_name = pathlib.Path(chroma_path).stem
dst_folder_path = pathlib.Path(chroma_path).parents[1] / 'FGmaskImages' / video_name
os.makedirs(dst_folder_path, exist_ok=True)
with imageio.get_reader(chroma_path, 'ffmpeg') as reader:
for i_frame, frame in enumerate(reader):
if i_frame % 5 == 0:
dst_filename = f'frame_{i_frame:06d}.png'
dst_path = os.path.join(dst_folder_path, dst_filename)
frame = 255 * (frame[..., 0] > 32).astype(np.uint8)
imageio.imwrite(dst_path, frame)
def extract_frames(src_video_path):
"""Save every 5th frame."""
print('Processing', src_video_path)
video_name = pathlib.Path(src_video_path).stem
dst_folder_path = pathlib.Path(src_video_path).parents[1] / 'Images' / video_name
os.makedirs(dst_folder_path, exist_ok=True)
with imageio.get_reader(src_video_path, 'ffmpeg') as reader:
for i_frame, frame in enumerate(reader):
if i_frame % 5 == 0:
dst_filename = f'frame_{i_frame:06d}.jpg'
dst_path = os.path.join(dst_folder_path, dst_filename)
imageio.imwrite(dst_path, frame)
if __name__ == '__main__':
main()
| []
| []
| [
"DATA_ROOT"
]
| [] | ["DATA_ROOT"] | python | 1 | 0 | |
rest/api/v2010/accounts_messages_media.go | /*
* Twilio - Api
*
* This is the public Twilio REST API.
*
* API version: 1.24.0
* Contact: [email protected]
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package openapi
import (
"encoding/json"
"fmt"
"net/url"
"strings"
"time"
"github.com/patnunes/twilio-go/client"
)
// Optional parameters for the method 'DeleteMedia'
type DeleteMediaParams struct {
// The SID of the [Account](https://www.twilio.com/docs/iam/api/account) that created the Media resource(s) to delete.
PathAccountSid *string `json:"PathAccountSid,omitempty"`
}
func (params *DeleteMediaParams) SetPathAccountSid(PathAccountSid string) *DeleteMediaParams {
params.PathAccountSid = &PathAccountSid
return params
}
// Delete media from your account. Once deleted, you will no longer be billed
func (c *ApiService) DeleteMedia(MessageSid string, Sid string, params *DeleteMediaParams) error {
path := "/2010-04-01/Accounts/{AccountSid}/Messages/{MessageSid}/Media/{Sid}.json"
if params != nil && params.PathAccountSid != nil {
path = strings.Replace(path, "{"+"AccountSid"+"}", *params.PathAccountSid, -1)
} else {
path = strings.Replace(path, "{"+"AccountSid"+"}", c.requestHandler.Client.AccountSid(), -1)
}
path = strings.Replace(path, "{"+"MessageSid"+"}", MessageSid, -1)
path = strings.Replace(path, "{"+"Sid"+"}", Sid, -1)
data := url.Values{}
headers := make(map[string]interface{})
resp, err := c.requestHandler.Delete(c.baseURL+path, data, headers)
if err != nil {
return err
}
defer resp.Body.Close()
return nil
}
// Optional parameters for the method 'FetchMedia'
type FetchMediaParams struct {
// The SID of the [Account](https://www.twilio.com/docs/iam/api/account) that created the Media resource(s) to fetch.
PathAccountSid *string `json:"PathAccountSid,omitempty"`
}
func (params *FetchMediaParams) SetPathAccountSid(PathAccountSid string) *FetchMediaParams {
params.PathAccountSid = &PathAccountSid
return params
}
// Fetch a single media instance belonging to the account used to make the request
func (c *ApiService) FetchMedia(MessageSid string, Sid string, params *FetchMediaParams) (*ApiV2010Media, error) {
path := "/2010-04-01/Accounts/{AccountSid}/Messages/{MessageSid}/Media/{Sid}.json"
if params != nil && params.PathAccountSid != nil {
path = strings.Replace(path, "{"+"AccountSid"+"}", *params.PathAccountSid, -1)
} else {
path = strings.Replace(path, "{"+"AccountSid"+"}", c.requestHandler.Client.AccountSid(), -1)
}
path = strings.Replace(path, "{"+"MessageSid"+"}", MessageSid, -1)
path = strings.Replace(path, "{"+"Sid"+"}", Sid, -1)
data := url.Values{}
headers := make(map[string]interface{})
resp, err := c.requestHandler.Get(c.baseURL+path, data, headers)
if err != nil {
return nil, err
}
defer resp.Body.Close()
ps := &ApiV2010Media{}
if err := json.NewDecoder(resp.Body).Decode(ps); err != nil {
return nil, err
}
return ps, err
}
// Optional parameters for the method 'ListMedia'
type ListMediaParams struct {
// The SID of the [Account](https://www.twilio.com/docs/iam/api/account) that created the Media resource(s) to read.
PathAccountSid *string `json:"PathAccountSid,omitempty"`
// Only include media that was created on this date. Specify a date as `YYYY-MM-DD` in GMT, for example: `2009-07-06`, to read media that was created on this date. You can also specify an inequality, such as `StartTime<=YYYY-MM-DD`, to read media that was created on or before midnight of this date, and `StartTime>=YYYY-MM-DD` to read media that was created on or after midnight of this date.
DateCreated *time.Time `json:"DateCreated,omitempty"`
// Only include media that was created on this date. Specify a date as `YYYY-MM-DD` in GMT, for example: `2009-07-06`, to read media that was created on this date. You can also specify an inequality, such as `StartTime<=YYYY-MM-DD`, to read media that was created on or before midnight of this date, and `StartTime>=YYYY-MM-DD` to read media that was created on or after midnight of this date.
DateCreatedBefore *time.Time `json:"DateCreated<,omitempty"`
// Only include media that was created on this date. Specify a date as `YYYY-MM-DD` in GMT, for example: `2009-07-06`, to read media that was created on this date. You can also specify an inequality, such as `StartTime<=YYYY-MM-DD`, to read media that was created on or before midnight of this date, and `StartTime>=YYYY-MM-DD` to read media that was created on or after midnight of this date.
DateCreatedAfter *time.Time `json:"DateCreated>,omitempty"`
// How many resources to return in each list page. The default is 50, and the maximum is 1000.
PageSize *int `json:"PageSize,omitempty"`
// Max number of records to return.
Limit *int `json:"limit,omitempty"`
}
func (params *ListMediaParams) SetPathAccountSid(PathAccountSid string) *ListMediaParams {
params.PathAccountSid = &PathAccountSid
return params
}
func (params *ListMediaParams) SetDateCreated(DateCreated time.Time) *ListMediaParams {
params.DateCreated = &DateCreated
return params
}
func (params *ListMediaParams) SetDateCreatedBefore(DateCreatedBefore time.Time) *ListMediaParams {
params.DateCreatedBefore = &DateCreatedBefore
return params
}
func (params *ListMediaParams) SetDateCreatedAfter(DateCreatedAfter time.Time) *ListMediaParams {
params.DateCreatedAfter = &DateCreatedAfter
return params
}
func (params *ListMediaParams) SetPageSize(PageSize int) *ListMediaParams {
params.PageSize = &PageSize
return params
}
func (params *ListMediaParams) SetLimit(Limit int) *ListMediaParams {
params.Limit = &Limit
return params
}
// Retrieve a single page of Media records from the API. Request is executed immediately.
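// Illustrative call (the Message SID below is a placeholder, not a real resource):
//
//	page, err := c.PageMedia("MM_PLACEHOLDER_SID", (&ListMediaParams{}).SetPageSize(20), "", "")
//	// page.MediaList then holds at most 20 ApiV2010Media records.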
func (c *ApiService) PageMedia(MessageSid string, params *ListMediaParams, pageToken, pageNumber string) (*ListMediaResponse, error) {
path := "/2010-04-01/Accounts/{AccountSid}/Messages/{MessageSid}/Media.json"
if params != nil && params.PathAccountSid != nil {
path = strings.Replace(path, "{"+"AccountSid"+"}", *params.PathAccountSid, -1)
} else {
path = strings.Replace(path, "{"+"AccountSid"+"}", c.requestHandler.Client.AccountSid(), -1)
}
path = strings.Replace(path, "{"+"MessageSid"+"}", MessageSid, -1)
data := url.Values{}
headers := make(map[string]interface{})
if params != nil && params.DateCreated != nil {
data.Set("DateCreated", fmt.Sprint((*params.DateCreated).Format(time.RFC3339)))
}
if params != nil && params.DateCreatedBefore != nil {
data.Set("DateCreated<", fmt.Sprint((*params.DateCreatedBefore).Format(time.RFC3339)))
}
if params != nil && params.DateCreatedAfter != nil {
data.Set("DateCreated>", fmt.Sprint((*params.DateCreatedAfter).Format(time.RFC3339)))
}
if params != nil && params.PageSize != nil {
data.Set("PageSize", fmt.Sprint(*params.PageSize))
}
if pageToken != "" {
data.Set("PageToken", pageToken)
}
if pageNumber != "" {
data.Set("Page", pageNumber)
}
resp, err := c.requestHandler.Get(c.baseURL+path, data, headers)
if err != nil {
return nil, err
}
defer resp.Body.Close()
ps := &ListMediaResponse{}
if err := json.NewDecoder(resp.Body).Decode(ps); err != nil {
return nil, err
}
return ps, err
}
// Lists Media records from the API as a list. Unlike stream, this operation is eager and loads 'limit' records into memory before returning.
func (c *ApiService) ListMedia(MessageSid string, params *ListMediaParams) ([]ApiV2010Media, error) {
if params == nil {
params = &ListMediaParams{}
}
params.SetPageSize(client.ReadLimits(params.PageSize, params.Limit))
response, err := c.PageMedia(MessageSid, params, "", "")
if err != nil {
return nil, err
}
curRecord := 0
var records []ApiV2010Media
for response != nil {
records = append(records, response.MediaList...)
var record interface{}
if record, err = client.GetNext(c.baseURL, response, &curRecord, params.Limit, c.getNextListMediaResponse); record == nil || err != nil {
return records, err
}
response = record.(*ListMediaResponse)
}
return records, err
}
// Streams Media records from the API as a channel stream. This operation lazily loads records as efficiently as possible until the limit is reached.
func (c *ApiService) StreamMedia(MessageSid string, params *ListMediaParams) (chan ApiV2010Media, error) {
if params == nil {
params = &ListMediaParams{}
}
params.SetPageSize(client.ReadLimits(params.PageSize, params.Limit))
response, err := c.PageMedia(MessageSid, params, "", "")
if err != nil {
return nil, err
}
curRecord := 0
// set buffer size of the channel to 1
channel := make(chan ApiV2010Media, 1)
go func() {
for response != nil {
for item := range response.MediaList {
channel <- response.MediaList[item]
}
var record interface{}
if record, err = client.GetNext(c.baseURL, response, &curRecord, params.Limit, c.getNextListMediaResponse); record == nil || err != nil {
close(channel)
return
}
response = record.(*ListMediaResponse)
}
close(channel)
}()
return channel, err
}
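// Illustrative usage sketch, not part of the generated client: how the service
// value is obtained (here apiService) and the example MessageSid are assumptions
// made only for demonstration.
//
//	params := &ListMediaParams{}
//	params.SetPageSize(20).SetLimit(100)
//
//	// Eager: ListMedia loads up to Limit records into memory before returning.
//	records, _ := apiService.ListMedia("MMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", params)
//	_ = records // up to 100 records, fetched 20 per page
//
//	// Lazy: StreamMedia pages through results and feeds them to a channel.
//	stream, _ := apiService.StreamMedia("MMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", params)
//	for media := range stream {
//		_ = media // process each ApiV2010Media as it arrives
//	}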
func (c *ApiService) getNextListMediaResponse(nextPageUrl string) (interface{}, error) {
if nextPageUrl == "" {
return nil, nil
}
resp, err := c.requestHandler.Get(nextPageUrl, nil, nil)
if err != nil {
return nil, err
}
defer resp.Body.Close()
ps := &ListMediaResponse{}
if err := json.NewDecoder(resp.Body).Decode(ps); err != nil {
return nil, err
}
return ps, nil
}
| []
| []
| []
| [] | [] | go | null | null | null |
src/frontend/handlers.go | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"html/template"
"math/rand"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/gorilla/mux"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/signalfx/signalfx-go-tracing/ddtrace/tracer"
"github.com/sirupsen/logrus"
pb "github.com/signalfx/microservices-demo/src/frontend/genproto"
"github.com/signalfx/microservices-demo/src/frontend/money"
)
type platformDetails struct {
css string
provider string
}
var (
templates = template.Must(template.New("").
Funcs(template.FuncMap{
"renderMoney": renderMoney,
}).ParseGlob("templates/*.html"))
plat platformDetails
)
func (fe *frontendServer) homeHandler(w http.ResponseWriter, r *http.Request) {
log := getLoggerWithTraceFields(r.Context())
log.WithField("currency", currentCurrency(r)).Info("home")
currencies, err := fe.getCurrencies(r.Context())
if err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "could not retrieve currencies"), http.StatusInternalServerError)
return
}
products, err := fe.getProducts(r.Context())
if err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "could not retrieve products"), http.StatusInternalServerError)
return
}
cart, err := fe.getCart(r.Context(), sessionID(r))
if err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "could not retrieve cart"), http.StatusInternalServerError)
return
}
type productView struct {
Item *pb.Product
Price *pb.Money
}
ps := make([]productView, len(products))
for i, p := range products {
price, err := fe.convertCurrency(r.Context(), p.GetPriceUsd(), currentCurrency(r))
if err != nil {
renderHTTPError(log, r, w, errors.Wrapf(err, "failed to do currency conversion for product %s", p.GetId()), http.StatusInternalServerError)
return
}
ps[i] = productView{p, price}
}
// get env and render correct platform banner.
var env = os.Getenv("ENV_PLATFORM")
plat = platformDetails{}
plat.setPlatformDetails(strings.ToLower(env))
if err := templates.ExecuteTemplate(w, "home", map[string]interface{}{
"session_id": sessionID(r),
"request_id": r.Context().Value(ctxKeyRequestID{}),
"user_currency": currentCurrency(r),
"currencies": currencies,
"products": ps,
"cart_size": cartSize(cart),
"banner_color": os.Getenv("BANNER_COLOR"), // illustrates canary deployments
"ad": fe.chooseAd(r.Context(), []string{}, log),
"platform_css": plat.css,
"platform_name": plat.provider,
"extra_headers": template.HTML(os.Getenv("FRONTEND_EXTRA_HEADERS")),
}); err != nil {
log.Error(err)
}
}
func (plat *platformDetails) setPlatformDetails(env string) {
switch env {
case "aws":
plat.provider = "AWS"
plat.css = "aws-platform"
case "onprem":
plat.provider = "On-Premises"
plat.css = "onprem-platform"
case "azure":
plat.provider = "Azure"
plat.css = "azure-platform"
default:
plat.provider = "Google Cloud"
plat.css = "gcp-platform"
}
}
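// Example (illustrative) of how setPlatformDetails is driven: deploying with
// ENV_PLATFORM=azure makes the pages use the "azure-platform" CSS class and show
// "Azure" as the provider; an unset or unrecognized value falls back to the
// Google Cloud banner.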
func (fe *frontendServer) productHandler(w http.ResponseWriter, r *http.Request) {
log := getLoggerWithTraceFields(r.Context())
id := mux.Vars(r)["id"]
if id == "" {
renderHTTPError(log, r, w, errors.New("product id not specified"), http.StatusBadRequest)
return
}
log.WithField("id", id).WithField("currency", currentCurrency(r)).
Debug("serving product page")
p, err := fe.getProduct(r.Context(), id)
if err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "could not retrieve product"), http.StatusInternalServerError)
return
}
currencies, err := fe.getCurrencies(r.Context())
if err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "could not retrieve currencies"), http.StatusInternalServerError)
return
}
cart, err := fe.getCart(r.Context(), sessionID(r))
if err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "could not retrieve cart"), http.StatusInternalServerError)
return
}
price, err := fe.convertCurrency(r.Context(), p.GetPriceUsd(), currentCurrency(r))
if err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "failed to convert currency"), http.StatusInternalServerError)
return
}
recommendations, err := fe.getRecommendations(r.Context(), sessionID(r), []string{id})
if err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "failed to get product recommendations"), http.StatusInternalServerError)
return
}
product := struct {
Item *pb.Product
Price *pb.Money
}{p, price}
if err := templates.ExecuteTemplate(w, "product", map[string]interface{}{
"session_id": sessionID(r),
"request_id": r.Context().Value(ctxKeyRequestID{}),
"ad": fe.chooseAd(r.Context(), p.Categories, log),
"user_currency": currentCurrency(r),
"currencies": currencies,
"product": product,
"recommendations": recommendations,
"cart_size": cartSize(cart),
"platform_css": plat.css,
"platform_name": plat.provider,
"extra_headers": template.HTML(os.Getenv("FRONTEND_EXTRA_HEADERS")),
}); err != nil {
log.Println(err)
}
}
func (fe *frontendServer) addToCartHandler(w http.ResponseWriter, r *http.Request) {
log := getLoggerWithTraceFields(r.Context())
quantity, _ := strconv.ParseUint(r.FormValue("quantity"), 10, 32)
productID := r.FormValue("product_id")
if productID == "" || quantity == 0 {
renderHTTPError(log, r, w, errors.New("invalid form input"), http.StatusBadRequest)
return
}
log.WithField("product", productID).WithField("quantity", quantity).Debug("adding to cart")
p, err := fe.getProduct(r.Context(), productID)
if err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "could not retrieve product"), http.StatusInternalServerError)
return
}
if err := fe.insertCart(r.Context(), sessionID(r), p.GetId(), int32(quantity)); err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "failed to add to cart"), http.StatusInternalServerError)
return
}
w.Header().Set("location", "/cart")
w.WriteHeader(http.StatusFound)
}
func (fe *frontendServer) emptyCartHandler(w http.ResponseWriter, r *http.Request) {
log := getLoggerWithTraceFields(r.Context())
log.Debug("emptying cart")
if err := fe.emptyCart(r.Context(), sessionID(r)); err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "failed to empty cart"), http.StatusInternalServerError)
return
}
w.Header().Set("location", "/")
w.WriteHeader(http.StatusFound)
}
func (fe *frontendServer) viewCartHandler(w http.ResponseWriter, r *http.Request) {
log := getLoggerWithTraceFields(r.Context())
log.Debug("view user cart")
currencies, err := fe.getCurrencies(r.Context())
if err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "could not retrieve currencies"), http.StatusInternalServerError)
return
}
cart, err := fe.getCart(r.Context(), sessionID(r))
if err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "could not retrieve cart"), http.StatusInternalServerError)
return
}
recommendations, err := fe.getRecommendations(r.Context(), sessionID(r), cartIDs(cart))
if err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "failed to get product recommendations"), http.StatusInternalServerError)
return
}
shippingCost, err := fe.getShippingQuote(r.Context(), cart, currentCurrency(r))
if err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "failed to get shipping quote"), http.StatusInternalServerError)
return
}
type cartItemView struct {
Item *pb.Product
Quantity int32
Price *pb.Money
}
items := make([]cartItemView, len(cart))
totalPrice := pb.Money{CurrencyCode: currentCurrency(r)}
for i, item := range cart {
p, err := fe.getProduct(r.Context(), item.GetProductId())
if err != nil {
renderHTTPError(log, r, w, errors.Wrapf(err, "could not retrieve product #%s", item.GetProductId()), http.StatusInternalServerError)
return
}
price, err := fe.convertCurrency(r.Context(), p.GetPriceUsd(), currentCurrency(r))
if err != nil {
renderHTTPError(log, r, w, errors.Wrapf(err, "could not convert currency for product #%s", item.GetProductId()), http.StatusInternalServerError)
return
}
multPrice := money.MultiplySlow(*price, uint32(item.GetQuantity()))
items[i] = cartItemView{
Item: p,
Quantity: item.GetQuantity(),
Price: &multPrice}
totalPrice = money.Must(money.Sum(totalPrice, multPrice))
}
totalPrice = money.Must(money.Sum(totalPrice, *shippingCost))
log.Infof("🌈 ITEMS: %v", items)
year := time.Now().Year()
if err := templates.ExecuteTemplate(w, "cart", map[string]interface{}{
"session_id": sessionID(r),
"request_id": r.Context().Value(ctxKeyRequestID{}),
"user_currency": currentCurrency(r),
"currencies": currencies,
"recommendations": recommendations,
"cart_size": cartSize(cart),
"shipping_cost": shippingCost,
"total_cost": totalPrice,
"items": items,
"expiration_years": []int{year, year + 1, year + 2, year + 3, year + 4},
"platform_css": plat.css,
"platform_name": plat.provider,
"extra_headers": template.HTML(os.Getenv("FRONTEND_EXTRA_HEADERS")),
}); err != nil {
log.Println(err)
}
}
func (fe *frontendServer) placeOrderHandler(w http.ResponseWriter, r *http.Request) {
log := getLoggerWithTraceFields(r.Context())
log.Debug("placing order")
var (
email = r.FormValue("email")
streetAddress = r.FormValue("street_address")
zipCode, _ = strconv.ParseInt(r.FormValue("zip_code"), 10, 32)
city = r.FormValue("city")
state = r.FormValue("state")
country = r.FormValue("country")
ccNumber = r.FormValue("credit_card_number")
ccMonth, _ = strconv.ParseInt(r.FormValue("credit_card_expiration_month"), 10, 32)
ccYear, _ = strconv.ParseInt(r.FormValue("credit_card_expiration_year"), 10, 32)
ccCVV, _ = strconv.ParseInt(r.FormValue("credit_card_cvv"), 10, 32)
)
order, err := pb.NewCheckoutServiceClient(fe.checkoutSvcConn).
PlaceOrder(r.Context(), &pb.PlaceOrderRequest{
Email: email,
CreditCard: &pb.CreditCardInfo{
CreditCardNumber: ccNumber,
CreditCardExpirationMonth: int32(ccMonth),
CreditCardExpirationYear: int32(ccYear),
CreditCardCvv: int32(ccCVV)},
UserId: sessionID(r),
UserCurrency: currentCurrency(r),
Address: &pb.Address{
StreetAddress: streetAddress,
City: city,
State: state,
ZipCode: int32(zipCode),
Country: country},
})
if err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "failed to complete the order"), http.StatusInternalServerError)
return
}
log.WithField("order", order.GetOrder().GetOrderId()).Info("order placed")
order.GetOrder().GetItems()
recommendations, _ := fe.getRecommendations(r.Context(), sessionID(r), nil)
totalPaid := *order.GetOrder().GetShippingCost()
for _, v := range order.GetOrder().GetItems() {
totalPaid = money.Must(money.Sum(totalPaid, *v.GetCost()))
}
currencies, err := fe.getCurrencies(r.Context())
if err != nil {
renderHTTPError(log, r, w, errors.Wrap(err, "could not retrieve currencies"), http.StatusInternalServerError)
return
}
if err := templates.ExecuteTemplate(w, "order", map[string]interface{}{
"session_id": sessionID(r),
"request_id": r.Context().Value(ctxKeyRequestID{}),
"user_currency": currentCurrency(r),
"currencies": currencies,
"order": order.GetOrder(),
"total_paid": &totalPaid,
"recommendations": recommendations,
"platform_css": plat.css,
"platform_name": plat.provider,
"extra_headers": template.HTML(os.Getenv("FRONTEND_EXTRA_HEADERS")),
}); err != nil {
log.Println(err)
}
}
func (fe *frontendServer) logoutHandler(w http.ResponseWriter, r *http.Request) {
log := getLoggerWithTraceFields(r.Context())
log.Debug("logging out")
for _, c := range r.Cookies() {
c.Expires = time.Now().Add(-time.Hour * 24 * 365)
c.MaxAge = -1
http.SetCookie(w, c)
}
w.Header().Set("Location", "/")
w.WriteHeader(http.StatusFound)
}
func (fe *frontendServer) setCurrencyHandler(w http.ResponseWriter, r *http.Request) {
log := getLoggerWithTraceFields(r.Context())
cur := r.FormValue("currency_code")
log.WithField("curr.new", cur).WithField("curr.old", currentCurrency(r)).
Debug("setting currency")
if cur != "" {
http.SetCookie(w, &http.Cookie{
Name: cookieCurrency,
Value: cur,
MaxAge: cookieMaxAge,
})
}
referer := r.Header.Get("referer")
if referer == "" {
referer = "/"
}
w.Header().Set("Location", referer)
w.WriteHeader(http.StatusFound)
}
// chooseAd queries for advertisements available and randomly chooses one, if
// available. It ignores the error retrieving the ad since it is not critical.
func (fe *frontendServer) chooseAd(ctx context.Context, ctxKeys []string, log logrus.FieldLogger) *pb.Ad {
ads, err := fe.getAd(ctx, ctxKeys)
if err != nil {
log.WithField("error", err).Warn("failed to retrieve ads")
return nil
}
return ads[rand.Intn(len(ads))]
}
func renderHTTPError(log logrus.FieldLogger, r *http.Request, w http.ResponseWriter, err error, code int) {
log.WithField("error", err).Error("request error")
errMsg := fmt.Sprintf("%+v", err)
w.WriteHeader(code)
templates.ExecuteTemplate(w, "error", map[string]interface{}{
"session_id": sessionID(r),
"request_id": r.Context().Value(ctxKeyRequestID{}),
"error": errMsg,
"status_code": code,
"extra_headers": template.HTML(os.Getenv("FRONTEND_EXTRA_HEADERS")),
"status": http.StatusText(code)})
}
func currentCurrency(r *http.Request) string {
c, _ := r.Cookie(cookieCurrency)
if c != nil {
return c.Value
}
return defaultCurrency
}
func sessionID(r *http.Request) string {
v := r.Context().Value(ctxKeySessionID{})
if v != nil {
return v.(string)
}
return ""
}
func cartIDs(c []*pb.CartItem) []string {
out := make([]string, len(c))
for i, v := range c {
out[i] = v.GetProductId()
}
return out
}
// get total # of items in cart
func cartSize(c []*pb.CartItem) int {
cartSize := 0
for _, item := range c {
cartSize += int(item.GetQuantity())
}
return cartSize
}
func renderMoney(money pb.Money) string {
return fmt.Sprintf("%s %d.%02d", money.GetCurrencyCode(), money.GetUnits(), money.GetNanos()/10000000)
}
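// Worked example (illustrative) for renderMoney:
// renderMoney(pb.Money{CurrencyCode: "USD", Units: 12, Nanos: 340000000}) returns
// "USD 12.34": nanos are billionths of a unit, so dividing by 10,000,000 keeps the
// two most significant decimal digits.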
func getLoggerWithTraceFields(ctx context.Context) *logrus.Entry {
log := ctx.Value(ctxKeyLog{}).(logrus.FieldLogger)
fields := logrus.Fields{}
if span := opentracing.SpanFromContext(ctx); span != nil {
spanCtx := span.Context()
fields["trace_id"] = tracer.TraceIDHex(spanCtx)
fields["span_id"] = tracer.SpanIDHex(spanCtx)
fields["service.name"] = "frontend"
}
return log.WithFields(fields)
}
| [
"\"ENV_PLATFORM\"",
"\"BANNER_COLOR\"",
"\"FRONTEND_EXTRA_HEADERS\"",
"\"FRONTEND_EXTRA_HEADERS\"",
"\"FRONTEND_EXTRA_HEADERS\"",
"\"FRONTEND_EXTRA_HEADERS\"",
"\"FRONTEND_EXTRA_HEADERS\""
]
| []
| [
"FRONTEND_EXTRA_HEADERS",
"ENV_PLATFORM",
"BANNER_COLOR"
]
| [] | ["FRONTEND_EXTRA_HEADERS", "ENV_PLATFORM", "BANNER_COLOR"] | go | 3 | 0 | |
diary/20181119/snapshot.py | import os
import time
from selenium import webdriver
BROWSER_HEIGHT = 1024
BROWSER_WIDTH = 800
USERNAME = os.environ.get("APP_USERNAME")
PASSWORD = os.environ.get("APP_PASSWORD")
BOARD_ID = os.environ.get("APP_BOARD_ID")
DRIVER_PATH = os.environ.get("APP_WEBDRIVER_PATH", "geckodriver")
# Note: environment variables are strings, so parse the flag instead of treating
# any non-empty value (including "false") as truthy.
HEADLESS = os.environ.get("APP_ENABLE_HEADLESS", "true").lower() not in ("0", "false", "no")
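# Illustrative invocation; every value below is a placeholder, not a real credential:
#
#   APP_USERNAME=someone@example.com \
#   APP_PASSWORD=secret \
#   APP_BOARD_ID=AbCdEfGh \
#   APP_WEBDRIVER_PATH=/usr/local/bin/geckodriver \
#   APP_ENABLE_HEADLESS=true \
#   python snapshot.py
#
# The script logs in to Trello, opens the board, and writes board.png to the
# current directory.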
class SiteCapture:
def __init__(self):
firefox_options = webdriver.FirefoxOptions()
if HEADLESS:
firefox_options.add_argument("-headless")
self.driver = webdriver.Firefox(
executable_path=DRIVER_PATH,
options=firefox_options,
)
self.driver.set_window_size(BROWSER_HEIGHT, BROWSER_WIDTH)
def step_login(self):
self.driver.get("https://trello.com/login")
self.driver.find_element_by_css_selector("#user").send_keys(USERNAME)
self.driver.find_element_by_css_selector("#password").send_keys(PASSWORD)
self.driver.find_element_by_css_selector("#login").click()
def step_snap(self):
# implicitly_wait is not enough to reach the board screen, so sleep explicitly
time.sleep(3)
self.driver.get(f"https://trello.com/b/{BOARD_ID}/")
self.driver.get_screenshot_as_file('board.png')
def close(self):
self.driver.close()
def main():
site_capture = SiteCapture()
try:
site_capture.step_login()
site_capture.step_snap()
finally:
site_capture.close()
if __name__ == "__main__":
main()
| []
| []
| [
"APP_WEBDRIVER_PATH",
"APP_USERNAME",
"APP_PASSWORD",
"APP_BOARD_ID",
"APP_ENABLE_HEADLESS"
]
| [] | ["APP_WEBDRIVER_PATH", "APP_USERNAME", "APP_PASSWORD", "APP_BOARD_ID", "APP_ENABLE_HEADLESS"] | python | 5 | 0 | |
src/cmd/compile/internal/ssagen/ssa.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssagen
import (
"bufio"
"bytes"
"cmd/compile/internal/abi"
"fmt"
"go/constant"
"html"
"internal/buildcfg"
"os"
"path/filepath"
"sort"
"strings"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/liveness"
"cmd/compile/internal/objw"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssa"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
"cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
)
var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache
var ssaDump string // early copy of $GOSSAFUNC; the func name to dump output for
var ssaDir string // optional destination for ssa dump file
var ssaDumpStdout bool // whether to dump to stdout
var ssaDumpCFG string // generate CFGs for these phases
const ssaDumpFile = "ssa.html"
// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
var ssaDumpInlined []*ir.Func
func DumpInline(fn *ir.Func) {
if ssaDump != "" && ssaDump == ir.FuncName(fn) {
ssaDumpInlined = append(ssaDumpInlined, fn)
}
}
func InitEnv() {
ssaDump = os.Getenv("GOSSAFUNC")
ssaDir = os.Getenv("GOSSADIR")
if ssaDump != "" {
if strings.HasSuffix(ssaDump, "+") {
ssaDump = ssaDump[:len(ssaDump)-1]
ssaDumpStdout = true
}
spl := strings.Split(ssaDump, ":")
if len(spl) > 1 {
ssaDump = spl[0]
ssaDumpCFG = spl[1]
}
}
}
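// Illustrative invocations of the environment variables parsed above (the function
// and phase names are placeholders):
//
//	GOSSAFUNC=Foo go build            # write SSA for function Foo to ssa.html
//	GOSSAFUNC=Foo+ go build           # additionally dump the phases to stdout
//	GOSSAFUNC=Foo:lower go build      # also generate CFGs for the "lower" phase
//	GOSSADIR=/tmp/ssa go build        # place the per-function HTML files there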
func InitConfig() {
types_ := ssa.NewTypes()
if Arch.SoftFloat {
softfloatInit()
}
// Generate a few pointer types that are uncommon in the frontend but common in the backend.
// Caching is disabled in the backend, so generating these here avoids allocations.
_ = types.NewPtr(types.Types[types.TINTER]) // *interface{}
_ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING])) // **string
_ = types.NewPtr(types.NewSlice(types.Types[types.TINTER])) // *[]interface{}
_ = types.NewPtr(types.NewPtr(types.ByteType)) // **byte
_ = types.NewPtr(types.NewSlice(types.ByteType)) // *[]byte
_ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING])) // *[]string
_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8
_ = types.NewPtr(types.Types[types.TINT16]) // *int16
_ = types.NewPtr(types.Types[types.TINT64]) // *int64
_ = types.NewPtr(types.ErrorType) // *error
types.NewPtrCacheEnabled = false
ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0)
ssaConfig.SoftFloat = Arch.SoftFloat
ssaConfig.Race = base.Flag.Race
ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
// Set up some runtime functions we'll need to call.
ir.Syms.AssertE2I = typecheck.LookupRuntimeFunc("assertE2I")
ir.Syms.AssertE2I2 = typecheck.LookupRuntimeFunc("assertE2I2")
ir.Syms.AssertI2I = typecheck.LookupRuntimeFunc("assertI2I")
ir.Syms.AssertI2I2 = typecheck.LookupRuntimeFunc("assertI2I2")
ir.Syms.Deferproc = typecheck.LookupRuntimeFunc("deferproc")
ir.Syms.DeferprocStack = typecheck.LookupRuntimeFunc("deferprocStack")
ir.Syms.Deferreturn = typecheck.LookupRuntimeFunc("deferreturn")
ir.Syms.Duffcopy = typecheck.LookupRuntimeFunc("duffcopy")
ir.Syms.Duffzero = typecheck.LookupRuntimeFunc("duffzero")
ir.Syms.GCWriteBarrier = typecheck.LookupRuntimeFunc("gcWriteBarrier")
ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded")
ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice")
ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite")
ir.Syms.Msanmove = typecheck.LookupRuntimeFunc("msanmove")
ir.Syms.Newobject = typecheck.LookupRuntimeFunc("newobject")
ir.Syms.Newproc = typecheck.LookupRuntimeFunc("newproc")
ir.Syms.Panicdivide = typecheck.LookupRuntimeFunc("panicdivide")
ir.Syms.PanicdottypeE = typecheck.LookupRuntimeFunc("panicdottypeE")
ir.Syms.PanicdottypeI = typecheck.LookupRuntimeFunc("panicdottypeI")
ir.Syms.Panicnildottype = typecheck.LookupRuntimeFunc("panicnildottype")
ir.Syms.Panicoverflow = typecheck.LookupRuntimeFunc("panicoverflow")
ir.Syms.Panicshift = typecheck.LookupRuntimeFunc("panicshift")
ir.Syms.Raceread = typecheck.LookupRuntimeFunc("raceread")
ir.Syms.Racereadrange = typecheck.LookupRuntimeFunc("racereadrange")
ir.Syms.Racewrite = typecheck.LookupRuntimeFunc("racewrite")
ir.Syms.Racewriterange = typecheck.LookupRuntimeFunc("racewriterange")
ir.Syms.X86HasPOPCNT = typecheck.LookupRuntimeVar("x86HasPOPCNT") // bool
ir.Syms.X86HasSSE41 = typecheck.LookupRuntimeVar("x86HasSSE41") // bool
ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA") // bool
ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4") // bool
ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS") // bool
ir.Syms.Staticuint64s = typecheck.LookupRuntimeVar("staticuint64s")
ir.Syms.Typedmemclr = typecheck.LookupRuntimeFunc("typedmemclr")
ir.Syms.Typedmemmove = typecheck.LookupRuntimeFunc("typedmemmove")
ir.Syms.Udiv = typecheck.LookupRuntimeVar("udiv") // asm func with special ABI
ir.Syms.WriteBarrier = typecheck.LookupRuntimeVar("writeBarrier") // struct { bool; ... }
ir.Syms.Zerobase = typecheck.LookupRuntimeVar("zerobase")
// asm funcs with special ABI
if base.Ctxt.Arch.Name == "amd64" {
GCWriteBarrierReg = map[int16]*obj.LSym{
x86.REG_AX: typecheck.LookupRuntimeFunc("gcWriteBarrier"),
x86.REG_CX: typecheck.LookupRuntimeFunc("gcWriteBarrierCX"),
x86.REG_DX: typecheck.LookupRuntimeFunc("gcWriteBarrierDX"),
x86.REG_BX: typecheck.LookupRuntimeFunc("gcWriteBarrierBX"),
x86.REG_BP: typecheck.LookupRuntimeFunc("gcWriteBarrierBP"),
x86.REG_SI: typecheck.LookupRuntimeFunc("gcWriteBarrierSI"),
x86.REG_R8: typecheck.LookupRuntimeFunc("gcWriteBarrierR8"),
x86.REG_R9: typecheck.LookupRuntimeFunc("gcWriteBarrierR9"),
}
}
if Arch.LinkArch.Family == sys.Wasm {
BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("goPanicIndex")
BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("goPanicIndexU")
BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("goPanicSliceAlen")
BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("goPanicSliceAlenU")
BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("goPanicSliceAcap")
BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("goPanicSliceAcapU")
BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("goPanicSliceB")
BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("goPanicSliceBU")
BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("goPanicSlice3Alen")
BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("goPanicSlice3AlenU")
BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("goPanicSlice3Acap")
BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("goPanicSlice3AcapU")
BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("goPanicSlice3B")
BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("goPanicSlice3BU")
BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("goPanicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("goPanicSlice3CU")
BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("goPanicSliceConvert")
} else {
BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("panicIndex")
BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("panicIndexU")
BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("panicSliceAlen")
BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("panicSliceAlenU")
BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("panicSliceAcap")
BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("panicSliceAcapU")
BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("panicSliceB")
BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("panicSliceBU")
BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("panicSlice3Alen")
BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("panicSlice3AlenU")
BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("panicSlice3Acap")
BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("panicSlice3AcapU")
BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("panicSlice3B")
BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("panicSlice3BU")
BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU")
BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("panicSliceConvert")
}
if Arch.LinkArch.PtrSize == 4 {
ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex")
ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU")
ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen")
ExtendCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeVar("panicExtendSliceAlenU")
ExtendCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeVar("panicExtendSliceAcap")
ExtendCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeVar("panicExtendSliceAcapU")
ExtendCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeVar("panicExtendSliceB")
ExtendCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeVar("panicExtendSliceBU")
ExtendCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeVar("panicExtendSlice3Alen")
ExtendCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeVar("panicExtendSlice3AlenU")
ExtendCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeVar("panicExtendSlice3Acap")
ExtendCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeVar("panicExtendSlice3AcapU")
ExtendCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeVar("panicExtendSlice3B")
ExtendCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeVar("panicExtendSlice3BU")
ExtendCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeVar("panicExtendSlice3C")
ExtendCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeVar("panicExtendSlice3CU")
}
// Wasm (all asm funcs with special ABIs)
ir.Syms.WasmMove = typecheck.LookupRuntimeVar("wasmMove")
ir.Syms.WasmZero = typecheck.LookupRuntimeVar("wasmZero")
ir.Syms.WasmDiv = typecheck.LookupRuntimeVar("wasmDiv")
ir.Syms.WasmTruncS = typecheck.LookupRuntimeVar("wasmTruncS")
ir.Syms.WasmTruncU = typecheck.LookupRuntimeVar("wasmTruncU")
ir.Syms.SigPanic = typecheck.LookupRuntimeFunc("sigpanic")
}
// AbiForBodylessFuncStackMap returns the ABI for a bodyless function's stack map.
// This is not necessarily the ABI used to call it.
// Currently (1.17 dev) such a stack map is always ABI0;
// any ABI wrapper that is present is nosplit, hence a precise
// stack map is not needed there (the parameters survive only long
// enough to call the wrapped assembly function).
// This always returns a freshly copied ABI.
func AbiForBodylessFuncStackMap(fn *ir.Func) *abi.ABIConfig {
return ssaConfig.ABI0.Copy() // No idea what races will result, be safe
}
// These are disabled but remain ready for use in case they are needed for the next regabi port.
// TODO if they are not needed for 1.18 / next register abi port, delete them.
const magicNameDotSuffix = ".*disabled*MagicMethodNameForTestingRegisterABI"
const magicLastTypeName = "*disabled*MagicLastTypeNameForTestingRegisterABI"
// abiForFunc implements ABI policy for a function, but does not return a copy of the ABI.
// Passing a nil function returns the default ABI based on experiment configuration.
func abiForFunc(fn *ir.Func, abi0, abi1 *abi.ABIConfig) *abi.ABIConfig {
if buildcfg.Experiment.RegabiArgs {
// Select the ABI based on the function's defining ABI.
if fn == nil {
return abi1
}
switch fn.ABI {
case obj.ABI0:
return abi0
case obj.ABIInternal:
// TODO(austin): Clean up the nomenclature here.
// It's not clear that "abi1" is ABIInternal.
return abi1
}
base.Fatalf("function %v has unknown ABI %v", fn, fn.ABI)
panic("not reachable")
}
a := abi0
if fn != nil {
name := ir.FuncName(fn)
magicName := strings.HasSuffix(name, magicNameDotSuffix)
if fn.Pragma&ir.RegisterParams != 0 { // TODO(register args) remove after register abi is working
if strings.Contains(name, ".") {
if !magicName {
base.ErrorfAt(fn.Pos(), "Calls to //go:registerparams method %s won't work, remove the pragma from the declaration.", name)
}
}
a = abi1
} else if magicName {
if base.FmtPos(fn.Pos()) == "<autogenerated>:1" {
// no way to put a pragma here, and it will error out in the real source code if they did not do it there.
a = abi1
} else {
base.ErrorfAt(fn.Pos(), "Methods with magic name %s (method %s) must also specify //go:registerparams", magicNameDotSuffix[1:], name)
}
}
if regAbiForFuncType(fn.Type().FuncType()) {
// fmt.Printf("Saw magic last type name for function %s\n", name)
a = abi1
}
}
return a
}
func regAbiForFuncType(ft *types.Func) bool {
np := ft.Params.NumFields()
return np > 0 && strings.Contains(ft.Params.FieldType(np-1).String(), magicLastTypeName)
}
// getParam returns the Field of ith param of node n (which is a
// function/method/interface call), where the receiver of a method call is
// considered as the 0th parameter. This does not include the receiver of an
// interface call.
func getParam(n *ir.CallExpr, i int) *types.Field {
t := n.X.Type()
if n.Op() == ir.OCALLMETH {
base.Fatalf("OCALLMETH missed by walkCall")
}
return t.Params().Field(i)
}
// dvarint writes a varint v to the funcdata in symbol x and returns the new offset
func dvarint(x *obj.LSym, off int, v int64) int {
if v < 0 || v > 1e9 {
panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
}
if v < 1<<7 {
return objw.Uint8(x, off, uint8(v))
}
off = objw.Uint8(x, off, uint8((v&127)|128))
if v < 1<<14 {
return objw.Uint8(x, off, uint8(v>>7))
}
off = objw.Uint8(x, off, uint8(((v>>7)&127)|128))
if v < 1<<21 {
return objw.Uint8(x, off, uint8(v>>14))
}
off = objw.Uint8(x, off, uint8(((v>>14)&127)|128))
if v < 1<<28 {
return objw.Uint8(x, off, uint8(v>>21))
}
off = objw.Uint8(x, off, uint8(((v>>21)&127)|128))
return objw.Uint8(x, off, uint8(v>>28))
}
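// Worked example: dvarint-encoding the value 300 emits two bytes, 0xAC (the low
// seven bits, 44, with the continuation bit set) followed by 0x02 (the remaining
// bits, 300>>7). Larger values simply continue the same 7-bits-per-byte pattern.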
// emitOpenDeferInfo emits FUNCDATA information about the defers in a function
// that is using open-coded defers. This funcdata is used to determine the active
// defers in a function and execute those defers during panic processing.
//
// The funcdata is all encoded in varints (since values will almost always be less than
// 128, but stack offsets could potentially be up to 2Gbyte). All "locations" (offsets)
// for stack variables are specified as the number of bytes below varp (pointer to the
// top of the local variables) for their starting address. The format is:
//
// - Max total argument size among all the defers
// - Offset of the deferBits variable
// - Number of defers in the function
// - Information about each defer call, in reverse order of appearance in the function:
// - Total argument size of the call
// - Offset of the closure value to call
// - Number of arguments (including interface receiver or method receiver as first arg)
// - Information about each argument
// - Offset of the stored defer argument in this function's frame
// - Size of the argument
// - Offset of where argument should be placed in the args frame when making call
func (s *state) emitOpenDeferInfo() {
x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
s.curfn.LSym.Func().OpenCodedDeferInfo = x
off := 0
// Compute maxargsize (max size of arguments for all defers)
// first, so we can output it first to the funcdata
var maxargsize int64
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
argsize := r.n.X.Type().ArgWidth() // TODO register args: but maybe use of abi0 will make this easy
if argsize > maxargsize {
maxargsize = argsize
}
}
off = dvarint(x, off, maxargsize)
off = dvarint(x, off, -s.deferBitsTemp.FrameOffset())
off = dvarint(x, off, int64(len(s.openDefers)))
// Write in reverse-order, for ease of running in that order at runtime
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
off = dvarint(x, off, r.n.X.Type().ArgWidth())
off = dvarint(x, off, -r.closureNode.FrameOffset())
numArgs := len(r.argNodes)
if r.rcvrNode != nil {
// If there's an interface receiver, treat/place it as the first
// arg. (If there is a method receiver, it's already included as
// first arg in r.argNodes.)
numArgs++
}
off = dvarint(x, off, int64(numArgs))
argAdjust := 0 // presence of receiver offsets the parameter count.
if r.rcvrNode != nil {
off = dvarint(x, off, -okOffset(r.rcvrNode.FrameOffset()))
off = dvarint(x, off, s.config.PtrSize)
off = dvarint(x, off, 0) // This is okay because defer records use ABI0 (for now)
argAdjust++
}
// TODO(register args) assume abi0 for this?
ab := s.f.ABI0
pri := ab.ABIAnalyzeFuncType(r.n.X.Type().FuncType())
for j, arg := range r.argNodes {
f := getParam(r.n, j)
off = dvarint(x, off, -okOffset(arg.FrameOffset()))
off = dvarint(x, off, f.Type.Size())
off = dvarint(x, off, okOffset(pri.InParam(j+argAdjust).FrameOffset(pri)))
}
}
}
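// As a hedged illustration of the encoding above: a function containing a single
// open-coded `defer f(x)`, where x is one 8-byte argument, would emit roughly
//
//	maxargsize=8, deferBits offset, ndefers=1,
//	argsize=8, closure offset, nargs=1,
//	stored-arg offset, size=8, call-frame offset
//
// with every field written by dvarint and all stack locations expressed as the
// number of bytes below varp.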
func okOffset(offset int64) int64 {
if offset == types.BOGUS_FUNARG_OFFSET {
panic(fmt.Errorf("Bogus offset %d", offset))
}
return offset
}
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *ir.Func, worker int) *ssa.Func {
name := ir.FuncName(fn)
printssa := false
if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", package.name e.g. "compress/gzip.(*Reader).Reset", or subpackage name "gzip.(*Reader).Reset"
pkgDotName := base.Ctxt.Pkgpath + "." + name
printssa = name == ssaDump ||
strings.HasSuffix(pkgDotName, ssaDump) && (pkgDotName == ssaDump || strings.HasSuffix(pkgDotName, "/"+ssaDump))
}
var astBuf *bytes.Buffer
if printssa {
astBuf = &bytes.Buffer{}
ir.FDumpList(astBuf, "buildssa-enter", fn.Enter)
ir.FDumpList(astBuf, "buildssa-body", fn.Body)
ir.FDumpList(astBuf, "buildssa-exit", fn.Exit)
if ssaDumpStdout {
fmt.Println("generating SSA for", name)
fmt.Print(astBuf.String())
}
}
var s state
s.pushLine(fn.Pos())
defer s.popLine()
s.hasdefer = fn.HasDefer()
if fn.Pragma&ir.CgoUnsafeArgs != 0 {
s.cgoUnsafeArgs = true
}
fe := ssafn{
curfn: fn,
log: printssa && ssaDumpStdout,
}
s.curfn = fn
s.f = ssa.NewFunc(&fe)
s.config = ssaConfig
s.f.Type = fn.Type()
s.f.Config = ssaConfig
s.f.Cache = &ssaCaches[worker]
s.f.Cache.Reset()
s.f.Name = name
s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
s.f.PrintOrHtmlSSA = printssa
if fn.Pragma&ir.Nosplit != 0 {
s.f.NoSplit = true
}
s.f.ABI0 = ssaConfig.ABI0.Copy() // Make a copy to avoid racy map operations in type-register-width cache.
s.f.ABI1 = ssaConfig.ABI1.Copy()
s.f.ABIDefault = abiForFunc(nil, s.f.ABI0, s.f.ABI1)
s.f.ABISelf = abiForFunc(fn, s.f.ABI0, s.f.ABI1)
s.panics = map[funcLine]*ssa.Block{}
s.softFloat = s.config.SoftFloat
// Allocate starting block
s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
s.f.Entry.Pos = fn.Pos()
if printssa {
ssaDF := ssaDumpFile
if ssaDir != "" {
ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+name+".html")
ssaD := filepath.Dir(ssaDF)
os.MkdirAll(ssaD, 0755)
}
s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG)
// TODO: generate and print a mapping from nodes to values and blocks
dumpSourcesColumn(s.f.HTMLWriter, fn)
s.f.HTMLWriter.WriteAST("AST", astBuf)
}
// Allocate starting values
s.labels = map[string]*ssaLabel{}
s.fwdVars = map[ir.Node]*ssa.Value{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed()
switch {
case base.Debug.NoOpenDefer != 0:
s.hasOpenDefers = false
case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && base.Ctxt.Arch.Name == "386":
// Don't support open-coded defers for 386 ONLY when using shared
// libraries, because there is extra code (added by rewriteToUseGot())
// preceding the deferreturn/ret code that we don't track correctly.
s.hasOpenDefers = false
}
if s.hasOpenDefers && len(s.curfn.Exit) > 0 {
// Skip doing open defers if there is any extra exit code (likely
// race detection), since we will not generate that code in the
// case of the extra deferreturn/ret segment.
s.hasOpenDefers = false
}
if s.hasOpenDefers {
// Similarly, skip if there are any heap-allocated result
// parameters that need to be copied back to their stack slots.
for _, f := range s.curfn.Type().Results().FieldSlice() {
if !f.Nname.(*ir.Name).OnStack() {
s.hasOpenDefers = false
break
}
}
}
if s.hasOpenDefers &&
s.curfn.NumReturns*s.curfn.NumDefers > 15 {
// Since we are generating defer calls at every exit for
// open-coded defers, skip doing open-coded defers if there are
// too many returns (especially if there are multiple defers).
// Open-coded defers are most important for improving performance
// for smaller functions (which don't have many returns).
s.hasOpenDefers = false
}
s.sp = s.entryNewValue0(ssa.OpSP, types.Types[types.TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
s.sb = s.entryNewValue0(ssa.OpSB, types.Types[types.TUINTPTR])
s.startBlock(s.f.Entry)
s.vars[memVar] = s.startmem
if s.hasOpenDefers {
// Create the deferBits variable and stack slot. deferBits is a
// bitmask showing which of the open-coded defers in this function
// have been activated.
deferBitsTemp := typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
deferBitsTemp.SetAddrtaken(true)
s.deferBitsTemp = deferBitsTemp
// For this value, AuxInt is initialized to zero by default
startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8])
s.vars[deferBitsVar] = startDeferBits
s.deferBitsAddr = s.addr(deferBitsTemp)
s.store(types.Types[types.TUINT8], s.deferBitsAddr, startDeferBits)
// Make sure that the deferBits stack slot is kept alive (for use
// by panics) and stores to deferBits are not eliminated, even if
// all checking code on deferBits in the function exit can be
// eliminated, because the defer statements were all
// unconditional.
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
}
var params *abi.ABIParamResultInfo
params = s.f.ABISelf.ABIAnalyze(fn.Type(), true)
// Generate addresses of local declarations
s.decladdrs = map[*ir.Name]*ssa.Value{}
for _, n := range fn.Dcl {
switch n.Class {
case ir.PPARAM:
// Be aware that blank and unnamed input parameters will not appear here, but do appear in the type
s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
case ir.PPARAMOUT:
s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
case ir.PAUTO:
// processed at each use, to prevent Addr coming
// before the decl.
default:
s.Fatalf("local variable with class %v unimplemented", n.Class)
}
}
s.f.OwnAux = ssa.OwnAuxCall(fn.LSym, params)
// Populate SSAable arguments.
for _, n := range fn.Dcl {
if n.Class == ir.PPARAM {
if s.canSSA(n) {
v := s.newValue0A(ssa.OpArg, n.Type(), n)
s.vars[n] = v
s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
} else { // address was taken AND/OR too large for SSA
paramAssignment := ssa.ParamAssignmentForArgName(s.f, n)
if len(paramAssignment.Registers) > 0 {
if TypeOK(n.Type()) { // SSA-able type, so address was taken -- receive value in OpArg, DO NOT bind to var, store immediately to memory.
v := s.newValue0A(ssa.OpArg, n.Type(), n)
s.store(n.Type(), s.decladdrs[n], v)
} else { // Too big for SSA.
// Brute force, and early, do a bunch of stores from registers
// TODO fix the nasty storeArgOrLoad recursion in ssa/expand_calls.go so this Just Works with store of a big Arg.
s.storeParameterRegsToStack(s.f.ABISelf, paramAssignment, n, s.decladdrs[n], false)
}
}
}
}
}
// Populate closure variables.
if !fn.ClosureCalled() {
clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)
offset := int64(types.PtrSize) // PtrSize to skip past function entry PC field
for _, n := range fn.ClosureVars {
typ := n.Type()
if !n.Byval() {
typ = types.NewPtr(typ)
}
offset = types.Rnd(offset, typ.Alignment())
ptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo)
offset += typ.Size()
// If n is a small variable captured by value, promote
// it to PAUTO so it can be converted to SSA.
//
// Note: While we never capture a variable by value if
// the user took its address, we may have generated
// runtime calls that did (#43701). Since we don't
// convert Addrtaken variables to SSA anyway, no point
// in promoting them either.
if n.Byval() && !n.Addrtaken() && TypeOK(n.Type()) {
n.Class = ir.PAUTO
fn.Dcl = append(fn.Dcl, n)
s.assign(n, s.load(n.Type(), ptr), false, 0)
continue
}
if !n.Byval() {
ptr = s.load(typ, ptr)
}
s.setHeapaddr(fn.Pos(), n, ptr)
}
}
// Convert the AST-based IR to the SSA-based IR
s.stmtList(fn.Enter)
s.zeroResults()
s.paramsToHeap()
s.stmtList(fn.Body)
// fallthrough to exit
if s.curBlock != nil {
s.pushLine(fn.Endlineno)
s.exit()
s.popLine()
}
for _, b := range s.f.Blocks {
if b.Pos != src.NoXPos {
s.updateUnsetPredPos(b)
}
}
s.f.HTMLWriter.WritePhase("before insert phis", "before insert phis")
s.insertPhis()
// Main call to ssa package to compile function
ssa.Compile(s.f)
if s.hasOpenDefers {
s.emitOpenDeferInfo()
}
// Record incoming parameter spill information for morestack calls emitted in the assembler.
// This is done here, using all the parameters (used, partially used, and unused) because
// it mimics the behavior of the former ABI (everything stored) and because it's not 100%
// clear if naming conventions are respected in autogenerated code.
// TODO figure out exactly what's unused, don't spill it. Make liveness fine-grained, also.
// TODO non-amd64 architectures have link registers etc that may require adjustment here.
for _, p := range params.InParams() {
typs, offs := p.RegisterTypesAndOffsets()
for i, t := range typs {
o := offs[i] // offset within parameter
fo := p.FrameOffset(params) // offset of parameter in frame
reg := ssa.ObjRegForAbiReg(p.Registers[i], s.f.Config)
s.f.RegArgs = append(s.f.RegArgs, ssa.Spill{Reg: reg, Offset: fo + o, Type: t})
}
}
return s.f
}
func (s *state) storeParameterRegsToStack(abi *abi.ABIConfig, paramAssignment *abi.ABIParamAssignment, n *ir.Name, addr *ssa.Value, pointersOnly bool) {
typs, offs := paramAssignment.RegisterTypesAndOffsets()
for i, t := range typs {
if pointersOnly && !t.IsPtrShaped() {
continue
}
r := paramAssignment.Registers[i]
o := offs[i]
op, reg := ssa.ArgOpAndRegisterFor(r, abi)
aux := &ssa.AuxNameOffset{Name: n, Offset: o}
v := s.newValue0I(op, t, reg)
v.Aux = aux
p := s.newValue1I(ssa.OpOffPtr, types.NewPtr(t), o, addr)
s.store(t, p, v)
}
}
// zeroResults zeros the return values at the start of the function.
// We need to do this very early in the function. Defer might stop a
// panic and show the return values as they exist at the time of
// panic. For precise stacks, the garbage collector assumes results
// are always live, so we need to zero them before any allocations,
// even allocations to move params/results to the heap.
func (s *state) zeroResults() {
for _, f := range s.curfn.Type().Results().FieldSlice() {
n := f.Nname.(*ir.Name)
if !n.OnStack() {
// The local which points to the return value is the
// thing that needs zeroing. This is already handled
// by a Needzero annotation in plive.go:(*liveness).epilogue.
continue
}
// Zero the stack location containing f.
if typ := n.Type(); TypeOK(typ) {
s.assign(n, s.zeroVal(typ), false, 0)
} else {
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
s.zero(n.Type(), s.decladdrs[n])
}
}
}
// paramsToHeap produces code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
func (s *state) paramsToHeap() {
do := func(params *types.Type) {
for _, f := range params.FieldSlice() {
if f.Nname == nil {
continue // anonymous or blank parameter
}
n := f.Nname.(*ir.Name)
if ir.IsBlank(n) || n.OnStack() {
continue
}
s.newHeapaddr(n)
if n.Class == ir.PPARAM {
s.move(n.Type(), s.expr(n.Heapaddr), s.decladdrs[n])
}
}
}
typ := s.curfn.Type()
do(typ.Recvs())
do(typ.Params())
do(typ.Results())
}
// newHeapaddr allocates heap memory for n and sets its heap address.
func (s *state) newHeapaddr(n *ir.Name) {
s.setHeapaddr(n.Pos(), n, s.newObject(n.Type()))
}
// setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil)
// and then sets it as n's heap address.
func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
if !ptr.Type.IsPtr() || !types.Identical(n.Type(), ptr.Type.Elem()) {
base.FatalfAt(n.Pos(), "setHeapaddr %L with type %v", n, ptr.Type)
}
// Declare variable to hold address.
addr := ir.NewNameAt(pos, &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg})
addr.SetType(types.NewPtr(n.Type()))
addr.Class = ir.PAUTO
addr.SetUsed(true)
addr.Curfn = s.curfn
s.curfn.Dcl = append(s.curfn.Dcl, addr)
types.CalcSize(addr.Type())
if n.Class == ir.PPARAMOUT {
addr.SetIsOutputParamHeapAddr(true)
}
n.Heapaddr = addr
s.assign(addr, ptr, false, 0)
}
// newObject returns an SSA value denoting new(typ).
func (s *state) newObject(typ *types.Type) *ssa.Value {
if typ.Size() == 0 {
return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb)
}
return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, s.reflectType(typ))[0]
}
// reflectType returns an SSA value representing a pointer to typ's
// reflection type descriptor.
func (s *state) reflectType(typ *types.Type) *ssa.Value {
lsym := reflectdata.TypeLinksym(typ)
return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(types.Types[types.TUINT8]), lsym, s.sb)
}
func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) {
// Read sources of target function fn.
fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Endlineno.Line())
if err != nil {
writer.Logf("cannot read sources for function %v: %v", fn, err)
}
// Read sources of inlined functions.
var inlFns []*ssa.FuncLines
for _, fi := range ssaDumpInlined {
elno := fi.Endlineno
fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename()
fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line())
if err != nil {
writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
continue
}
inlFns = append(inlFns, fnLines)
}
sort.Sort(ssa.ByTopo(inlFns))
if targetFn != nil {
inlFns = append([]*ssa.FuncLines{targetFn}, inlFns...)
}
writer.WriteSources("sources", inlFns)
}
func readFuncLines(file string, start, end uint) (*ssa.FuncLines, error) {
f, err := os.Open(os.ExpandEnv(file))
if err != nil {
return nil, err
}
defer f.Close()
var lines []string
ln := uint(1)
scanner := bufio.NewScanner(f)
for scanner.Scan() && ln <= end {
if ln >= start {
lines = append(lines, scanner.Text())
}
ln++
}
return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil
}
// updateUnsetPredPos propagates the earliest-value position information for b
// towards all of b's predecessors that need a position, and recurs on that
// predecessor if its position is updated. B should have a non-empty position.
func (s *state) updateUnsetPredPos(b *ssa.Block) {
if b.Pos == src.NoXPos {
s.Fatalf("Block %s should have a position", b)
}
bestPos := src.NoXPos
for _, e := range b.Preds {
p := e.Block()
if !p.LackingPos() {
continue
}
if bestPos == src.NoXPos {
bestPos = b.Pos
for _, v := range b.Values {
if v.LackingPos() {
continue
}
if v.Pos != src.NoXPos {
// Assume values are still in roughly textual order;
// TODO: could also seek minimum position?
bestPos = v.Pos
break
}
}
}
p.Pos = bestPos
s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
}
}
// Information about each open-coded defer.
type openDeferInfo struct {
// The node representing the call of the defer
n *ir.CallExpr
// If defer call is closure call, the address of the argtmp where the
// closure is stored.
closure *ssa.Value
// The node representing the argtmp where the closure is stored - used for
// function, method, or interface call, to store a closure that panic
// processing can use for this defer.
closureNode *ir.Name
// If defer call is interface call, the address of the argtmp where the
// receiver is stored
rcvr *ssa.Value
// The node representing the argtmp where the receiver is stored
rcvrNode *ir.Name
// The addresses of the argtmps where the evaluated arguments of the defer
// function call are stored.
argVals []*ssa.Value
// The nodes representing the argtmps where the args of the defer are stored
argNodes []*ir.Name
}
type state struct {
// configuration (arch) information
config *ssa.Config
// function we're building
f *ssa.Func
// Node for function
curfn *ir.Func
// labels in f
labels map[string]*ssaLabel
// unlabeled break and continue statement tracking
breakTo *ssa.Block // current target for plain break statement
continueTo *ssa.Block // current target for plain continue statement
// current location where we're interpreting the AST
curBlock *ssa.Block
// variable assignments in the current block (map from variable symbol to ssa value)
// *Node is the unique identifier (an ONAME Node) for the variable.
// TODO: keep a single varnum map, then make all of these maps slices instead?
vars map[ir.Node]*ssa.Value
// fwdVars are variables that are used before they are defined in the current block.
// This map exists just to coalesce multiple references into a single FwdRef op.
// *Node is the unique identifier (an ONAME Node) for the variable.
fwdVars map[ir.Node]*ssa.Value
// all defined variables at the end of each block. Indexed by block ID.
defvars []map[ir.Node]*ssa.Value
// addresses of PPARAM and PPARAMOUT variables on the stack.
decladdrs map[*ir.Name]*ssa.Value
// starting values. Memory, stack pointer, and globals pointer
startmem *ssa.Value
sp *ssa.Value
sb *ssa.Value
// value representing address of where deferBits autotmp is stored
deferBitsAddr *ssa.Value
deferBitsTemp *ir.Name
// line number stack. The current line number is top of stack
line []src.XPos
// the last line number processed; it may have been popped
lastPos src.XPos
// list of panic calls by function name and line number.
// Used to deduplicate panic calls.
panics map[funcLine]*ssa.Block
cgoUnsafeArgs bool
hasdefer bool // whether the function contains a defer statement
softFloat bool
hasOpenDefers bool // whether we are doing open-coded defers
// If doing open-coded defers, list of info about the defer calls in
// scanning order. Hence, at exit we should run these defers in reverse
// order of this list
openDefers []*openDeferInfo
// For open-coded defers, this is the beginning and end blocks of the last
// defer exit code that we have generated so far. We use these to share
// code between exits if the shareDeferExits option (disabled by default)
// is on.
lastDeferExit *ssa.Block // Entry block of last defer exit code we generated
lastDeferFinalBlock *ssa.Block // Final block of last defer exit code we generated
lastDeferCount int // Number of defers encountered at that point
prevCall *ssa.Value // the previous call; use this to tie results to the call op.
}
type funcLine struct {
f *obj.LSym
base *src.PosBase
line uint
}
type ssaLabel struct {
target *ssa.Block // block identified by this label
breakTarget *ssa.Block // block to break to in control flow node identified by this label
continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}
// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *types.Sym) *ssaLabel {
lab := s.labels[sym.Name]
if lab == nil {
lab = new(ssaLabel)
s.labels[sym.Name] = lab
}
return lab
}
func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
func ssaMarker(name string) *ir.Name {
return typecheck.NewName(&types.Sym{Name: name})
}
var (
// marker node for the memory variable
memVar = ssaMarker("mem")
// marker nodes for temporary variables
ptrVar = ssaMarker("ptr")
lenVar = ssaMarker("len")
newlenVar = ssaMarker("newlen")
capVar = ssaMarker("cap")
typVar = ssaMarker("typ")
okVar = ssaMarker("ok")
deferBitsVar = ssaMarker("deferBits")
)
// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
if s.curBlock != nil {
s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
}
s.curBlock = b
s.vars = map[ir.Node]*ssa.Value{}
for n := range s.fwdVars {
delete(s.fwdVars, n)
}
}
// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
b := s.curBlock
if b == nil {
return nil
}
for len(s.defvars) <= int(b.ID) {
s.defvars = append(s.defvars, nil)
}
s.defvars[b.ID] = s.vars
s.curBlock = nil
s.vars = nil
if b.LackingPos() {
// Empty plain blocks get the line of their successor (handled after all blocks created),
// except for increment blocks in For statements (handled in ssa conversion of OFOR),
// and for blocks ending in GOTO/BREAK/CONTINUE.
b.Pos = src.NoXPos
} else {
b.Pos = s.lastPos
}
return b
}
// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
if !line.IsKnown() {
// The frontend may emit a node with a missing line number;
// use the parent's line number in this case.
line = s.peekPos()
if base.Flag.K != 0 {
base.Warn("buildssa: unknown position (line 0)")
}
} else {
s.lastPos = line
}
s.line = append(s.line, line)
}
// popLine pops the top of the line number stack.
func (s *state) popLine() {
s.line = s.line[:len(s.line)-1]
}
// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
return s.line[len(s.line)-1]
}
// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
return s.curBlock.NewValue0(s.peekPos(), op, t)
}
// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}
// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}
// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}
// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}
// newValue1Apos adds a new value with one argument and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value, isStmt bool) *ssa.Value {
if isStmt {
return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}
return s.curBlock.NewValue1A(s.peekPos().WithNotStmt(), op, t, aux, arg)
}
// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}
// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}
// newValue2A adds a new value with two arguments and an aux value to the current block.
func (s *state) newValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
}
// newValue2Apos adds a new value with two arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
if isStmt {
return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
}
return s.curBlock.NewValue2A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1)
}
// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}
// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}
// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
// newValue3A adds a new value with three arguments and an aux value to the current block.
func (s *state) newValue3A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
// newValue3Apos adds a new value with three arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
if isStmt {
return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
return s.curBlock.NewValue3A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1, arg2)
}
// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}
// newValue4I adds a new value with four arguments and an auxint value to the current block.
func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3)
}
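// entryBlock returns the block in which to place "entry" values (normally the
// function's entry block, but the current block when optimizations are off).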
func (s *state) entryBlock() *ssa.Block {
b := s.f.Entry
if base.Flag.N > 0 && s.curBlock != nil {
// If optimizations are off, allocate in current block instead. Since with -N
// we're not doing the CSE or tighten passes, putting lots of stuff in the
// entry block leads to O(n^2) entries in the live value map during regalloc.
// See issue 45897.
b = s.curBlock
}
return b
}
// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
return s.entryBlock().NewValue0(src.NoXPos, op, t)
}
// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
return s.entryBlock().NewValue0A(src.NoXPos, op, t, aux)
}
// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
return s.entryBlock().NewValue1(src.NoXPos, op, t, arg)
}
// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
return s.entryBlock().NewValue1I(src.NoXPos, op, t, auxint, arg)
}
// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
return s.entryBlock().NewValue1A(src.NoXPos, op, t, aux, arg)
}
// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
return s.entryBlock().NewValue2(src.NoXPos, op, t, arg0, arg1)
}
// entryNewValue2A adds a new value with two arguments and an aux value to the entry block.
func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
return s.entryBlock().NewValue2A(src.NoXPos, op, t, aux, arg0, arg1)
}
// const* routines add a new const value to the entry block.
func (s *state) constSlice(t *types.Type) *ssa.Value {
return s.f.ConstSlice(t)
}
func (s *state) constInterface(t *types.Type) *ssa.Value {
return s.f.ConstInterface(t)
}
func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(t) }
func (s *state) constEmptyString(t *types.Type) *ssa.Value {
return s.f.ConstEmptyString(t)
}
func (s *state) constBool(c bool) *ssa.Value {
return s.f.ConstBool(types.Types[types.TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
return s.f.ConstInt8(t, c)
}
func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
return s.f.ConstInt16(t, c)
}
func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
return s.f.ConstInt32(t, c)
}
func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
return s.f.ConstInt64(t, c)
}
func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
return s.f.ConstFloat32(t, c)
}
func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
return s.f.ConstFloat64(t, c)
}
func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
if s.config.PtrSize == 8 {
return s.constInt64(t, c)
}
if int64(int32(c)) != c {
s.Fatalf("integer constant too big %d", c)
}
return s.constInt32(t, int32(c))
}
func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
return s.f.ConstOffPtrSP(t, c, s.sp)
}
// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
// soft-float runtime function instead (when emitting soft-float code).
func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
if s.softFloat {
if c, ok := s.sfcall(op, arg); ok {
return c
}
}
return s.newValue1(op, t, arg)
}
func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
if s.softFloat {
if c, ok := s.sfcall(op, arg0, arg1); ok {
return c
}
}
return s.newValue2(op, t, arg0, arg1)
}
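// instrumentKind distinguishes the kinds of memory operations that may be
// instrumented for the race and memory sanitizers.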
type instrumentKind uint8
const (
instrumentRead = iota
instrumentWrite
instrumentMove
)
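// instrument emits race/msan instrumentation for a read or write of a value
// of type t at addr.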
func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind) {
s.instrument2(t, addr, nil, kind)
}
// instrumentFields instruments a read/write operation on addr.
// If it is instrumenting for MSAN and t is a struct type, it instruments
// the operation for each field instead of for the whole struct.
func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) {
if !base.Flag.MSan || !t.IsStruct() {
s.instrument(t, addr, kind)
return
}
for _, f := range t.Fields().Slice() {
if f.Sym.IsBlank() {
continue
}
offptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(f.Type), abi.FieldOffsetOf(f), addr)
s.instrumentFields(f.Type, offptr, kind)
}
}
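// instrumentMove instruments a copy of a value of type t from src to dst.
// Under msan this is a single move instrumentation; otherwise it is a read of
// src followed by a write of dst.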
func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) {
if base.Flag.MSan {
s.instrument2(t, dst, src, instrumentMove)
} else {
s.instrument(t, src, instrumentRead)
s.instrument(t, dst, instrumentWrite)
}
}
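// instrument2 emits the actual runtime instrumentation call for kind.
// addr2 is used only for move instrumentation; a width argument is appended
// for msan calls and for race range calls on composite objects.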
func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
if !s.curfn.InstrumentBody() {
return
}
w := t.Size()
if w == 0 {
return // can't race on zero-sized things
}
if ssa.IsSanitizerSafeAddr(addr) {
return
}
var fn *obj.LSym
needWidth := false
if addr2 != nil && kind != instrumentMove {
panic("instrument2: non-nil addr2 for non-move instrumentation")
}
if base.Flag.MSan {
switch kind {
case instrumentRead:
fn = ir.Syms.Msanread
case instrumentWrite:
fn = ir.Syms.Msanwrite
case instrumentMove:
fn = ir.Syms.Msanmove
default:
panic("unreachable")
}
needWidth = true
} else if base.Flag.Race && t.NumComponents(types.CountBlankFields) > 1 {
// for composite objects we have to write every address
// because a write might happen to any subobject.
// composites with only one element don't have subobjects, though.
switch kind {
case instrumentRead:
fn = ir.Syms.Racereadrange
case instrumentWrite:
fn = ir.Syms.Racewriterange
default:
panic("unreachable")
}
needWidth = true
} else if base.Flag.Race {
// for non-composite objects we can write just the start
// address, as any write must write the first byte.
switch kind {
case instrumentRead:
fn = ir.Syms.Raceread
case instrumentWrite:
fn = ir.Syms.Racewrite
default:
panic("unreachable")
}
} else {
panic("unreachable")
}
args := []*ssa.Value{addr}
if addr2 != nil {
args = append(args, addr2)
}
if needWidth {
args = append(args, s.constInt(types.Types[types.TUINTPTR], w))
}
s.rtcall(fn, true, nil, args...)
}
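// load loads a value of type t from src, emitting read instrumentation.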
func (s *state) load(t *types.Type, src *ssa.Value) *ssa.Value {
s.instrumentFields(t, src, instrumentRead)
return s.rawLoad(t, src)
}
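// rawLoad loads a value of type t from src without emitting instrumentation.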
func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpLoad, t, src, s.mem())
}
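// store stores val of type t to dst, threading the store through the memory chain.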
func (s *state) store(t *types.Type, dst, val *ssa.Value) {
s.vars[memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
}
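// zero zeroes the value of type t at dst, emitting write instrumentation.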
func (s *state) zero(t *types.Type, dst *ssa.Value) {
s.instrument(t, dst, instrumentWrite)
store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
store.Aux = t
s.vars[memVar] = store
}
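// move copies the value of type t from src to dst, emitting move instrumentation.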
func (s *state) move(t *types.Type, dst, src *ssa.Value) {
s.instrumentMove(t, dst, src)
store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
store.Aux = t
s.vars[memVar] = store
}
// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l ir.Nodes) {
for _, n := range l {
s.stmt(n)
}
}
// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n ir.Node) {
if !(n.Op() == ir.OVARKILL || n.Op() == ir.OVARLIVE || n.Op() == ir.OVARDEF) {
// OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
s.pushLine(n.Pos())
defer s.popLine()
}
// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
// then this code is dead. Stop here.
if s.curBlock == nil && n.Op() != ir.OLABEL {
return
}
s.stmtList(n.Init())
switch n.Op() {
case ir.OBLOCK:
n := n.(*ir.BlockStmt)
s.stmtList(n.List)
// No-ops
case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL:
// Expression statements
case ir.OCALLFUNC:
n := n.(*ir.CallExpr)
if ir.IsIntrinsicCall(n) {
s.intrinsicCall(n)
return
}
fallthrough
case ir.OCALLINTER:
n := n.(*ir.CallExpr)
s.callResult(n, callNormal)
if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class == ir.PFUNC {
if fn := n.X.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
n.X.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(m)
// TODO: never rewrite OPANIC to OCALLFUNC in the
// first place. Need to wait until all backends
// go through SSA.
}
}
case ir.ODEFER:
n := n.(*ir.GoDeferStmt)
if base.Debug.Defer > 0 {
var defertype string
if s.hasOpenDefers {
defertype = "open-coded"
} else if n.Esc() == ir.EscNever {
defertype = "stack-allocated"
} else {
defertype = "heap-allocated"
}
base.WarnfAt(n.Pos(), "%s defer", defertype)
}
if s.hasOpenDefers {
s.openDeferRecord(n.Call.(*ir.CallExpr))
} else {
d := callDefer
if n.Esc() == ir.EscNever {
d = callDeferStack
}
s.callResult(n.Call.(*ir.CallExpr), d)
}
case ir.OGO:
n := n.(*ir.GoDeferStmt)
s.callResult(n.Call.(*ir.CallExpr), callGo)
case ir.OAS2DOTTYPE:
n := n.(*ir.AssignListStmt)
res, resok := s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true)
deref := false
if !TypeOK(n.Rhs[0].Type()) {
if res.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
mem := s.mem()
if mem.Op == ssa.OpVarKill {
mem = mem.Args[0]
}
if res.Args[1] != mem {
s.Fatalf("memory no longer live from 2-result dottype load")
}
deref = true
res = res.Args[0]
}
s.assign(n.Lhs[0], res, deref, 0)
s.assign(n.Lhs[1], resok, false, 0)
return
case ir.OAS2FUNC:
// We come here only when it is an intrinsic call returning two values.
n := n.(*ir.AssignListStmt)
call := n.Rhs[0].(*ir.CallExpr)
if !ir.IsIntrinsicCall(call) {
s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call)
}
v := s.intrinsicCall(call)
v1 := s.newValue1(ssa.OpSelect0, n.Lhs[0].Type(), v)
v2 := s.newValue1(ssa.OpSelect1, n.Lhs[1].Type(), v)
s.assign(n.Lhs[0], v1, false, 0)
s.assign(n.Lhs[1], v2, false, 0)
return
case ir.ODCL:
n := n.(*ir.Decl)
if v := n.X; v.Esc() == ir.EscHeap {
s.newHeapaddr(v)
}
case ir.OLABEL:
n := n.(*ir.LabelStmt)
sym := n.Label
lab := s.label(sym)
// The label might already have a target block via a goto.
if lab.target == nil {
lab.target = s.f.NewBlock(ssa.BlockPlain)
}
// Go to that label.
// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
if s.curBlock != nil {
b := s.endBlock()
b.AddEdgeTo(lab.target)
}
s.startBlock(lab.target)
case ir.OGOTO:
n := n.(*ir.BranchStmt)
sym := n.Label
lab := s.label(sym)
if lab.target == nil {
lab.target = s.f.NewBlock(ssa.BlockPlain)
}
b := s.endBlock()
b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
b.AddEdgeTo(lab.target)
case ir.OAS:
n := n.(*ir.AssignStmt)
if n.X == n.Y && n.X.Op() == ir.ONAME {
// An x=x assignment. No point in doing anything
// here. In addition, skipping this assignment
// prevents generating:
// VARDEF x
// COPY x -> x
// which is bad because x is incorrectly considered
// dead before the vardef. See issue #14904.
return
}
// Evaluate RHS.
rhs := n.Y
if rhs != nil {
switch rhs.Op() {
case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !ir.IsZero(rhs) {
s.Fatalf("literal with nonzero value in SSA: %v", rhs)
}
rhs = nil
case ir.OAPPEND:
rhs := rhs.(*ir.CallExpr)
// Check whether we're writing the result of an append back to the same slice.
// If so, we handle it specially to avoid write barriers on the fast
// (non-growth) path.
if !ir.SameSafeExpr(n.X, rhs.Args[0]) || base.Flag.N != 0 {
break
}
// If the slice can be SSA'd, it'll be on the stack,
// so there will be no write barriers,
// so there's no need to attempt to prevent them.
if s.canSSA(n.X) {
if base.Debug.Append > 0 { // replicating old diagnostic message
base.WarnfAt(n.Pos(), "append: len-only update (in local slice)")
}
break
}
if base.Debug.Append > 0 {
base.WarnfAt(n.Pos(), "append: len-only update")
}
s.append(rhs, true)
return
}
}
if ir.IsBlank(n.X) {
// _ = rhs
// Just evaluate rhs for side-effects.
if rhs != nil {
s.expr(rhs)
}
return
}
var t *types.Type
if n.Y != nil {
t = n.Y.Type()
} else {
t = n.X.Type()
}
var r *ssa.Value
deref := !TypeOK(t)
if deref {
if rhs == nil {
r = nil // Signal assign to use OpZero.
} else {
r = s.addr(rhs)
}
} else {
if rhs == nil {
r = s.zeroVal(t)
} else {
r = s.expr(rhs)
}
}
var skip skipMask
if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && ir.SameSafeExpr(rhs.(*ir.SliceExpr).X, n.X) {
// We're assigning a slicing operation back to its source.
// Don't write back fields we aren't changing. See issue #14855.
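// For example, in s = s[:n] the pointer and capacity are unchanged,
// so only the length field needs to be written back.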
rhs := rhs.(*ir.SliceExpr)
i, j, k := rhs.Low, rhs.High, rhs.Max
if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && ir.Int64Val(i) == 0) {
// [0:...] is the same as [:...]
i = nil
}
// TODO: detect defaults for len/cap also.
// Currently doesn't really work because (*p)[:len(*p)] appears here as:
// tmp = len(*p)
// (*p)[:tmp]
//if j != nil && (j.Op == OLEN && SameSafeExpr(j.Left, n.Left)) {
// j = nil
//}
//if k != nil && (k.Op == OCAP && SameSafeExpr(k.Left, n.Left)) {
// k = nil
//}
if i == nil {
skip |= skipPtr
if j == nil {
skip |= skipLen
}
if k == nil {
skip |= skipCap
}
}
}
s.assign(n.X, r, deref, skip)
case ir.OIF:
n := n.(*ir.IfStmt)
if ir.IsConst(n.Cond, constant.Bool) {
s.stmtList(n.Cond.Init())
if ir.BoolVal(n.Cond) {
s.stmtList(n.Body)
} else {
s.stmtList(n.Else)
}
break
}
bEnd := s.f.NewBlock(ssa.BlockPlain)
var likely int8
if n.Likely {
likely = 1
}
var bThen *ssa.Block
if len(n.Body) != 0 {
bThen = s.f.NewBlock(ssa.BlockPlain)
} else {
bThen = bEnd
}
var bElse *ssa.Block
if len(n.Else) != 0 {
bElse = s.f.NewBlock(ssa.BlockPlain)
} else {
bElse = bEnd
}
s.condBranch(n.Cond, bThen, bElse, likely)
if len(n.Body) != 0 {
s.startBlock(bThen)
s.stmtList(n.Body)
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
if len(n.Else) != 0 {
s.startBlock(bElse)
s.stmtList(n.Else)
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
s.startBlock(bEnd)
case ir.ORETURN:
n := n.(*ir.ReturnStmt)
s.stmtList(n.Results)
b := s.exit()
b.Pos = s.lastPos.WithIsStmt()
case ir.OTAILCALL:
n := n.(*ir.TailCallStmt)
b := s.exit()
b.Kind = ssa.BlockRetJmp // override BlockRet
b.Aux = callTargetLSym(n.Target)
case ir.OCONTINUE, ir.OBREAK:
n := n.(*ir.BranchStmt)
var to *ssa.Block
if n.Label == nil {
// plain break/continue
switch n.Op() {
case ir.OCONTINUE:
to = s.continueTo
case ir.OBREAK:
to = s.breakTo
}
} else {
// labeled break/continue; look up the target
sym := n.Label
lab := s.label(sym)
switch n.Op() {
case ir.OCONTINUE:
to = lab.continueTarget
case ir.OBREAK:
to = lab.breakTarget
}
}
b := s.endBlock()
b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
b.AddEdgeTo(to)
case ir.OWHL:
// OWHL: while Ninit; Cond { Body }
n := n.(*ir.WhileStmt)
bCond := s.f.NewBlock(ssa.BlockPlain)
bBody := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
bBody.Pos = n.Pos()
b := s.endBlock()
b.AddEdgeTo(bCond)
s.startBlock(bCond)
if n.Cond != nil {
s.condBranch(n.Cond, bBody, bEnd, 1)
} else {
b := s.endBlock()
b.Kind = ssa.BlockPlain
b.AddEdgeTo(bBody)
}
prevContinue := s.continueTo
prevBreak := s.breakTo
s.continueTo = bCond
s.breakTo = bEnd
var lab *ssaLabel
if sym := n.Label; sym != nil {
// labeled while loop
lab = s.label(sym)
lab.continueTarget = bCond
lab.breakTarget = bEnd
}
s.startBlock(bBody)
s.stmtList(n.Body)
// tear down continue/break
s.continueTo = prevContinue
s.breakTo = prevBreak
if lab != nil {
lab.continueTarget = nil
lab.breakTarget = nil
}
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bCond)
}
s.startBlock(bEnd)
case ir.OFOR, ir.OFORUNTIL:
// OFOR: for Ninit; Left; Right { Nbody }
// cond (Left); body (Nbody); incr (Right)
//
// OFORUNTIL: for Ninit; Left; Right; List { Nbody }
// => body: { Nbody }; incr: Right; if Left { lateincr: List; goto body }; end:
n := n.(*ir.ForStmt)
bCond := s.f.NewBlock(ssa.BlockPlain)
bBody := s.f.NewBlock(ssa.BlockPlain)
bIncr := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
// ensure empty for loops have correct position; issue #30167
bBody.Pos = n.Pos()
// first, jump to condition test (OFOR) or body (OFORUNTIL)
b := s.endBlock()
b.AddEdgeTo(bCond)
// generate code to test condition
s.startBlock(bCond)
if n.Cond != nil {
s.condBranch(n.Cond, bBody, bEnd, 1)
} else {
b := s.endBlock()
b.Kind = ssa.BlockPlain
b.AddEdgeTo(bBody)
}
// set up for continue/break in body
prevContinue := s.continueTo
prevBreak := s.breakTo
s.continueTo = bIncr
s.breakTo = bEnd
var lab *ssaLabel
if sym := n.Label; sym != nil {
// labeled for loop
lab = s.label(sym)
lab.continueTarget = bIncr
lab.breakTarget = bEnd
}
// generate body
s.startBlock(bBody)
s.stmtList(n.Body)
// tear down continue/break
s.continueTo = prevContinue
s.breakTo = prevBreak
if lab != nil {
lab.continueTarget = nil
lab.breakTarget = nil
}
// done with body, goto incr
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bIncr)
}
// generate incr (and, for OFORUNTIL, condition)
s.startBlock(bIncr)
if n.Post != nil {
s.stmt(n.Post)
}
if n.Op() == ir.OFOR {
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bCond)
// It can happen that bIncr ends in a block containing only VARKILL,
// and that muddles the debugging experience.
if b.Pos == src.NoXPos {
b.Pos = bCond.Pos
}
}
} else {
// bCond is unused in OFORUNTIL, so repurpose it.
bLateIncr := bCond
// test condition
s.condBranch(n.Cond, bLateIncr, bEnd, 1)
// generate late increment
s.startBlock(bLateIncr)
s.stmtList(n.Late)
s.endBlock().AddEdgeTo(bBody)
}
s.startBlock(bEnd)
case ir.OSWITCH, ir.OSELECT:
// These have been mostly rewritten by the front end into their Nbody fields.
// Our main task is to correctly hook up any break statements.
bEnd := s.f.NewBlock(ssa.BlockPlain)
prevBreak := s.breakTo
s.breakTo = bEnd
var sym *types.Sym
var body ir.Nodes
if n.Op() == ir.OSWITCH {
n := n.(*ir.SwitchStmt)
sym = n.Label
body = n.Compiled
} else {
n := n.(*ir.SelectStmt)
sym = n.Label
body = n.Compiled
}
var lab *ssaLabel
if sym != nil {
// labeled
lab = s.label(sym)
lab.breakTarget = bEnd
}
// generate body code
s.stmtList(body)
s.breakTo = prevBreak
if lab != nil {
lab.breakTarget = nil
}
// walk adds explicit OBREAK nodes to the end of all reachable code paths.
// If we still have a current block here, then mark it unreachable.
if s.curBlock != nil {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(m)
}
s.startBlock(bEnd)
case ir.OVARDEF:
n := n.(*ir.UnaryExpr)
if !s.canSSA(n.X) {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
}
case ir.OVARKILL:
// Insert a varkill op to record that a variable is no longer live.
// We only care about liveness info at call sites, so putting the
// varkill in the store chain is enough to keep it correctly ordered
// with respect to call ops.
n := n.(*ir.UnaryExpr)
if !s.canSSA(n.X) {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
}
case ir.OVARLIVE:
// Insert a varlive op to record that a variable is still live.
n := n.(*ir.UnaryExpr)
v := n.X.(*ir.Name)
if !v.Addrtaken() {
s.Fatalf("VARLIVE variable %v must have Addrtaken set", v)
}
switch v.Class {
case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
default:
s.Fatalf("VARLIVE variable %v must be Auto or Arg", v)
}
s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, v, s.mem())
case ir.OCHECKNIL:
n := n.(*ir.UnaryExpr)
p := s.expr(n.X)
s.nilCheck(p)
case ir.OINLMARK:
n := n.(*ir.InlineMarkStmt)
s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Index, s.mem())
default:
s.Fatalf("unhandled stmt %v", n.Op())
}
}
// If true, share as many open-coded defer exits as possible (with the downside of
// worse line-number information)
const shareDeferExits = false
// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
if s.hasdefer {
if s.hasOpenDefers {
if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount {
if s.curBlock.Kind != ssa.BlockPlain {
panic("Block for an exit should be BlockPlain")
}
s.curBlock.AddEdgeTo(s.lastDeferExit)
s.endBlock()
return s.lastDeferFinalBlock
}
s.openDeferExit()
} else {
s.rtcall(ir.Syms.Deferreturn, true, nil)
}
}
var b *ssa.Block
var m *ssa.Value
// Do actual return.
// These currently turn into self-copies (in many cases).
resultFields := s.curfn.Type().Results().FieldSlice()
results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1)
m = s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType())
// Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
for i, f := range resultFields {
n := f.Nname.(*ir.Name)
if s.canSSA(n) { // result is in some SSA variable
if !n.IsOutputParamInRegisters() {
// We are about to store to the result slot.
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
}
results[i] = s.variable(n, n.Type())
} else if !n.OnStack() { // result is actually heap allocated
// We are about to copy the in-heap result to the result slot.
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
ha := s.expr(n.Heapaddr)
s.instrumentFields(n.Type(), ha, instrumentRead)
results[i] = s.newValue2(ssa.OpDereference, n.Type(), ha, s.mem())
} else { // result is not SSA-able; not escaped, so not on heap, but too large for SSA.
// Before the register ABI this ought to be a self-move, home=dest.
// With the register ABI, it's still a self-move if the parameter is on the stack (i.e., too big or overflowed).
// No VarDef, as the result slot is already holding live value.
results[i] = s.newValue2(ssa.OpDereference, n.Type(), s.addr(n), s.mem())
}
}
// Run exit code. Today, this is just racefuncexit, in -race mode.
// TODO(register args) this seems risky here with a register-ABI, but not clear it is right to do it earlier either.
// Spills in register allocation might just fix it.
s.stmtList(s.curfn.Exit)
results[len(results)-1] = s.mem()
m.AddArgs(results...)
b = s.endBlock()
b.Kind = ssa.BlockRet
b.SetControl(m)
if s.hasdefer && s.hasOpenDefers {
s.lastDeferFinalBlock = b
}
return b
}
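// opAndType pairs an IR operator with the concrete kind of its operands;
// it is the key type of opToSSA.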
type opAndType struct {
op ir.Op
etype types.Kind
}
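// opToSSA maps an (operator, operand kind) pair to the generic SSA op that
// implements it.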
var opToSSA = map[opAndType]ssa.Op{
opAndType{ir.OADD, types.TINT8}: ssa.OpAdd8,
opAndType{ir.OADD, types.TUINT8}: ssa.OpAdd8,
opAndType{ir.OADD, types.TINT16}: ssa.OpAdd16,
opAndType{ir.OADD, types.TUINT16}: ssa.OpAdd16,
opAndType{ir.OADD, types.TINT32}: ssa.OpAdd32,
opAndType{ir.OADD, types.TUINT32}: ssa.OpAdd32,
opAndType{ir.OADD, types.TINT64}: ssa.OpAdd64,
opAndType{ir.OADD, types.TUINT64}: ssa.OpAdd64,
opAndType{ir.OADD, types.TFLOAT32}: ssa.OpAdd32F,
opAndType{ir.OADD, types.TFLOAT64}: ssa.OpAdd64F,
opAndType{ir.OSUB, types.TINT8}: ssa.OpSub8,
opAndType{ir.OSUB, types.TUINT8}: ssa.OpSub8,
opAndType{ir.OSUB, types.TINT16}: ssa.OpSub16,
opAndType{ir.OSUB, types.TUINT16}: ssa.OpSub16,
opAndType{ir.OSUB, types.TINT32}: ssa.OpSub32,
opAndType{ir.OSUB, types.TUINT32}: ssa.OpSub32,
opAndType{ir.OSUB, types.TINT64}: ssa.OpSub64,
opAndType{ir.OSUB, types.TUINT64}: ssa.OpSub64,
opAndType{ir.OSUB, types.TFLOAT32}: ssa.OpSub32F,
opAndType{ir.OSUB, types.TFLOAT64}: ssa.OpSub64F,
opAndType{ir.ONOT, types.TBOOL}: ssa.OpNot,
opAndType{ir.ONEG, types.TINT8}: ssa.OpNeg8,
opAndType{ir.ONEG, types.TUINT8}: ssa.OpNeg8,
opAndType{ir.ONEG, types.TINT16}: ssa.OpNeg16,
opAndType{ir.ONEG, types.TUINT16}: ssa.OpNeg16,
opAndType{ir.ONEG, types.TINT32}: ssa.OpNeg32,
opAndType{ir.ONEG, types.TUINT32}: ssa.OpNeg32,
opAndType{ir.ONEG, types.TINT64}: ssa.OpNeg64,
opAndType{ir.ONEG, types.TUINT64}: ssa.OpNeg64,
opAndType{ir.ONEG, types.TFLOAT32}: ssa.OpNeg32F,
opAndType{ir.ONEG, types.TFLOAT64}: ssa.OpNeg64F,
opAndType{ir.OBITNOT, types.TINT8}: ssa.OpCom8,
opAndType{ir.OBITNOT, types.TUINT8}: ssa.OpCom8,
opAndType{ir.OBITNOT, types.TINT16}: ssa.OpCom16,
opAndType{ir.OBITNOT, types.TUINT16}: ssa.OpCom16,
opAndType{ir.OBITNOT, types.TINT32}: ssa.OpCom32,
opAndType{ir.OBITNOT, types.TUINT32}: ssa.OpCom32,
opAndType{ir.OBITNOT, types.TINT64}: ssa.OpCom64,
opAndType{ir.OBITNOT, types.TUINT64}: ssa.OpCom64,
opAndType{ir.OIMAG, types.TCOMPLEX64}: ssa.OpComplexImag,
opAndType{ir.OIMAG, types.TCOMPLEX128}: ssa.OpComplexImag,
opAndType{ir.OREAL, types.TCOMPLEX64}: ssa.OpComplexReal,
opAndType{ir.OREAL, types.TCOMPLEX128}: ssa.OpComplexReal,
opAndType{ir.OMUL, types.TINT8}: ssa.OpMul8,
opAndType{ir.OMUL, types.TUINT8}: ssa.OpMul8,
opAndType{ir.OMUL, types.TINT16}: ssa.OpMul16,
opAndType{ir.OMUL, types.TUINT16}: ssa.OpMul16,
opAndType{ir.OMUL, types.TINT32}: ssa.OpMul32,
opAndType{ir.OMUL, types.TUINT32}: ssa.OpMul32,
opAndType{ir.OMUL, types.TINT64}: ssa.OpMul64,
opAndType{ir.OMUL, types.TUINT64}: ssa.OpMul64,
opAndType{ir.OMUL, types.TFLOAT32}: ssa.OpMul32F,
opAndType{ir.OMUL, types.TFLOAT64}: ssa.OpMul64F,
opAndType{ir.ODIV, types.TFLOAT32}: ssa.OpDiv32F,
opAndType{ir.ODIV, types.TFLOAT64}: ssa.OpDiv64F,
opAndType{ir.ODIV, types.TINT8}: ssa.OpDiv8,
opAndType{ir.ODIV, types.TUINT8}: ssa.OpDiv8u,
opAndType{ir.ODIV, types.TINT16}: ssa.OpDiv16,
opAndType{ir.ODIV, types.TUINT16}: ssa.OpDiv16u,
opAndType{ir.ODIV, types.TINT32}: ssa.OpDiv32,
opAndType{ir.ODIV, types.TUINT32}: ssa.OpDiv32u,
opAndType{ir.ODIV, types.TINT64}: ssa.OpDiv64,
opAndType{ir.ODIV, types.TUINT64}: ssa.OpDiv64u,
opAndType{ir.OMOD, types.TINT8}: ssa.OpMod8,
opAndType{ir.OMOD, types.TUINT8}: ssa.OpMod8u,
opAndType{ir.OMOD, types.TINT16}: ssa.OpMod16,
opAndType{ir.OMOD, types.TUINT16}: ssa.OpMod16u,
opAndType{ir.OMOD, types.TINT32}: ssa.OpMod32,
opAndType{ir.OMOD, types.TUINT32}: ssa.OpMod32u,
opAndType{ir.OMOD, types.TINT64}: ssa.OpMod64,
opAndType{ir.OMOD, types.TUINT64}: ssa.OpMod64u,
opAndType{ir.OAND, types.TINT8}: ssa.OpAnd8,
opAndType{ir.OAND, types.TUINT8}: ssa.OpAnd8,
opAndType{ir.OAND, types.TINT16}: ssa.OpAnd16,
opAndType{ir.OAND, types.TUINT16}: ssa.OpAnd16,
opAndType{ir.OAND, types.TINT32}: ssa.OpAnd32,
opAndType{ir.OAND, types.TUINT32}: ssa.OpAnd32,
opAndType{ir.OAND, types.TINT64}: ssa.OpAnd64,
opAndType{ir.OAND, types.TUINT64}: ssa.OpAnd64,
opAndType{ir.OOR, types.TINT8}: ssa.OpOr8,
opAndType{ir.OOR, types.TUINT8}: ssa.OpOr8,
opAndType{ir.OOR, types.TINT16}: ssa.OpOr16,
opAndType{ir.OOR, types.TUINT16}: ssa.OpOr16,
opAndType{ir.OOR, types.TINT32}: ssa.OpOr32,
opAndType{ir.OOR, types.TUINT32}: ssa.OpOr32,
opAndType{ir.OOR, types.TINT64}: ssa.OpOr64,
opAndType{ir.OOR, types.TUINT64}: ssa.OpOr64,
opAndType{ir.OXOR, types.TINT8}: ssa.OpXor8,
opAndType{ir.OXOR, types.TUINT8}: ssa.OpXor8,
opAndType{ir.OXOR, types.TINT16}: ssa.OpXor16,
opAndType{ir.OXOR, types.TUINT16}: ssa.OpXor16,
opAndType{ir.OXOR, types.TINT32}: ssa.OpXor32,
opAndType{ir.OXOR, types.TUINT32}: ssa.OpXor32,
opAndType{ir.OXOR, types.TINT64}: ssa.OpXor64,
opAndType{ir.OXOR, types.TUINT64}: ssa.OpXor64,
opAndType{ir.OEQ, types.TBOOL}: ssa.OpEqB,
opAndType{ir.OEQ, types.TINT8}: ssa.OpEq8,
opAndType{ir.OEQ, types.TUINT8}: ssa.OpEq8,
opAndType{ir.OEQ, types.TINT16}: ssa.OpEq16,
opAndType{ir.OEQ, types.TUINT16}: ssa.OpEq16,
opAndType{ir.OEQ, types.TINT32}: ssa.OpEq32,
opAndType{ir.OEQ, types.TUINT32}: ssa.OpEq32,
opAndType{ir.OEQ, types.TINT64}: ssa.OpEq64,
opAndType{ir.OEQ, types.TUINT64}: ssa.OpEq64,
opAndType{ir.OEQ, types.TINTER}: ssa.OpEqInter,
opAndType{ir.OEQ, types.TSLICE}: ssa.OpEqSlice,
opAndType{ir.OEQ, types.TFUNC}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TMAP}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TCHAN}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TPTR}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TUINTPTR}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TUNSAFEPTR}: ssa.OpEqPtr,
opAndType{ir.OEQ, types.TFLOAT64}: ssa.OpEq64F,
opAndType{ir.OEQ, types.TFLOAT32}: ssa.OpEq32F,
opAndType{ir.ONE, types.TBOOL}: ssa.OpNeqB,
opAndType{ir.ONE, types.TINT8}: ssa.OpNeq8,
opAndType{ir.ONE, types.TUINT8}: ssa.OpNeq8,
opAndType{ir.ONE, types.TINT16}: ssa.OpNeq16,
opAndType{ir.ONE, types.TUINT16}: ssa.OpNeq16,
opAndType{ir.ONE, types.TINT32}: ssa.OpNeq32,
opAndType{ir.ONE, types.TUINT32}: ssa.OpNeq32,
opAndType{ir.ONE, types.TINT64}: ssa.OpNeq64,
opAndType{ir.ONE, types.TUINT64}: ssa.OpNeq64,
opAndType{ir.ONE, types.TINTER}: ssa.OpNeqInter,
opAndType{ir.ONE, types.TSLICE}: ssa.OpNeqSlice,
opAndType{ir.ONE, types.TFUNC}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TMAP}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TCHAN}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TPTR}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TUINTPTR}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TUNSAFEPTR}: ssa.OpNeqPtr,
opAndType{ir.ONE, types.TFLOAT64}: ssa.OpNeq64F,
opAndType{ir.ONE, types.TFLOAT32}: ssa.OpNeq32F,
opAndType{ir.OLT, types.TINT8}: ssa.OpLess8,
opAndType{ir.OLT, types.TUINT8}: ssa.OpLess8U,
opAndType{ir.OLT, types.TINT16}: ssa.OpLess16,
opAndType{ir.OLT, types.TUINT16}: ssa.OpLess16U,
opAndType{ir.OLT, types.TINT32}: ssa.OpLess32,
opAndType{ir.OLT, types.TUINT32}: ssa.OpLess32U,
opAndType{ir.OLT, types.TINT64}: ssa.OpLess64,
opAndType{ir.OLT, types.TUINT64}: ssa.OpLess64U,
opAndType{ir.OLT, types.TFLOAT64}: ssa.OpLess64F,
opAndType{ir.OLT, types.TFLOAT32}: ssa.OpLess32F,
opAndType{ir.OLE, types.TINT8}: ssa.OpLeq8,
opAndType{ir.OLE, types.TUINT8}: ssa.OpLeq8U,
opAndType{ir.OLE, types.TINT16}: ssa.OpLeq16,
opAndType{ir.OLE, types.TUINT16}: ssa.OpLeq16U,
opAndType{ir.OLE, types.TINT32}: ssa.OpLeq32,
opAndType{ir.OLE, types.TUINT32}: ssa.OpLeq32U,
opAndType{ir.OLE, types.TINT64}: ssa.OpLeq64,
opAndType{ir.OLE, types.TUINT64}: ssa.OpLeq64U,
opAndType{ir.OLE, types.TFLOAT64}: ssa.OpLeq64F,
opAndType{ir.OLE, types.TFLOAT32}: ssa.OpLeq32F,
}
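// concreteEtype returns the kind of t, with the platform-sized kinds
// (int, uint, uintptr) resolved to their fixed-size equivalents.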
func (s *state) concreteEtype(t *types.Type) types.Kind {
e := t.Kind()
switch e {
default:
return e
case types.TINT:
if s.config.PtrSize == 8 {
return types.TINT64
}
return types.TINT32
case types.TUINT:
if s.config.PtrSize == 8 {
return types.TUINT64
}
return types.TUINT32
case types.TUINTPTR:
if s.config.PtrSize == 8 {
return types.TUINT64
}
return types.TUINT32
}
}
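// ssaOp returns the generic SSA op for the IR op applied to operands of type t.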
func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op {
etype := s.concreteEtype(t)
x, ok := opToSSA[opAndType{op, etype}]
if !ok {
s.Fatalf("unhandled binary op %v %s", op, etype)
}
return x
}
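// opAndTwoTypes keys the shift table below (operand kind plus shift-amount
// kind); twoTypes and twoOpsAndType are the key and value types of the
// floating-point conversion tables.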
type opAndTwoTypes struct {
op ir.Op
etype1 types.Kind
etype2 types.Kind
}
type twoTypes struct {
etype1 types.Kind
etype2 types.Kind
}
type twoOpsAndType struct {
op1 ssa.Op
op2 ssa.Op
intermediateType types.Kind
}
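// fpConvOpToSSA maps a (from, to) kind pair to the pair of SSA conversion ops
// (and the intermediate type) used to lower conversions involving floats.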
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
twoTypes{types.TINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, types.TINT32},
twoTypes{types.TINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, types.TINT32},
twoTypes{types.TINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, types.TINT32},
twoTypes{types.TINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, types.TINT64},
twoTypes{types.TINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, types.TINT32},
twoTypes{types.TINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, types.TINT32},
twoTypes{types.TINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, types.TINT32},
twoTypes{types.TINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, types.TINT64},
twoTypes{types.TFLOAT32, types.TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
twoTypes{types.TFLOAT32, types.TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
twoTypes{types.TFLOAT32, types.TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, types.TINT32},
twoTypes{types.TFLOAT32, types.TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, types.TINT64},
twoTypes{types.TFLOAT64, types.TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
twoTypes{types.TFLOAT64, types.TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
twoTypes{types.TFLOAT64, types.TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, types.TINT32},
twoTypes{types.TFLOAT64, types.TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, types.TINT64},
// unsigned
twoTypes{types.TUINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, types.TINT32},
twoTypes{types.TUINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, types.TINT32},
twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, types.TINT64}, // go wide to dodge unsigned
twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto32F, branchy code expansion instead
twoTypes{types.TUINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, types.TINT32},
twoTypes{types.TUINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, types.TINT32},
twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, types.TINT64}, // go wide to dodge unsigned
twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto64F, branchy code expansion instead
twoTypes{types.TFLOAT32, types.TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
twoTypes{types.TFLOAT32, types.TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead
twoTypes{types.TFLOAT64, types.TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
twoTypes{types.TFLOAT64, types.TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead
// float
twoTypes{types.TFLOAT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32},
twoTypes{types.TFLOAT64, types.TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, types.TFLOAT64},
twoTypes{types.TFLOAT32, types.TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, types.TFLOAT32},
twoTypes{types.TFLOAT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, types.TFLOAT64},
}
// fpConvOpToSSA32 is used only on 32-bit archs and holds only the entries that
// differ from fpConvOpToSSA: on 32-bit archs, don't use int64<->float conversions for uint32.
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, types.TUINT32},
twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, types.TUINT32},
twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, types.TUINT32},
twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, types.TUINT32},
}
// uint64<->float conversions, only on machines that have instructions for that
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, types.TUINT64},
twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, types.TUINT64},
twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, types.TUINT64},
twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, types.TUINT64},
}
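// shiftOpToSSA maps a shift operator, operand kind, and shift-amount kind to
// the corresponding SSA shift op.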
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT8}: ssa.OpLsh8x8,
opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT8}: ssa.OpLsh8x8,
opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT16}: ssa.OpLsh8x16,
opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT16}: ssa.OpLsh8x16,
opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT32}: ssa.OpLsh8x32,
opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT32}: ssa.OpLsh8x32,
opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT64}: ssa.OpLsh8x64,
opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT64}: ssa.OpLsh8x64,
opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT8}: ssa.OpLsh16x8,
opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT8}: ssa.OpLsh16x8,
opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT16}: ssa.OpLsh16x16,
opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT16}: ssa.OpLsh16x16,
opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT32}: ssa.OpLsh16x32,
opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT32}: ssa.OpLsh16x32,
opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT64}: ssa.OpLsh16x64,
opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT64}: ssa.OpLsh16x64,
opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT8}: ssa.OpLsh32x8,
opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT8}: ssa.OpLsh32x8,
opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT16}: ssa.OpLsh32x16,
opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT16}: ssa.OpLsh32x16,
opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT32}: ssa.OpLsh32x32,
opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT32}: ssa.OpLsh32x32,
opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT64}: ssa.OpLsh32x64,
opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT64}: ssa.OpLsh32x64,
opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT8}: ssa.OpLsh64x8,
opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT8}: ssa.OpLsh64x8,
opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT16}: ssa.OpLsh64x16,
opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT16}: ssa.OpLsh64x16,
opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT32}: ssa.OpLsh64x32,
opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT32}: ssa.OpLsh64x32,
opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT64}: ssa.OpLsh64x64,
opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT64}: ssa.OpLsh64x64,
opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT8}: ssa.OpRsh8x8,
opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT8}: ssa.OpRsh8Ux8,
opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT16}: ssa.OpRsh8x16,
opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT16}: ssa.OpRsh8Ux16,
opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT32}: ssa.OpRsh8x32,
opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT32}: ssa.OpRsh8Ux32,
opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT64}: ssa.OpRsh8x64,
opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT64}: ssa.OpRsh8Ux64,
opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT8}: ssa.OpRsh16x8,
opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT8}: ssa.OpRsh16Ux8,
opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT16}: ssa.OpRsh16x16,
opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT16}: ssa.OpRsh16Ux16,
opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT32}: ssa.OpRsh16x32,
opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT32}: ssa.OpRsh16Ux32,
opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT64}: ssa.OpRsh16x64,
opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT64}: ssa.OpRsh16Ux64,
opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT8}: ssa.OpRsh32x8,
opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT8}: ssa.OpRsh32Ux8,
opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT16}: ssa.OpRsh32x16,
opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT16}: ssa.OpRsh32Ux16,
opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT32}: ssa.OpRsh32x32,
opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT32}: ssa.OpRsh32Ux32,
opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT64}: ssa.OpRsh32x64,
opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT64}: ssa.OpRsh32Ux64,
opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT8}: ssa.OpRsh64x8,
opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT8}: ssa.OpRsh64Ux8,
opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT16}: ssa.OpRsh64x16,
opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT16}: ssa.OpRsh64Ux16,
opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT32}: ssa.OpRsh64x32,
opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT32}: ssa.OpRsh64Ux32,
opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT64}: ssa.OpRsh64x64,
opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT64}: ssa.OpRsh64Ux64,
}
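// ssaShiftOp returns the SSA shift op for op applied to a value of type t
// shifted by an amount of type u.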
func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
etype1 := s.concreteEtype(t)
etype2 := s.concreteEtype(u)
x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
if !ok {
s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
}
return x
}
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n ir.Node) *ssa.Value {
if ir.HasUniquePos(n) {
// ONAMEs and named OLITERALs have the line number
// of the decl, not the use. See issue 14742.
s.pushLine(n.Pos())
defer s.popLine()
}
s.stmtList(n.Init())
switch n.Op() {
case ir.OBYTES2STRTMP:
n := n.(*ir.ConvExpr)
slice := s.expr(n.X)
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len)
case ir.OSTR2BYTESTMP:
n := n.(*ir.ConvExpr)
str := s.expr(n.X)
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)
case ir.OCFUNC:
n := n.(*ir.UnaryExpr)
aux := n.X.(*ir.Name).Linksym()
// OCFUNC is used to build function values, which must
// always reference ABIInternal entry points.
if aux.ABI() != obj.ABIInternal {
s.Fatalf("expected ABIInternal: %v", aux.ABI())
}
return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb)
case ir.ONAME:
n := n.(*ir.Name)
if n.Class == ir.PFUNC {
// "value" of a function is the address of the function's closure
sym := staticdata.FuncLinksym(n)
return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
}
if s.canSSA(n) {
return s.variable(n, n.Type())
}
return s.load(n.Type(), s.addr(n))
case ir.OLINKSYMOFFSET:
n := n.(*ir.LinksymOffsetExpr)
return s.load(n.Type(), s.addr(n))
case ir.ONIL:
n := n.(*ir.NilExpr)
t := n.Type()
switch {
case t.IsSlice():
return s.constSlice(t)
case t.IsInterface():
return s.constInterface(t)
default:
return s.constNil(t)
}
case ir.OLITERAL:
switch u := n.Val(); u.Kind() {
case constant.Int:
i := ir.IntVal(n.Type(), u)
switch n.Type().Size() {
case 1:
return s.constInt8(n.Type(), int8(i))
case 2:
return s.constInt16(n.Type(), int16(i))
case 4:
return s.constInt32(n.Type(), int32(i))
case 8:
return s.constInt64(n.Type(), i)
default:
s.Fatalf("bad integer size %d", n.Type().Size())
return nil
}
case constant.String:
i := constant.StringVal(u)
if i == "" {
return s.constEmptyString(n.Type())
}
return s.entryNewValue0A(ssa.OpConstString, n.Type(), ssa.StringToAux(i))
case constant.Bool:
return s.constBool(constant.BoolVal(u))
case constant.Float:
f, _ := constant.Float64Val(u)
switch n.Type().Size() {
case 4:
return s.constFloat32(n.Type(), f)
case 8:
return s.constFloat64(n.Type(), f)
default:
s.Fatalf("bad float size %d", n.Type().Size())
return nil
}
case constant.Complex:
re, _ := constant.Float64Val(constant.Real(u))
im, _ := constant.Float64Val(constant.Imag(u))
switch n.Type().Size() {
case 8:
pt := types.Types[types.TFLOAT32]
return s.newValue2(ssa.OpComplexMake, n.Type(),
s.constFloat32(pt, re),
s.constFloat32(pt, im))
case 16:
pt := types.Types[types.TFLOAT64]
return s.newValue2(ssa.OpComplexMake, n.Type(),
s.constFloat64(pt, re),
s.constFloat64(pt, im))
default:
s.Fatalf("bad complex size %d", n.Type().Size())
return nil
}
default:
s.Fatalf("unhandled OLITERAL %v", u.Kind())
return nil
}
case ir.OCONVNOP:
n := n.(*ir.ConvExpr)
to := n.Type()
from := n.X.Type()
// Assume everything will work out, so set up our return value.
// Anything interesting that happens from here is a fatal.
x := s.expr(n.X)
if to == from {
return x
}
// Special case for not confusing GC and liveness.
// We don't want pointers accidentally classified
// as not-pointers or vice-versa because of copy
// elision.
if to.IsPtrShaped() != from.IsPtrShaped() {
return s.newValue2(ssa.OpConvert, to, x, s.mem())
}
v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
// CONVNOP closure
if to.Kind() == types.TFUNC && from.IsPtrShaped() {
return v
}
// named <--> unnamed type or typed <--> untyped const
if from.Kind() == to.Kind() {
return v
}
// unsafe.Pointer <--> *T
if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() {
return v
}
// map <--> *hmap
if to.Kind() == types.TMAP && from.IsPtr() &&
to.MapType().Hmap == from.Elem() {
return v
}
types.CalcSize(from)
types.CalcSize(to)
if from.Width != to.Width {
s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
return nil
}
if etypesign(from.Kind()) != etypesign(to.Kind()) {
s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Kind(), to, to.Kind())
return nil
}
if base.Flag.Cfg.Instrumenting {
// These appear to be fine, but they fail the
// integer constraint below, so okay them here.
// Sample non-integer conversion: map[string]string -> *uint8
return v
}
if etypesign(from.Kind()) == 0 {
s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
return nil
}
// integer, same width, same sign
return v
case ir.OCONV:
n := n.(*ir.ConvExpr)
x := s.expr(n.X)
ft := n.X.Type() // from type
tt := n.Type() // to type
if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
return s.newValue1(ssa.OpCopy, n.Type(), x)
}
if ft.IsInteger() && tt.IsInteger() {
var op ssa.Op
if tt.Size() == ft.Size() {
op = ssa.OpCopy
} else if tt.Size() < ft.Size() {
// truncation
switch 10*ft.Size() + tt.Size() {
case 21:
op = ssa.OpTrunc16to8
case 41:
op = ssa.OpTrunc32to8
case 42:
op = ssa.OpTrunc32to16
case 81:
op = ssa.OpTrunc64to8
case 82:
op = ssa.OpTrunc64to16
case 84:
op = ssa.OpTrunc64to32
default:
s.Fatalf("weird integer truncation %v -> %v", ft, tt)
}
} else if ft.IsSigned() {
// sign extension
switch 10*ft.Size() + tt.Size() {
case 12:
op = ssa.OpSignExt8to16
case 14:
op = ssa.OpSignExt8to32
case 18:
op = ssa.OpSignExt8to64
case 24:
op = ssa.OpSignExt16to32
case 28:
op = ssa.OpSignExt16to64
case 48:
op = ssa.OpSignExt32to64
default:
s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
}
} else {
// zero extension
switch 10*ft.Size() + tt.Size() {
case 12:
op = ssa.OpZeroExt8to16
case 14:
op = ssa.OpZeroExt8to32
case 18:
op = ssa.OpZeroExt8to64
case 24:
op = ssa.OpZeroExt16to32
case 28:
op = ssa.OpZeroExt16to64
case 48:
op = ssa.OpZeroExt32to64
default:
s.Fatalf("weird integer sign extension %v -> %v", ft, tt)
}
}
return s.newValue1(op, n.Type(), x)
}
if ft.IsFloat() || tt.IsFloat() {
conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat {
if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat {
if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
if Arch.LinkArch.Family == sys.MIPS && !s.softFloat {
if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
// tt is float32 or float64, and ft is also unsigned
if tt.Size() == 4 {
return s.uint32Tofloat32(n, x, ft, tt)
}
if tt.Size() == 8 {
return s.uint32Tofloat64(n, x, ft, tt)
}
} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
// ft is float32 or float64, and tt is unsigned integer
if ft.Size() == 4 {
return s.float32ToUint32(n, x, ft, tt)
}
if ft.Size() == 8 {
return s.float64ToUint32(n, x, ft, tt)
}
}
}
if !ok {
s.Fatalf("weird float conversion %v -> %v", ft, tt)
}
op1, op2, it := conv.op1, conv.op2, conv.intermediateType
if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
// normal case, not tripping over unsigned 64
if op1 == ssa.OpCopy {
if op2 == ssa.OpCopy {
return x
}
return s.newValueOrSfCall1(op2, n.Type(), x)
}
if op2 == ssa.OpCopy {
return s.newValueOrSfCall1(op1, n.Type(), x)
}
return s.newValueOrSfCall1(op2, n.Type(), s.newValueOrSfCall1(op1, types.Types[it], x))
}
// Tricky 64-bit unsigned cases.
if ft.IsInteger() {
// tt is float32 or float64, and ft is also unsigned
if tt.Size() == 4 {
return s.uint64Tofloat32(n, x, ft, tt)
}
if tt.Size() == 8 {
return s.uint64Tofloat64(n, x, ft, tt)
}
s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
}
// ft is float32 or float64, and tt is unsigned integer
if ft.Size() == 4 {
return s.float32ToUint64(n, x, ft, tt)
}
if ft.Size() == 8 {
return s.float64ToUint64(n, x, ft, tt)
}
s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
return nil
}
if ft.IsComplex() && tt.IsComplex() {
var op ssa.Op
if ft.Size() == tt.Size() {
switch ft.Size() {
case 8:
op = ssa.OpRound32F
case 16:
op = ssa.OpRound64F
default:
s.Fatalf("weird complex conversion %v -> %v", ft, tt)
}
} else if ft.Size() == 8 && tt.Size() == 16 {
op = ssa.OpCvt32Fto64F
} else if ft.Size() == 16 && tt.Size() == 8 {
op = ssa.OpCvt64Fto32F
} else {
s.Fatalf("weird complex conversion %v -> %v", ft, tt)
}
ftp := types.FloatForComplex(ft)
ttp := types.FloatForComplex(tt)
return s.newValue2(ssa.OpComplexMake, tt,
s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
}
s.Fatalf("unhandled OCONV %s -> %s", n.X.Type().Kind(), n.Type().Kind())
return nil
case ir.ODOTTYPE:
n := n.(*ir.TypeAssertExpr)
res, _ := s.dottype(n, false)
return res
// binary ops
case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
if n.X.Type().IsComplex() {
pt := types.FloatForComplex(n.X.Type())
op := s.ssaOp(ir.OEQ, pt)
r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i)
switch n.Op() {
case ir.OEQ:
return c
case ir.ONE:
return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c)
default:
s.Fatalf("ordered complex compare %v", n.Op())
}
}
// Convert OGE and OGT into OLE and OLT.
op := n.Op()
switch op {
case ir.OGE:
op, a, b = ir.OLE, b, a
case ir.OGT:
op, a, b = ir.OLT, b, a
}
if n.X.Type().IsFloat() {
// float comparison
return s.newValueOrSfCall2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
}
// integer comparison
return s.newValue2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
case ir.OMUL:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
if n.Type().IsComplex() {
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
aimag := s.newValue1(ssa.OpComplexImag, pt, a)
bimag := s.newValue1(ssa.OpComplexImag, pt, b)
if pt != wt { // Widen for calculation
areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
}
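// (areal + aimag·i) * (breal + bimag·i) = (areal*breal - aimag*bimag) + (areal*bimag + aimag*breal)·i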
xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal))
if pt != wt { // Narrow to store back
xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
}
return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
}
if n.Type().IsFloat() {
return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
case ir.ODIV:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
if n.Type().IsComplex() {
// TODO this is not executed because the front-end substitutes a runtime call.
// That probably ought to change; with modest optimization the widen/narrow
// conversions could all be elided in larger expression trees.
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
divop := ssa.OpDiv64F
pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
aimag := s.newValue1(ssa.OpComplexImag, pt, a)
bimag := s.newValue1(ssa.OpComplexImag, pt, b)
if pt != wt { // Widen for calculation
areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
}
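// (areal + aimag·i) / (breal + bimag·i) =
// ((areal*breal + aimag*bimag) + (aimag*breal - areal*bimag)·i) / (breal*breal + bimag*bimag)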
denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag))
xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag))
// TODO not sure if this is best done in wide precision or narrow
// Double-rounding might be an issue.
// Note that the pre-SSA implementation does the entire calculation
// in wide format, so wide is compatible.
xreal = s.newValueOrSfCall2(divop, wt, xreal, denom)
ximag = s.newValueOrSfCall2(divop, wt, ximag, denom)
if pt != wt { // Narrow to store back
xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
}
return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
}
if n.Type().IsFloat() {
return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
return s.intDivide(n, a, b)
case ir.OMOD:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
return s.intDivide(n, a, b)
case ir.OADD, ir.OSUB:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
if n.Type().IsComplex() {
pt := types.FloatForComplex(n.Type())
op := s.ssaOp(n.Op(), pt)
return s.newValue2(ssa.OpComplexMake, n.Type(),
s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
}
if n.Type().IsFloat() {
return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
case ir.OAND, ir.OOR, ir.OXOR:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
case ir.OANDNOT:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b)
return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b)
case ir.OLSH, ir.ORSH:
n := n.(*ir.BinaryExpr)
a := s.expr(n.X)
b := s.expr(n.Y)
bt := b.Type
if bt.IsSigned() {
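// The shift count is signed; a negative count must panic,
// so check 0 <= b before treating the count as unsigned.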
cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b)
s.check(cmp, ir.Syms.Panicshift)
bt = bt.ToUnsigned()
}
return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b)
case ir.OANDAND, ir.OOROR:
// To implement OANDAND (and OOROR), we introduce a
// new temporary variable to hold the result. The
// variable is associated with the OANDAND node in the
// s.vars table (normally variables are only
// associated with ONAME nodes). We convert
// A && B
// to
// var = A
// if var {
// var = B
// }
// Using var in the subsequent block introduces the
// necessary phi variable.
n := n.(*ir.LogicalExpr)
el := s.expr(n.X)
s.vars[n] = el
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(el)
// In theory, we should set b.Likely here based on context.
// However, gc only gives us likeliness hints
// in a single place, for plain OIF statements,
// and passing around context is finicky, so don't bother for now.
bRight := s.f.NewBlock(ssa.BlockPlain)
bResult := s.f.NewBlock(ssa.BlockPlain)
if n.Op() == ir.OANDAND {
b.AddEdgeTo(bRight)
b.AddEdgeTo(bResult)
} else if n.Op() == ir.OOROR {
b.AddEdgeTo(bResult)
b.AddEdgeTo(bRight)
}
s.startBlock(bRight)
er := s.expr(n.Y)
s.vars[n] = er
b = s.endBlock()
b.AddEdgeTo(bResult)
s.startBlock(bResult)
return s.variable(n, types.Types[types.TBOOL])
case ir.OCOMPLEX:
n := n.(*ir.BinaryExpr)
r := s.expr(n.X)
i := s.expr(n.Y)
return s.newValue2(ssa.OpComplexMake, n.Type(), r, i)
// unary ops
case ir.ONEG:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
if n.Type().IsComplex() {
tp := types.FloatForComplex(n.Type())
negop := s.ssaOp(n.Op(), tp)
return s.newValue2(ssa.OpComplexMake, n.Type(),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
}
return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
case ir.ONOT, ir.OBITNOT:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
case ir.OIMAG, ir.OREAL:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
return s.newValue1(s.ssaOp(n.Op(), n.X.Type()), n.Type(), a)
case ir.OPLUS:
n := n.(*ir.UnaryExpr)
return s.expr(n.X)
case ir.OADDR:
n := n.(*ir.AddrExpr)
return s.addr(n.X)
case ir.ORESULT:
n := n.(*ir.ResultExpr)
if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
panic("Expected to see a previous call")
}
which := n.Index
if which == -1 {
panic(fmt.Errorf("ORESULT %v does not match call %s", n, s.prevCall))
}
return s.resultOfCall(s.prevCall, which, n.Type())
case ir.ODEREF:
n := n.(*ir.StarExpr)
p := s.exprPtr(n.X, n.Bounded(), n.Pos())
return s.load(n.Type(), p)
case ir.ODOT:
n := n.(*ir.SelectorExpr)
if n.X.Op() == ir.OSTRUCTLIT {
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !ir.IsZero(n.X) {
s.Fatalf("literal with nonzero value in SSA: %v", n.X)
}
return s.zeroVal(n.Type())
}
// If n is addressable and can't be represented in
// SSA, then load just the selected field. This
// prevents false memory dependencies in race/msan
// instrumentation.
if ir.IsAddressable(n) && !s.canSSA(n) {
p := s.addr(n)
return s.load(n.Type(), p)
}
v := s.expr(n.X)
return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v)
case ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
p := s.exprPtr(n.X, n.Bounded(), n.Pos())
p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p)
return s.load(n.Type(), p)
case ir.OINDEX:
n := n.(*ir.IndexExpr)
switch {
case n.X.Type().IsString():
if n.Bounded() && ir.IsConst(n.X, constant.String) && ir.IsConst(n.Index, constant.Int) {
// Replace "abc"[1] with 'b'.
// Delayed until now because "abc"[1] is not an ideal constant.
// See test/fixedbugs/issue11370.go.
return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.X)[ir.Int64Val(n.Index)])))
}
a := s.expr(n.X)
i := s.expr(n.Index)
len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a)
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
ptrtyp := s.f.Config.Types.BytePtr
ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
if ir.IsConst(n.Index, constant.Int) {
ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Index), ptr)
} else {
ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
}
return s.load(types.Types[types.TUINT8], ptr)
case n.X.Type().IsSlice():
p := s.addr(n)
return s.load(n.X.Type().Elem(), p)
case n.X.Type().IsArray():
if TypeOK(n.X.Type()) {
// SSA can handle arrays of length at most 1.
bound := n.X.Type().NumElem()
a := s.expr(n.X)
i := s.expr(n.Index)
if bound == 0 {
// Bounds check will never succeed. Might as well
// use constants for the bounds check.
z := s.constInt(types.Types[types.TINT], 0)
s.boundsCheck(z, z, ssa.BoundsIndex, false)
// The return value won't be live, return junk.
return s.newValue0(ssa.OpUnknown, n.Type())
}
len := s.constInt(types.Types[types.TINT], bound)
s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a)
}
p := s.addr(n)
return s.load(n.X.Type().Elem(), p)
default:
s.Fatalf("bad type for index %v", n.X.Type())
return nil
}
case ir.OLEN, ir.OCAP:
n := n.(*ir.UnaryExpr)
switch {
case n.X.Type().IsSlice():
op := ssa.OpSliceLen
if n.Op() == ir.OCAP {
op = ssa.OpSliceCap
}
return s.newValue1(op, types.Types[types.TINT], s.expr(n.X))
case n.X.Type().IsString(): // string; not reachable for OCAP
return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.X))
case n.X.Type().IsMap(), n.X.Type().IsChan():
return s.referenceTypeBuiltin(n, s.expr(n.X))
default: // array
return s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
}
case ir.OSPTR:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
if n.X.Type().IsSlice() {
return s.newValue1(ssa.OpSlicePtr, n.Type(), a)
} else {
return s.newValue1(ssa.OpStringPtr, n.Type(), a)
}
case ir.OITAB:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
return s.newValue1(ssa.OpITab, n.Type(), a)
case ir.OIDATA:
n := n.(*ir.UnaryExpr)
a := s.expr(n.X)
return s.newValue1(ssa.OpIData, n.Type(), a)
case ir.OEFACE:
n := n.(*ir.BinaryExpr)
tab := s.expr(n.X)
data := s.expr(n.Y)
return s.newValue2(ssa.OpIMake, n.Type(), tab, data)
case ir.OSLICEHEADER:
n := n.(*ir.SliceHeaderExpr)
p := s.expr(n.Ptr)
l := s.expr(n.Len)
c := s.expr(n.Cap)
return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
n := n.(*ir.SliceExpr)
v := s.expr(n.X)
var i, j, k *ssa.Value
if n.Low != nil {
i = s.expr(n.Low)
}
if n.High != nil {
j = s.expr(n.High)
}
if n.Max != nil {
k = s.expr(n.Max)
}
p, l, c := s.slice(v, i, j, k, n.Bounded())
return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
case ir.OSLICESTR:
n := n.(*ir.SliceExpr)
v := s.expr(n.X)
var i, j *ssa.Value
if n.Low != nil {
i = s.expr(n.Low)
}
if n.High != nil {
j = s.expr(n.High)
}
p, l, _ := s.slice(v, i, j, nil, n.Bounded())
return s.newValue2(ssa.OpStringMake, n.Type(), p, l)
case ir.OSLICE2ARRPTR:
// if arrlen > slice.len {
// panic(...)
// }
// slice.ptr
n := n.(*ir.ConvExpr)
v := s.expr(n.X)
arrlen := s.constInt(types.Types[types.TINT], n.Type().Elem().NumElem())
cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false)
return s.newValue1(ssa.OpSlicePtrUnchecked, types.Types[types.TINT], v)
case ir.OCALLFUNC:
n := n.(*ir.CallExpr)
if ir.IsIntrinsicCall(n) {
return s.intrinsicCall(n)
}
fallthrough
case ir.OCALLINTER, ir.OCALLMETH:
n := n.(*ir.CallExpr)
return s.callResult(n, callNormal)
case ir.OGETG:
n := n.(*ir.CallExpr)
return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
case ir.OAPPEND:
return s.append(n.(*ir.CallExpr), false)
case ir.OSTRUCTLIT, ir.OARRAYLIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
n := n.(*ir.CompLitExpr)
if !ir.IsZero(n) {
s.Fatalf("literal with nonzero value in SSA: %v", n)
}
return s.zeroVal(n.Type())
case ir.ONEW:
n := n.(*ir.UnaryExpr)
return s.newObject(n.Type().Elem())
case ir.OUNSAFEADD:
n := n.(*ir.BinaryExpr)
ptr := s.expr(n.X)
len := s.expr(n.Y)
return s.newValue2(ssa.OpAddPtr, n.Type(), ptr, len)
default:
s.Fatalf("unhandled expr %v", n.Op())
return nil
}
}
// resultOfCall returns the value of result number which of the call c.
// A result that is neither register-assigned nor SSA-able is loaded from memory.
func (s *state) resultOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
aux := c.Aux.(*ssa.AuxCall)
pa := aux.ParamAssignmentForResult(which)
// TODO(register args) determine if in-memory TypeOK is better loaded early from SelectNAddr or later when SelectN is expanded.
// SelectN is better for pattern-matching and possible call-aware analysis we might want to do in the future.
if len(pa.Registers) == 0 && !TypeOK(t) {
addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
return s.rawLoad(t, addr)
}
return s.newValue1I(ssa.OpSelectN, t, which, c)
}
// resultAddrOfCall returns the address of result number which of the call c.
// If the result is returned in registers, it is first stored to a stack temporary
// and that temporary's address is returned.
func (s *state) resultAddrOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
aux := c.Aux.(*ssa.AuxCall)
pa := aux.ParamAssignmentForResult(which)
if len(pa.Registers) == 0 {
return s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
}
_, addr := s.temp(c.Pos, t)
rval := s.newValue1I(ssa.OpSelectN, t, which, c)
s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, addr, rval, s.mem(), false)
return addr
}
// append converts an OAPPEND node to SSA.
// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
// adds it to s, and returns the Value.
// If inplace is true, it writes the result of the OAPPEND expression n
// back to the slice being appended to, and returns nil.
// inplace MUST be set to false if the slice can be SSA'd.
func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value {
// If inplace is false, process as expression "append(s, e1, e2, e3)":
//
// ptr, len, cap := s
// newlen := len + 3
// if newlen > cap {
// ptr, len, cap = growslice(s, newlen)
// newlen = len + 3 // recalculate to avoid a spill
// }
// // with write barriers, if needed:
// *(ptr+len) = e1
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3
// return makeslice(ptr, newlen, cap)
//
//
// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
//
// a := &s
// ptr, len, cap := s
// newlen := len + 3
// if uint(newlen) > uint(cap) {
// newptr, len, newcap = growslice(ptr, len, cap, newlen)
// vardef(a) // if necessary, advise liveness we are writing a new a
// *a.cap = newcap // write before ptr to avoid a spill
// *a.ptr = newptr // with write barrier
// }
// newlen = len + 3 // recalculate to avoid a spill
// *a.len = newlen
// // with write barriers, if needed:
// *(ptr+len) = e1
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3
et := n.Type().Elem()
pt := types.NewPtr(et)
// Evaluate slice
sn := n.Args[0] // the slice node is the first in the list
var slice, addr *ssa.Value
if inplace {
addr = s.addr(sn)
slice = s.load(n.Type(), addr)
} else {
slice = s.expr(sn)
}
// Allocate new blocks
grow := s.f.NewBlock(ssa.BlockPlain)
assign := s.f.NewBlock(ssa.BlockPlain)
// Decide if we need to grow
nargs := int64(len(n.Args) - 1)
p := s.newValue1(ssa.OpSlicePtr, pt, slice)
l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice)
nl := s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
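// Grow when cap < newlen (i.e. len+nargs), using an unsigned comparison.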
cmp := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT]), types.Types[types.TBOOL], c, nl)
s.vars[ptrVar] = p
if !inplace {
s.vars[newlenVar] = nl
s.vars[capVar] = c
} else {
s.vars[lenVar] = l
}
b := s.endBlock()
b.Kind = ssa.BlockIf
b.Likely = ssa.BranchUnlikely
b.SetControl(cmp)
b.AddEdgeTo(grow)
b.AddEdgeTo(assign)
// Call growslice
s.startBlock(grow)
taddr := s.expr(n.X)
r := s.rtcall(ir.Syms.Growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl)
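// growslice returns the new slice's ptr, len, and cap as r[0], r[1], and r[2].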
if inplace {
if sn.Op() == ir.ONAME {
sn := sn.(*ir.Name)
if sn.Class != ir.PEXTERN {
// Tell liveness we're about to build a new slice
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
}
}
capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceCapOffset, addr)
s.store(types.Types[types.TINT], capaddr, r[2])
s.store(pt, addr, r[0])
// load the value we just stored to avoid having to spill it
s.vars[ptrVar] = s.load(pt, addr)
s.vars[lenVar] = r[1] // avoid a spill in the fast path
} else {
s.vars[ptrVar] = r[0]
s.vars[newlenVar] = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], r[1], s.constInt(types.Types[types.TINT], nargs))
s.vars[capVar] = r[2]
}
b = s.endBlock()
b.AddEdgeTo(assign)
// assign new elements to slots
s.startBlock(assign)
if inplace {
l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len
nl = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceLenOffset, addr)
s.store(types.Types[types.TINT], lenaddr, nl)
}
// Evaluate args
type argRec struct {
// if store is true, we're appending the value v. If false, we're appending the
// value at *v.
v *ssa.Value
store bool
}
args := make([]argRec, 0, nargs)
for _, n := range n.Args[1:] {
if TypeOK(n.Type()) {
args = append(args, argRec{v: s.expr(n), store: true})
} else {
v := s.addr(n)
args = append(args, argRec{v: v})
}
}
p = s.variable(ptrVar, pt) // generates phi for ptr
if !inplace {
nl = s.variable(newlenVar, types.Types[types.TINT]) // generates phi for nl
c = s.variable(capVar, types.Types[types.TINT]) // generates phi for cap
}
p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
for i, arg := range args {
addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[types.TINT], int64(i)))
if arg.store {
s.storeType(et, addr, arg.v, 0, true)
} else {
s.move(et, addr, arg.v)
}
}
delete(s.vars, ptrVar)
if inplace {
delete(s.vars, lenVar)
return nil
}
delete(s.vars, newlenVar)
delete(s.vars, capVar)
// make result
return s.newValue3(ssa.OpSliceMake, n.Type(), p, nl, c)
}
// condBranch evaluates the boolean expression cond and branches to yes
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
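//
// For example, "if a && b" is lowered roughly as
//
//	condBranch(a, mid, no, max8(likely, 0))
//	startBlock(mid)
//	condBranch(b, yes, no, likely)
//
// so that b is evaluated only when a is true.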
func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
switch cond.Op() {
case ir.OANDAND:
cond := cond.(*ir.LogicalExpr)
mid := s.f.NewBlock(ssa.BlockPlain)
s.stmtList(cond.Init())
s.condBranch(cond.X, mid, no, max8(likely, 0))
s.startBlock(mid)
s.condBranch(cond.Y, yes, no, likely)
return
// Note: if likely==1, then both recursive calls pass 1.
// If likely==-1, then we don't have enough information to decide
// whether the first branch is likely or not. So we pass 0 for
// the likeliness of the first branch.
// TODO: have the frontend give us branch prediction hints for
// OANDAND and OOROR nodes (if it ever has such info).
case ir.OOROR:
cond := cond.(*ir.LogicalExpr)
mid := s.f.NewBlock(ssa.BlockPlain)
s.stmtList(cond.Init())
s.condBranch(cond.X, yes, mid, min8(likely, 0))
s.startBlock(mid)
s.condBranch(cond.Y, yes, no, likely)
return
// Note: if likely==-1, then both recursive calls pass -1.
// If likely==1, then we don't have enough info to decide
// the likelihood of the first branch.
case ir.ONOT:
cond := cond.(*ir.UnaryExpr)
s.stmtList(cond.Init())
s.condBranch(cond.X, no, yes, -likely)
return
case ir.OCONVNOP:
cond := cond.(*ir.ConvExpr)
s.stmtList(cond.Init())
s.condBranch(cond.X, yes, no, likely)
return
}
c := s.expr(cond)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(c)
b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
b.AddEdgeTo(yes)
b.AddEdgeTo(no)
}
type skipMask uint8
const (
skipPtr skipMask = 1 << iota
skipLen
skipCap
)
// assign does left = right.
// Right has already been evaluated to ssa, left has not.
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// skip indicates assignments (at the top level) that can be avoided.
func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
if left.Op() == ir.ONAME && ir.IsBlank(left) {
return
}
t := left.Type()
types.CalcSize(t)
if s.canSSA(left) {
if deref {
s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
}
if left.Op() == ir.ODOT {
// We're assigning to a field of an ssa-able value.
// We need to build a new structure with the new value for the
// field we're assigning and the old values for the other fields.
// For instance:
// type T struct {a, b, c int}
// var T x
// x.b = 5
// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
// Grab information about the structure type.
left := left.(*ir.SelectorExpr)
t := left.X.Type()
nf := t.NumFields()
idx := fieldIdx(left)
// Grab old value of structure.
old := s.expr(left.X)
// Make new structure.
new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
// Add fields as args.
for i := 0; i < nf; i++ {
if i == idx {
new.AddArg(right)
} else {
new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
}
}
// Recursively assign the new value we've made to the base of the dot op.
s.assign(left.X, new, false, 0)
// TODO: do we need to update named values here?
return
}
if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).X.Type().IsArray() {
left := left.(*ir.IndexExpr)
s.pushLine(left.Pos())
defer s.popLine()
// We're assigning to an element of an ssa-able array.
// a[i] = v
t := left.X.Type()
n := t.NumElem()
i := s.expr(left.Index) // index
if n == 0 {
// The bounds check must fail. Might as well
// ignore the actual index and just use zeros.
z := s.constInt(types.Types[types.TINT], 0)
s.boundsCheck(z, z, ssa.BoundsIndex, false)
return
}
if n != 1 {
s.Fatalf("assigning to non-1-length array")
}
// Rewrite to a = [1]{v}
len := s.constInt(types.Types[types.TINT], 1)
s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
v := s.newValue1(ssa.OpArrayMake1, t, right)
s.assign(left.X, v, false, 0)
return
}
left := left.(*ir.Name)
// Update variable assignment.
s.vars[left] = right
s.addNamedValue(left, right)
return
}
// If this assignment clobbers an entire local variable, then emit
// OpVarDef so liveness analysis knows the variable is redefined.
if base, ok := clobberBase(left).(*ir.Name); ok && base.OnStack() && skip == 0 {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base))
}
// Left is not ssa-able. Compute its address.
addr := s.addr(left)
if ir.IsReflectHeaderDataField(left) {
// Package unsafe's documentation says storing pointers into
// reflect.SliceHeader and reflect.StringHeader's Data fields
// is valid, even though they have type uintptr (#19168).
// Mark it pointer type to signal the writebarrier pass to
// insert a write barrier.
t = types.Types[types.TUNSAFEPTR]
}
if deref {
// Treat as a mem->mem move.
if right == nil {
s.zero(t, addr)
} else {
s.move(t, addr, right)
}
return
}
// Treat as a store.
s.storeType(t, addr, right, skip, !ir.IsAutoTmp(left))
}
// zeroVal returns the zero value for type t.
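// For example, the zero value of a complex64 is ComplexMake of two float32 zeros,
// and the zero value of a struct is a StructMake of the zero values of its fields.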
func (s *state) zeroVal(t *types.Type) *ssa.Value {
switch {
case t.IsInteger():
switch t.Size() {
case 1:
return s.constInt8(t, 0)
case 2:
return s.constInt16(t, 0)
case 4:
return s.constInt32(t, 0)
case 8:
return s.constInt64(t, 0)
default:
s.Fatalf("bad sized integer type %v", t)
}
case t.IsFloat():
switch t.Size() {
case 4:
return s.constFloat32(t, 0)
case 8:
return s.constFloat64(t, 0)
default:
s.Fatalf("bad sized float type %v", t)
}
case t.IsComplex():
switch t.Size() {
case 8:
z := s.constFloat32(types.Types[types.TFLOAT32], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
case 16:
z := s.constFloat64(types.Types[types.TFLOAT64], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
default:
s.Fatalf("bad sized complex type %v", t)
}
case t.IsString():
return s.constEmptyString(t)
case t.IsPtrShaped():
return s.constNil(t)
case t.IsBoolean():
return s.constBool(false)
case t.IsInterface():
return s.constInterface(t)
case t.IsSlice():
return s.constSlice(t)
case t.IsStruct():
n := t.NumFields()
v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
for i := 0; i < n; i++ {
v.AddArg(s.zeroVal(t.FieldType(i)))
}
return v
case t.IsArray():
switch t.NumElem() {
case 0:
return s.entryNewValue0(ssa.OpArrayMake0, t)
case 1:
return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
}
}
s.Fatalf("zero for type %v not implemented", t)
return nil
}
type callKind int8
const (
callNormal callKind = iota
callDefer
callDeferStack
callGo
)
type sfRtCallDef struct {
rtfn *obj.LSym
rtype types.Kind
}
var softFloatOps map[ssa.Op]sfRtCallDef
func softfloatInit() {
// Some of these operations get transformed by sfcall.
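// For example, Sub is lowered as an add of the negated second operand, Neq as a
// negated Eq, and Less/Leq by swapping the operands of Gt/Ge, which is why those
// entries point at the fadd/feq/fgt/fge routines.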
softFloatOps = map[ssa.Op]sfRtCallDef{
ssa.OpAdd32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
ssa.OpAdd64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
ssa.OpSub32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
ssa.OpSub64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
ssa.OpMul32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul32"), types.TFLOAT32},
ssa.OpMul64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul64"), types.TFLOAT64},
ssa.OpDiv32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv32"), types.TFLOAT32},
ssa.OpDiv64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv64"), types.TFLOAT64},
ssa.OpEq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
ssa.OpEq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
ssa.OpNeq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
ssa.OpNeq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
ssa.OpLess64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt64"), types.TBOOL},
ssa.OpLess32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt32"), types.TBOOL},
ssa.OpLeq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fge64"), types.TBOOL},
ssa.OpLeq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fge32"), types.TBOOL},
ssa.OpCvt32to32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to32"), types.TFLOAT32},
ssa.OpCvt32Fto32: sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint32"), types.TINT32},
ssa.OpCvt64to32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to32"), types.TFLOAT32},
ssa.OpCvt32Fto64: sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint64"), types.TINT64},
ssa.OpCvt64Uto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to32"), types.TFLOAT32},
ssa.OpCvt32Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f32touint64"), types.TUINT64},
ssa.OpCvt32to64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to64"), types.TFLOAT64},
ssa.OpCvt64Fto32: sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint32"), types.TINT32},
ssa.OpCvt64to64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to64"), types.TFLOAT64},
ssa.OpCvt64Fto64: sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint64"), types.TINT64},
ssa.OpCvt64Uto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to64"), types.TFLOAT64},
ssa.OpCvt64Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f64touint64"), types.TUINT64},
ssa.OpCvt32Fto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("f32to64"), types.TFLOAT64},
ssa.OpCvt64Fto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("f64to32"), types.TFLOAT32},
}
}
// TODO: do not emit sfcall if operation can be optimized to constant in later
// opt phase
// sfcall lowers the floating-point operation op applied to args to a call to the
// corresponding softfloat runtime function, if there is one. It reports whether
// the lowering took place.
func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
if callDef, ok := softFloatOps[op]; ok {
switch op {
case ssa.OpLess32F,
ssa.OpLess64F,
ssa.OpLeq32F,
ssa.OpLeq64F:
args[0], args[1] = args[1], args[0]
case ssa.OpSub32F,
ssa.OpSub64F:
args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
}
result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0]
if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
result = s.newValue1(ssa.OpNot, result.Type, result)
}
return result, true
}
return nil, false
}
var intrinsics map[intrinsicKey]intrinsicBuilder
// An intrinsicBuilder converts a call node n into an ssa value that
// implements that call as an intrinsic. args is a list of arguments to the func.
type intrinsicBuilder func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value
type intrinsicKey struct {
arch *sys.Arch
pkg string
fn string
}
// InitTables builds the table of intrinsic implementations, keyed by
// (architecture, package, function name).
func InitTables() {
intrinsics = map[intrinsicKey]intrinsicBuilder{}
var all []*sys.Arch
var p4 []*sys.Arch
var p8 []*sys.Arch
var lwatomics []*sys.Arch
for _, a := range &sys.Archs {
all = append(all, a)
if a.PtrSize == 4 {
p4 = append(p4, a)
} else {
p8 = append(p8, a)
}
if a.Family != sys.PPC64 {
lwatomics = append(lwatomics, a)
}
}
// add adds the intrinsic b for pkg.fn for the given list of architectures.
add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
for _, a := range archs {
intrinsics[intrinsicKey{a, pkg, fn}] = b
}
}
// addF does the same as add but operates on architecture families.
addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
m := 0
for _, f := range archFamilies {
if f >= 32 {
panic("too many architecture families")
}
m |= 1 << uint(f)
}
for _, a := range all {
if m>>uint(a.Family)&1 != 0 {
intrinsics[intrinsicKey{a, pkg, fn}] = b
}
}
}
// alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
aliased := false
for _, a := range archs {
if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
intrinsics[intrinsicKey{a, pkg, fn}] = b
aliased = true
}
}
if !aliased {
panic(fmt.Sprintf("attempted to alias undefined intrinsic: %s.%s", pkg, fn))
}
}
/******** runtime ********/
if !base.Flag.Cfg.Instrumenting {
add("runtime", "slicebytetostringtmp",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
// Compiler frontend optimizations emit OBYTES2STRTMP nodes
// for the backend instead of slicebytetostringtmp calls
// when not instrumenting.
return s.newValue2(ssa.OpStringMake, n.Type(), args[0], args[1])
},
all...)
}
addF("runtime/internal/math", "MulUintptr",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
}
return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
},
sys.AMD64, sys.I386, sys.MIPS64)
add("runtime", "KeepAlive",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
return nil
},
all...)
add("runtime", "getclosureptr",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
},
all...)
add("runtime", "getcallerpc",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
},
all...)
add("runtime", "getcallersp",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
},
all...)
/******** runtime/internal/sys ********/
addF("runtime/internal/sys", "Ctz32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Ctz64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Bswap32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
addF("runtime/internal/sys", "Bswap64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
/******** runtime/internal/atomic ********/
addF("runtime/internal/atomic", "Load",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StorepNoWB",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64)
addF("runtime/internal/atomic", "Xchg",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xchg64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
type atomicOpEmitter func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind)
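// makeAtomicGuardedIntrinsicARM64 wraps emit in a runtime check of ARM64HasATOMICS:
// it emits the op1 variant of the operation when the feature is available and falls
// back to op0 otherwise, then merges the result (of kind rtyp; types.TNIL means the
// operation produces no value).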
makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.Kind, emit atomicOpEmitter) intrinsicBuilder {
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
// Whether the target supports the atomic instructions is detected dynamically at run time.
addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARM64HasATOMICS, s.sb)
v := s.load(types.Types[types.TBOOL], addr)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely
// We have the atomic instructions - use them directly.
s.startBlock(bTrue)
emit(s, n, args, op1, typ)
s.endBlock().AddEdgeTo(bEnd)
// Use original instruction sequence.
s.startBlock(bFalse)
emit(s, n, args, op0, typ)
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
if rtyp == types.TNIL {
return nil
} else {
return s.variable(n, types.Types[rtyp])
}
}
}
atomicXchgXaddEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
}
addF("runtime/internal/atomic", "Xchg",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xchg64",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xadd",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xadd64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xadd",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xadd64",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Cas",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Cas64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "CasRel",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.PPC64)
atomicCasEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
}
addF("runtime/internal/atomic", "Cas",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Cas64",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "And8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "And",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Or8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Or",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
atomicAndOrEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
}
addF("runtime/internal/atomic", "And8",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "And",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Or8",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Or",
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
// Aliases for atomic load operations
alias("runtime/internal/atomic", "Loadint32", "runtime/internal/atomic", "Load", all...)
alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
// Aliases for atomic store operations
alias("runtime/internal/atomic", "Storeint32", "runtime/internal/atomic", "Store", all...)
alias("runtime/internal/atomic", "Storeint64", "runtime/internal/atomic", "Store64", all...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
// Aliases for atomic swap operations
alias("runtime/internal/atomic", "Xchgint32", "runtime/internal/atomic", "Xchg", all...)
alias("runtime/internal/atomic", "Xchgint64", "runtime/internal/atomic", "Xchg64", all...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
// Aliases for atomic add operations
alias("runtime/internal/atomic", "Xaddint32", "runtime/internal/atomic", "Xadd", all...)
alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
// Aliases for atomic CAS operations
alias("runtime/internal/atomic", "Casint32", "runtime/internal/atomic", "Cas", all...)
alias("runtime/internal/atomic", "Casint64", "runtime/internal/atomic", "Cas64", all...)
alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...)
/******** math ********/
addF("math", "Sqrt",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0])
},
sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
addF("math", "Trunc",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Ceil",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Floor",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Round",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X)
addF("math", "RoundToEven",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.S390X, sys.Wasm)
addF("math", "Abs",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.ARM, sys.PPC64, sys.Wasm)
addF("math", "Copysign",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1])
},
sys.PPC64, sys.Wasm)
addF("math", "FMA",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
},
sys.ARM64, sys.PPC64, sys.S390X)
addF("math", "FMA",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
return s.variable(n, types.Types[types.TFLOAT64])
}
v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasFMA)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely // >= Haswell CPUs are common
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
s.startBlock(bFalse)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
return s.variable(n, types.Types[types.TFLOAT64])
},
sys.AMD64)
addF("math", "FMA",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
return s.variable(n, types.Types[types.TFLOAT64])
}
addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARMHasVFPv4, s.sb)
v := s.load(types.Types[types.TBOOL], addr)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
s.startBlock(bFalse)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
return s.variable(n, types.Types[types.TFLOAT64])
},
sys.ARM)
makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasSSE41)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely // most machines have sse4.1 nowadays
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
s.vars[n] = s.newValue1(op, types.Types[types.TFLOAT64], args[0])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
s.startBlock(bFalse)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
return s.variable(n, types.Types[types.TFLOAT64])
}
}
addF("math", "RoundToEven",
makeRoundAMD64(ssa.OpRoundToEven),
sys.AMD64)
addF("math", "Floor",
makeRoundAMD64(ssa.OpFloor),
sys.AMD64)
addF("math", "Ceil",
makeRoundAMD64(ssa.OpCeil),
sys.AMD64)
addF("math", "Trunc",
makeRoundAMD64(ssa.OpTrunc),
sys.AMD64)
/******** math/bits ********/
addF("math/bits", "TrailingZeros64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "TrailingZeros32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "TrailingZeros16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
c := s.constInt32(types.Types[types.TUINT32], 1<<16)
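// Set bit 16 so that Ctz32 returns 16 rather than 32 when the low 16 bits are all zero.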
y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
},
sys.MIPS)
addF("math/bits", "TrailingZeros16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
addF("math/bits", "TrailingZeros16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
c := s.constInt64(types.Types[types.TUINT64], 1<<16)
y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
},
sys.S390X, sys.PPC64)
addF("math/bits", "TrailingZeros8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
c := s.constInt32(types.Types[types.TUINT32], 1<<8)
y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
},
sys.MIPS)
addF("math/bits", "TrailingZeros8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
addF("math/bits", "TrailingZeros8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
c := s.constInt64(types.Types[types.TUINT64], 1<<8)
y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
},
sys.S390X)
alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
// ReverseBytes inlines correctly, no need to intrinsify it.
// ReverseBytes16 lowers to a rotate, no need for anything special here.
addF("math/bits", "Len64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64)
addF("math/bits", "Len32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
}
x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0])
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
}
x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0])
},
sys.AMD64)
addF("math/bits", "Len8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
}
x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0])
},
sys.AMD64)
addF("math/bits", "Len",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
}
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
// LeadingZeros is handled because it trivially calls Len.
addF("math/bits", "Reverse64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
}
return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "RotateLeft8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1])
},
sys.AMD64)
addF("math/bits", "RotateLeft16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1])
},
sys.AMD64)
addF("math/bits", "RotateLeft32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "RotateLeft64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasPOPCNT)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely // most machines have popcnt nowadays
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
op := op64
if s.config.PtrSize == 4 {
op = op32
}
s.vars[n] = s.newValue1(op, types.Types[types.TINT], args[0])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
s.startBlock(bFalse)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TINT]
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
return s.variable(n, types.Types[types.TINT])
}
}
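// The AMD64 OnesCount builders above emit a runtime CPU-feature dispatch,
// roughly equivalent to this hypothetical Go sketch (the real check loads a
// runtime flag resolved at startup, e.g. x86HasPOPCNT):
//   if hasPOPCNT {
//       r = <hardware popcount>(x)
//   } else {
//       r = <call the pure Go math/bits fallback>(x)
//   }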
addF("math/bits", "OnesCount64",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
sys.AMD64)
addF("math/bits", "OnesCount64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0])
},
sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
addF("math/bits", "OnesCount32",
makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "OnesCount32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0])
},
sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
addF("math/bits", "OnesCount16",
makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
sys.AMD64)
addF("math/bits", "OnesCount16",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0])
},
sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "OnesCount8",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0])
},
sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "OnesCount",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "Mul64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64)
alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE)
alias("runtime/internal/math", "Mul64", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE)
addF("math/bits", "Add64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X)
alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X)
addF("math/bits", "Sub64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64, sys.ARM64, sys.S390X)
alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X)
addF("math/bits", "Div64",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
// check for divide-by-zero/overflow and panic with appropriate message
cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64]))
s.check(cmpZero, ir.Syms.Panicdivide)
cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2])
s.check(cmpOverflow, ir.Syms.Panicoverflow)
return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64)
alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64)
alias("runtime/internal/sys", "Ctz8", "math/bits", "TrailingZeros8", all...)
alias("runtime/internal/sys", "TrailingZeros8", "math/bits", "TrailingZeros8", all...)
alias("runtime/internal/sys", "TrailingZeros64", "math/bits", "TrailingZeros64", all...)
alias("runtime/internal/sys", "Len8", "math/bits", "Len8", all...)
alias("runtime/internal/sys", "Len64", "math/bits", "Len64", all...)
alias("runtime/internal/sys", "OnesCount64", "math/bits", "OnesCount64", all...)
/******** sync/atomic ********/
// Note: these are disabled when the race detector is enabled (base.Flag.Race); see findIntrinsic below.
alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
// Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap.
alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
/******** math/big ********/
add("math/big", "mulWW",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
},
sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X)
}
// findIntrinsic returns a function which builds the SSA equivalent of the
// function identified by the symbol sym. If sym is not an intrinsic call, returns nil.
func findIntrinsic(sym *types.Sym) intrinsicBuilder {
if sym == nil || sym.Pkg == nil {
return nil
}
pkg := sym.Pkg.Path
if sym.Pkg == types.LocalPkg {
pkg = base.Ctxt.Pkgpath
}
if sym.Pkg == ir.Pkgs.Runtime {
pkg = "runtime"
}
if base.Flag.Race && pkg == "sync/atomic" {
// The race detector needs to be able to intercept these calls.
// We can't intrinsify them.
return nil
}
// Skip intrinsifying math functions (which may contain hard-float
// instructions) when building in soft-float mode.
if Arch.SoftFloat && pkg == "math" {
return nil
}
fn := sym.Name
if ssa.IntrinsicsDisable {
if pkg == "runtime" && (fn == "getcallerpc" || fn == "getcallersp" || fn == "getclosureptr") {
// These runtime functions don't have definitions, must be intrinsics.
} else {
return nil
}
}
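// For example, on amd64 a call to math/bits.OnesCount64 resolves via the key
// {amd64, "math/bits", "OnesCount64"} to the builder registered above; on an
// architecture with no registered entry the lookup below yields nil and the
// call is compiled as an ordinary function call.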
return intrinsics[intrinsicKey{Arch.LinkArch.Arch, pkg, fn}]
}
func IsIntrinsicCall(n *ir.CallExpr) bool {
if n == nil {
return false
}
name, ok := n.X.(*ir.Name)
if !ok {
return false
}
return findIntrinsic(name.Sym()) != nil
}
// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
v := findIntrinsic(n.X.Sym())(s, n, s.intrinsicArgs(n))
if ssa.IntrinsicsDebug > 0 {
x := v
if x == nil {
x = s.mem()
}
if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
x = x.Args[0]
}
base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.X.Sym().Name, x.LongString())
}
return v
}
// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
args := make([]*ssa.Value, len(n.Args))
for i, n := range n.Args {
args[i] = s.expr(n)
}
return args
}
// openDeferRecord adds code to evaluate and store the args for an open-coded
// defer call, and records info about the defer so we can generate proper code
// on the exit paths. n is the sub-node of the defer node that is the actual
// function call. We also record funcdata information about where the args and
// the deferBits variable are stored, which lets the runtime run the proper
// defer calls during panics.
func (s *state) openDeferRecord(n *ir.CallExpr) {
var args []*ssa.Value
var argNodes []*ir.Name
if buildcfg.Experiment.RegabiDefer && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
s.Fatalf("defer call with arguments or results: %v", n)
}
opendefer := &openDeferInfo{
n: n,
}
fn := n.X
if n.Op() == ir.OCALLFUNC {
// We must always store the function value in a stack slot for the
// runtime panic code to use. But in the defer exit code, we will
// call the function directly if it is a static function.
closureVal := s.expr(fn)
closure := s.openDeferSave(nil, fn.Type(), closureVal)
opendefer.closureNode = closure.Aux.(*ir.Name)
if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
opendefer.closure = closure
}
} else if n.Op() == ir.OCALLMETH {
base.Fatalf("OCALLMETH missed by walkCall")
} else {
if fn.Op() != ir.ODOTINTER {
base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
}
fn := fn.(*ir.SelectorExpr)
closure, rcvr := s.getClosureAndRcvr(fn)
opendefer.closure = s.openDeferSave(nil, closure.Type, closure)
// Important to get the receiver type correct, so it is recognized
// as a pointer for GC purposes.
opendefer.rcvr = s.openDeferSave(nil, fn.Type().Recv().Type, rcvr)
opendefer.closureNode = opendefer.closure.Aux.(*ir.Name)
opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Name)
}
for _, argn := range n.Args {
var v *ssa.Value
if TypeOK(argn.Type()) {
v = s.openDeferSave(nil, argn.Type(), s.expr(argn))
} else {
v = s.openDeferSave(argn, argn.Type(), nil)
}
args = append(args, v)
argNodes = append(argNodes, v.Aux.(*ir.Name))
}
opendefer.argVals = args
opendefer.argNodes = argNodes
index := len(s.openDefers)
s.openDefers = append(s.openDefers, opendefer)
// Update deferBits only after evaluation and storage to stack of
// args/receiver/interface is successful.
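// For example, in a function with three open-coded defers, the defer
// statements set bits 0, 1, and 2 of deferBits in execution order, so at
// exit deferBits records exactly which defer statements were reached.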
bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
s.vars[deferBitsVar] = newDeferBits
s.store(types.Types[types.TUINT8], s.deferBitsAddr, newDeferBits)
}
// openDeferSave generates SSA nodes to store a value (with type t) for an
// open-coded defer at an explicit autotmp location on the stack, so it can be
// reloaded and used for the appropriate call on exit. If type t is SSAable, then
// val must be non-nil (and n should be nil) and val is the value to be stored. If
// type t is non-SSAable, then n must be non-nil (and val should be nil) and n is
// evaluated (via s.addr() below) to get the value that is to be stored. The
// function returns an SSA value representing a pointer to the autotmp location.
func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Value {
canSSA := TypeOK(t)
var pos src.XPos
if canSSA {
pos = val.Pos
} else {
pos = n.Pos()
}
argTemp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
argTemp.SetOpenDeferSlot(true)
var addrArgTemp *ssa.Value
// Use OpVarLive to make sure stack slots for the args, etc. are not
// removed by dead-store elimination
if s.curBlock.ID != s.f.Entry.ID {
// Force the argtmp storing this defer function/receiver/arg to be
// declared in the entry block, so that it will be live for the
// defer exit code (which will actually access it only if the
// associated defer call has been activated).
s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
addrArgTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar])
} else {
// Special case if we're still in the entry block. We can't use
// the above code, since s.defvars[s.f.Entry.ID] isn't defined
// until we end the entry block with s.endBlock().
s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.mem(), false)
}
if t.HasPointers() {
// Since we may use this argTemp during exit depending on the
// deferBits, we must define it unconditionally on entry.
// Therefore, we must make sure it is zeroed out in the entry
// block if it contains pointers, else GC may wrongly follow an
// uninitialized pointer value.
argTemp.SetNeedzero(true)
}
if !canSSA {
a := s.addr(n)
s.move(t, addrArgTemp, a)
return addrArgTemp
}
// We are storing to the stack, hence we can avoid the full checks in
// storeType() (no write barrier) and do a simple store().
s.store(t, addrArgTemp, val)
return addrArgTemp
}
// openDeferExit generates SSA for processing all the open-coded defers at exit.
// The code involves loading deferBits, and checking each of the bits to see if
// the corresponding defer statement was executed. For each bit that is turned
// on, the associated defer call is made.
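// Per defer i, the generated code is roughly this hypothetical Go analogue:
//   if deferBits&(1<<i) != 0 {
//       deferBits &^= 1 << i // clear first so a panicking defer is not re-run
//       <call the saved closure/receiver with the saved args>
//   }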
func (s *state) openDeferExit() {
deferExit := s.f.NewBlock(ssa.BlockPlain)
s.endBlock().AddEdgeTo(deferExit)
s.startBlock(deferExit)
s.lastDeferExit = deferExit
s.lastDeferCount = len(s.openDefers)
zeroval := s.constInt8(types.Types[types.TUINT8], 0)
// Test for and run defers in reverse order
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
bCond := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
deferBits := s.variable(deferBitsVar, types.Types[types.TUINT8])
// Generate code to check if the bit associated with the current
// defer is set.
bitval := s.constInt8(types.Types[types.TUINT8], 1<<uint(i))
andval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, bitval)
eqVal := s.newValue2(ssa.OpEq8, types.Types[types.TBOOL], andval, zeroval)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(eqVal)
b.AddEdgeTo(bEnd)
b.AddEdgeTo(bCond)
bCond.AddEdgeTo(bEnd)
s.startBlock(bCond)
// Clear this bit in deferBits and force store back to stack, so
// we will not try to re-run this defer call if this defer call panics.
nbitval := s.newValue1(ssa.OpCom8, types.Types[types.TUINT8], bitval)
maskedval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, nbitval)
s.store(types.Types[types.TUINT8], s.deferBitsAddr, maskedval)
// Use this value for following tests, so we keep previous
// bits cleared.
s.vars[deferBitsVar] = maskedval
// Generate code to call the function call of the defer, using the
// closure/receiver/args that were stored in argtmps at the point
// of the defer statement.
fn := r.n.X
stksize := fn.Type().ArgWidth()
var ACArgs []*types.Type
var ACResults []*types.Type
var callArgs []*ssa.Value
if r.rcvr != nil {
// rcvr in case of OCALLINTER
v := s.load(r.rcvr.Type.Elem(), r.rcvr)
ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
callArgs = append(callArgs, v)
}
for j, argAddrVal := range r.argVals {
f := getParam(r.n, j)
ACArgs = append(ACArgs, f.Type)
var a *ssa.Value
if !TypeOK(f.Type) {
a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem())
} else {
a = s.load(f.Type, argAddrVal)
}
callArgs = append(callArgs, a)
}
var call *ssa.Value
if r.closure != nil {
v := s.load(r.closure.Type.Elem(), r.closure)
s.maybeNilCheckClosure(v, callDefer)
codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
} else {
aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
}
callArgs = append(callArgs, s.mem())
call.AddArgs(callArgs...)
call.AuxInt = stksize
s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
// Make sure that the stack slots with pointers are kept live
// through the call (which is a pre-emption point). Also, we will
// use the first call of the last defer exit to compute liveness
// for the deferreturn, so we want all stack slots to be live.
if r.closureNode != nil {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
}
if r.rcvrNode != nil {
if r.rcvrNode.Type().HasPointers() {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
}
}
for _, argNode := range r.argNodes {
if argNode.Type().HasPointers() {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
}
}
s.endBlock()
s.startBlock(bEnd)
}
}
func (s *state) callResult(n *ir.CallExpr, k callKind) *ssa.Value {
return s.call(n, k, false)
}
func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value {
return s.call(n, k, true)
}
// call generates SSA for the call n using the specified call type.
// It returns the first result value (or its address, if returnResultAddr is
// true), or nil if the call has no results or is a go/defer call.
func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Value {
s.prevCall = nil
var callee *ir.Name // target function (if static)
var closure *ssa.Value // ptr to closure to run (if dynamic)
var codeptr *ssa.Value // ptr to target code (if dynamic)
var rcvr *ssa.Value // receiver to set
fn := n.X
var ACArgs []*types.Type // AuxCall args
var ACResults []*types.Type // AuxCall results
var callArgs []*ssa.Value // For late-expansion, the args themselves (not stored, args to the call instead).
callABI := s.f.ABIDefault
if !buildcfg.Experiment.RegabiArgs {
var magicFnNameSym *types.Sym
if fn.Name() != nil {
magicFnNameSym = fn.Name().Sym()
ss := magicFnNameSym.Name
if strings.HasSuffix(ss, magicNameDotSuffix) {
callABI = s.f.ABI1
}
}
if magicFnNameSym == nil && n.Op() == ir.OCALLINTER {
magicFnNameSym = fn.(*ir.SelectorExpr).Sym()
ss := magicFnNameSym.Name
if strings.HasSuffix(ss, magicNameDotSuffix[1:]) {
callABI = s.f.ABI1
}
}
}
if buildcfg.Experiment.RegabiDefer && k != callNormal && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
s.Fatalf("go/defer call with arguments: %v", n)
}
switch n.Op() {
case ir.OCALLFUNC:
if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
fn := fn.(*ir.Name)
callee = fn
if buildcfg.Experiment.RegabiArgs {
// This is a static call, so it may be
// a direct call to a non-ABIInternal
// function. fn.Func may be nil for
// some compiler-generated functions,
// but those are all ABIInternal.
if fn.Func != nil {
callABI = abiForFunc(fn.Func, s.f.ABI0, s.f.ABI1)
}
} else {
// TODO(register args) remove after register abi is working
inRegistersImported := fn.Pragma()&ir.RegisterParams != 0
inRegistersSamePackage := fn.Func != nil && fn.Func.Pragma&ir.RegisterParams != 0
if inRegistersImported || inRegistersSamePackage {
callABI = s.f.ABI1
}
}
break
}
closure = s.expr(fn)
if k != callDefer && k != callDeferStack {
// Deferred nil function needs to panic when the function is invoked,
// not the point of defer statement.
s.maybeNilCheckClosure(closure, k)
}
case ir.OCALLMETH:
base.Fatalf("OCALLMETH missed by walkCall")
case ir.OCALLINTER:
if fn.Op() != ir.ODOTINTER {
s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
}
fn := fn.(*ir.SelectorExpr)
var iclosure *ssa.Value
iclosure, rcvr = s.getClosureAndRcvr(fn)
if k == callNormal {
codeptr = s.load(types.Types[types.TUINTPTR], iclosure)
} else {
closure = iclosure
}
}
if !buildcfg.Experiment.RegabiArgs {
if regAbiForFuncType(n.X.Type().FuncType()) {
// Magic last type in input args to call
callABI = s.f.ABI1
}
}
params := callABI.ABIAnalyze(n.X.Type(), false /* Do not set (register) nNames from caller side -- can cause races. */)
types.CalcSize(fn.Type())
stksize := params.ArgWidth() // includes receiver, args, and results
res := n.X.Type().Results()
if k == callNormal {
for _, p := range params.OutParams() {
ACResults = append(ACResults, p.Type)
}
}
var call *ssa.Value
if k == callDeferStack {
// Make a defer struct d on the stack.
t := deferstruct(stksize)
d := typecheck.TempAt(n.Pos(), s.curfn, t)
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
addr := s.addr(d)
// Must match reflect.go:deferstruct and src/runtime/runtime2.go:_defer.
// 0: siz
s.store(types.Types[types.TUINT32],
s.newValue1I(ssa.OpOffPtr, types.Types[types.TUINT32].PtrTo(), t.FieldOff(0), addr),
s.constInt32(types.Types[types.TUINT32], int32(stksize)))
// 1: started, set in deferprocStack
// 2: heap, set in deferprocStack
// 3: openDefer
// 4: sp, set in deferprocStack
// 5: pc, set in deferprocStack
// 6: fn
s.store(closure.Type,
s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(6), addr),
closure)
// 7: panic, set in deferprocStack
// 8: link, set in deferprocStack
// 9: framepc
// 10: varp
// 11: fd
// Then, store all the arguments of the defer call.
ft := fn.Type()
off := t.FieldOff(12) // TODO register args: be sure this isn't a hardcoded param stack offset.
args := n.Args
// Set receiver (for interface calls). Always a pointer.
if rcvr != nil {
p := s.newValue1I(ssa.OpOffPtr, ft.Recv().Type.PtrTo(), off, addr)
s.store(types.Types[types.TUINTPTR], p, rcvr)
}
// Set receiver (for method calls).
if n.Op() == ir.OCALLMETH {
base.Fatalf("OCALLMETH missed by walkCall")
}
// Set other args.
for _, f := range ft.Params().Fields().Slice() {
s.storeArgWithBase(args[0], f.Type, addr, off+abi.FieldOffsetOf(f))
args = args[1:]
}
// Call runtime.deferprocStack with pointer to _defer record.
ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
callArgs = append(callArgs, addr, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
if stksize < int64(types.PtrSize) {
// We need room for both the call to deferprocStack and the call to
// the deferred function.
// TODO(register args) Revisit this if/when we pass args in registers.
stksize = int64(types.PtrSize)
}
call.AuxInt = stksize
} else {
// Store arguments to stack, including defer/go arguments and receiver for method calls.
// These are written in SP-offset order.
argStart := base.Ctxt.FixedFrameSize()
// Defer/go args.
if k != callNormal {
// Write argsize and closure (args to newproc/deferproc).
argsize := s.constInt32(types.Types[types.TUINT32], int32(stksize))
ACArgs = append(ACArgs, types.Types[types.TUINT32]) // not argExtra
callArgs = append(callArgs, argsize)
ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
callArgs = append(callArgs, closure)
stksize += 2 * int64(types.PtrSize)
argStart += 2 * int64(types.PtrSize)
}
// Set receiver (for interface calls).
if rcvr != nil {
callArgs = append(callArgs, rcvr)
}
// Write args.
t := n.X.Type()
args := n.Args
if n.Op() == ir.OCALLMETH {
base.Fatalf("OCALLMETH missed by walkCall")
}
for _, p := range params.InParams() { // includes receiver for interface calls
ACArgs = append(ACArgs, p.Type)
}
for i, n := range args {
callArgs = append(callArgs, s.putArg(n, t.Params().Field(i).Type))
}
callArgs = append(callArgs, s.mem())
// call target
switch {
case k == callDefer:
aux := ssa.StaticAuxCall(ir.Syms.Deferproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults)) // TODO paramResultInfo for DeferProc
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
case k == callGo:
aux := ssa.StaticAuxCall(ir.Syms.Newproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) // TODO paramResultInfo for NewProc
case closure != nil:
// rawLoad because loading the code pointer from a
// closure is always safe, but IsSanitizerSafeAddr
// can't always figure that out currently, and it's
// critical that we not clobber any arguments already
// stored onto the stack.
codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure)
aux := ssa.ClosureAuxCall(callABI.ABIAnalyzeTypes(nil, ACArgs, ACResults))
call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
case codeptr != nil:
// Note that the "receiver" parameter is nil because the actual receiver is the first input parameter.
aux := ssa.InterfaceAuxCall(params)
call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
case callee != nil:
aux := ssa.StaticAuxCall(callTargetLSym(callee), params)
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
default:
s.Fatalf("bad call type %v %v", n.Op(), n)
}
call.AddArgs(callArgs...)
call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
}
s.prevCall = call
s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
// Insert OVARLIVE nodes
for _, name := range n.KeepAlive {
s.stmt(ir.NewUnaryExpr(n.Pos(), ir.OVARLIVE, name))
}
// Finish block for defers
if k == callDefer || k == callDeferStack {
b := s.endBlock()
b.Kind = ssa.BlockDefer
b.SetControl(call)
bNext := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bNext)
// Add recover edge to exit code.
r := s.f.NewBlock(ssa.BlockPlain)
s.startBlock(r)
s.exit()
b.AddEdgeTo(r)
b.Likely = ssa.BranchLikely
s.startBlock(bNext)
}
if res.NumFields() == 0 || k != callNormal {
// call has no return value. Continue with the next statement.
return nil
}
fp := res.Field(0)
if returnResultAddr {
return s.resultAddrOfCall(call, 0, fp.Type)
}
return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
}
// maybeNilCheckClosure checks if a nil check of a closure is needed in some
// architecture-dependent situations and, if so, emits the nil check.
func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
if Arch.LinkArch.Family == sys.Wasm || buildcfg.GOOS == "aix" && k != callGo {
// On AIX, the closure needs to be verified because fn can be nil, except for go statements: a nil function in a go statement must reach the runtime so it can report the "go of nil func value" error.
// TODO(neelance): On other architectures this should be eliminated by the optimization steps
s.nilCheck(closure)
}
}
// getClosureAndRcvr returns values for the appropriate closure and receiver of an
// interface call
func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) {
i := s.expr(fn.X)
itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
s.nilCheck(itab)
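// In runtime.itab the fun array follows the inter and _type pointers plus an
// 8-byte hash/padding word, hence the 2*PtrSize+8 below; fn.Offset() then
// selects the method's slot within fun.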
itabidx := fn.Offset() + 2*int64(types.PtrSize) + 8 // offset of fun field in runtime.itab
closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
return closure, rcvr
}
// etypesign returns the signed-ness of e, for integer/pointer etypes.
// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
func etypesign(e types.Kind) int8 {
switch e {
case types.TINT8, types.TINT16, types.TINT32, types.TINT64, types.TINT:
return -1
case types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINT, types.TUINTPTR, types.TUNSAFEPTR:
return +1
}
return 0
}
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
// The value that the returned Value represents is guaranteed to be non-nil.
func (s *state) addr(n ir.Node) *ssa.Value {
if n.Op() != ir.ONAME {
s.pushLine(n.Pos())
defer s.popLine()
}
if s.canSSA(n) {
s.Fatalf("addr of canSSA expression: %+v", n)
}
t := types.NewPtr(n.Type())
linksymOffset := func(lsym *obj.LSym, offset int64) *ssa.Value {
v := s.entryNewValue1A(ssa.OpAddr, t, lsym, s.sb)
// TODO: Make OpAddr use AuxInt as well as Aux.
if offset != 0 {
v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v)
}
return v
}
switch n.Op() {
case ir.OLINKSYMOFFSET:
no := n.(*ir.LinksymOffsetExpr)
return linksymOffset(no.Linksym, no.Offset_)
case ir.ONAME:
n := n.(*ir.Name)
if n.Heapaddr != nil {
return s.expr(n.Heapaddr)
}
switch n.Class {
case ir.PEXTERN:
// global variable
return linksymOffset(n.Linksym(), 0)
case ir.PPARAM:
// parameter slot
v := s.decladdrs[n]
if v != nil {
return v
}
s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
return nil
case ir.PAUTO:
return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !ir.IsAutoTmp(n))
case ir.PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
// ensure that we reuse symbols for out parameters so
// that cse works on their addresses
return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
default:
s.Fatalf("variable address class %v not implemented", n.Class)
return nil
}
case ir.ORESULT:
// load return from callee
n := n.(*ir.ResultExpr)
return s.resultAddrOfCall(s.prevCall, n.Index, n.Type())
case ir.OINDEX:
n := n.(*ir.IndexExpr)
if n.X.Type().IsSlice() {
a := s.expr(n.X)
i := s.expr(n.Index)
len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a)
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
p := s.newValue1(ssa.OpSlicePtr, t, a)
return s.newValue2(ssa.OpPtrIndex, t, p, i)
} else { // array
a := s.addr(n.X)
i := s.expr(n.Index)
len := s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.X.Type().Elem()), a, i)
}
case ir.ODEREF:
n := n.(*ir.StarExpr)
return s.exprPtr(n.X, n.Bounded(), n.Pos())
case ir.ODOT:
n := n.(*ir.SelectorExpr)
p := s.addr(n.X)
return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
case ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
p := s.exprPtr(n.X, n.Bounded(), n.Pos())
return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
case ir.OCONVNOP:
n := n.(*ir.ConvExpr)
if n.Type() == n.X.Type() {
return s.addr(n.X)
}
addr := s.addr(n.X)
return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
case ir.OCALLFUNC, ir.OCALLINTER:
n := n.(*ir.CallExpr)
return s.callAddr(n, callNormal)
case ir.ODOTTYPE:
n := n.(*ir.TypeAssertExpr)
v, _ := s.dottype(n, false)
if v.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
if v.Args[1] != s.mem() {
s.Fatalf("memory no longer live from dottype load")
}
return v.Args[0]
default:
s.Fatalf("unhandled addr %v", n.Op())
return nil
}
}
// canSSA reports whether n is SSA-able.
// n must be an ONAME (or an ODOT sequence with an ONAME base).
func (s *state) canSSA(n ir.Node) bool {
if base.Flag.N != 0 {
return false
}
for {
nn := n
if nn.Op() == ir.ODOT {
nn := nn.(*ir.SelectorExpr)
n = nn.X
continue
}
if nn.Op() == ir.OINDEX {
nn := nn.(*ir.IndexExpr)
if nn.X.Type().IsArray() {
n = nn.X
continue
}
}
break
}
if n.Op() != ir.ONAME {
return false
}
return s.canSSAName(n.(*ir.Name)) && TypeOK(n.Type())
}
func (s *state) canSSAName(name *ir.Name) bool {
if name.Addrtaken() || !name.OnStack() {
return false
}
switch name.Class {
case ir.PPARAMOUT:
if s.hasdefer {
// TODO: handle this case? Named return values must be
// in memory so that the deferred function can see them.
// Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
// Or maybe not, see issue 18860. Even unnamed return values
// must be written back so if a defer recovers, the caller can see them.
return false
}
if s.cgoUnsafeArgs {
// Cgo effectively takes the address of all result args,
// but the compiler can't see that.
return false
}
}
if name.Class == ir.PPARAM && name.Sym() != nil && name.Sym().Name == ".this" {
// wrappers generated by genwrapper need to update
// the .this pointer in place.
// TODO: treat as a PPARAMOUT?
return false
}
return true
// TODO: try to make more variables SSAable?
}
// TypeOK reports whether variables of type t are SSA-able.
func TypeOK(t *types.Type) bool {
types.CalcSize(t)
if t.Width > int64(4*types.PtrSize) {
// 4*PtrSize is an arbitrary constant. We want it
// to be at least 3*PtrSize so slices can be registerized.
// Too big and we'll introduce too much register pressure.
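// For example, on a 64-bit target a string header (2 words) and a slice
// header (3 words) fit under the limit, while a 5-word struct does not.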
return false
}
switch t.Kind() {
case types.TARRAY:
// We can't do larger arrays because dynamic indexing is
// not supported on SSA variables.
// TODO: allow if all indexes are constant.
if t.NumElem() <= 1 {
return TypeOK(t.Elem())
}
return false
case types.TSTRUCT:
if t.NumFields() > ssa.MaxStruct {
return false
}
for _, t1 := range t.Fields().Slice() {
if !TypeOK(t1.Type) {
return false
}
}
return true
default:
return true
}
}
// exprPtr evaluates n to a pointer and nil-checks it.
func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
p := s.expr(n)
if bounded || n.NonNil() {
if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
s.f.Warnl(lineno, "removed nil check")
}
return p
}
s.nilCheck(p)
return p
}
// nilCheck generates nil pointer checking code.
// Used only for automatically inserted nil checks,
// not for user code like 'x != nil'.
func (s *state) nilCheck(ptr *ssa.Value) {
if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() {
return
}
s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
}
// boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not.
// Starts a new block on return.
// On input, len must be converted to full int width and be nonnegative.
// Returns idx converted to full int width.
// If bounded is true then caller guarantees the index is not out of bounds
// (but boundsCheck will still extend the index to full int width).
func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
idx = s.extendIndex(idx, len, kind, bounded)
if bounded || base.Flag.B != 0 {
// If bounded or bounds checking is flag-disabled, then no check necessary,
// just return the extended index.
//
// Here, bounded == true if the compiler generated the index itself,
// such as in the expansion of a slice initializer. These indexes are
// compiler-generated, not Go program variables, so they cannot be
// attacker-controlled, so we can omit Spectre masking as well.
//
// Note that we do not want to omit Spectre masking in code like:
//
// if 0 <= i && i < len(x) {
// use(x[i])
// }
//
// Lucky for us, bounded==false for that code.
// In that case (handled below), we emit a bound check (and Spectre mask)
// and then the prove pass will remove the bounds check.
// In theory the prove pass could potentially remove certain
// Spectre masks, but it's very delicate and probably better
// to be conservative and leave them all in.
return idx
}
bNext := s.f.NewBlock(ssa.BlockPlain)
bPanic := s.f.NewBlock(ssa.BlockExit)
if !idx.Type.IsSigned() {
switch kind {
case ssa.BoundsIndex:
kind = ssa.BoundsIndexU
case ssa.BoundsSliceAlen:
kind = ssa.BoundsSliceAlenU
case ssa.BoundsSliceAcap:
kind = ssa.BoundsSliceAcapU
case ssa.BoundsSliceB:
kind = ssa.BoundsSliceBU
case ssa.BoundsSlice3Alen:
kind = ssa.BoundsSlice3AlenU
case ssa.BoundsSlice3Acap:
kind = ssa.BoundsSlice3AcapU
case ssa.BoundsSlice3B:
kind = ssa.BoundsSlice3BU
case ssa.BoundsSlice3C:
kind = ssa.BoundsSlice3CU
}
}
var cmp *ssa.Value
if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
cmp = s.newValue2(ssa.OpIsInBounds, types.Types[types.TBOOL], idx, len)
} else {
cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[types.TBOOL], idx, len)
}
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
b.AddEdgeTo(bNext)
b.AddEdgeTo(bPanic)
s.startBlock(bPanic)
if Arch.LinkArch.Family == sys.Wasm {
// TODO(khr): figure out how to do "register" based calling convention for bounds checks.
// Should be similar to gcWriteBarrier, but I can't make it work.
s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len)
} else {
mem := s.newValue3I(ssa.OpPanicBounds, types.TypeMem, int64(kind), idx, len, s.mem())
s.endBlock().SetControl(mem)
}
s.startBlock(bNext)
// In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
if base.Flag.Cfg.SpectreIndex {
op := ssa.OpSpectreIndex
if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
op = ssa.OpSpectreSliceIndex
}
idx = s.newValue2(op, types.Types[types.TINT], idx, len)
}
return idx
}
// If cmp (a bool) is false, panic using the given function.
func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bNext := s.f.NewBlock(ssa.BlockPlain)
line := s.peekPos()
pos := base.Ctxt.PosTable.Pos(line)
fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
bPanic := s.panics[fl]
if bPanic == nil {
bPanic = s.f.NewBlock(ssa.BlockPlain)
s.panics[fl] = bPanic
s.startBlock(bPanic)
// The panic call takes/returns memory to ensure that the right
// memory state is observed if the panic happens.
s.rtcall(fn, false, nil)
}
b.AddEdgeTo(bNext)
b.AddEdgeTo(bPanic)
s.startBlock(bNext)
}
func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
needcheck := true
switch b.Op {
case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
if b.AuxInt != 0 {
needcheck = false
}
}
if needcheck {
// do a size-appropriate check for zero
cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type()))
s.check(cmp, ir.Syms.Panicdivide)
}
return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
// rtcall issues a call to the given runtime function fn with the listed args.
// Returns a slice of results of the given result types.
// The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block.
func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
s.prevCall = nil
// Write args to the stack
off := base.Ctxt.FixedFrameSize()
var callArgs []*ssa.Value
var callArgTypes []*types.Type
for _, arg := range args {
t := arg.Type
off = types.Rnd(off, t.Alignment())
size := t.Size()
callArgs = append(callArgs, arg)
callArgTypes = append(callArgTypes, t)
off += size
}
off = types.Rnd(off, int64(types.RegSize))
// Accumulate results types and offsets
offR := off
for _, t := range results {
offR = types.Rnd(offR, t.Alignment())
offR += t.Size()
}
// Issue call
var call *ssa.Value
aux := ssa.StaticAuxCall(fn, s.f.ABIDefault.ABIAnalyzeTypes(nil, callArgTypes, results))
callArgs = append(callArgs, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(results)), call)
if !returns {
// Finish block
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(call)
call.AuxInt = off - base.Ctxt.FixedFrameSize()
if len(results) > 0 {
s.Fatalf("panic call can't have results")
}
return nil
}
// Load results
res := make([]*ssa.Value, len(results))
for i, t := range results {
off = types.Rnd(off, t.Alignment())
res[i] = s.resultOfCall(call, int64(i), t)
off += t.Size()
}
off = types.Rnd(off, int64(types.PtrSize))
// Remember how much callee stack space we needed.
call.AuxInt = off
return res
}
// do *left = right for type t.
func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
s.instrument(t, left, instrumentWrite)
if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
// Known to not have write barrier. Store the whole type.
s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
return
}
// store scalar fields first, so write barrier stores for
// pointer fields can be grouped together, and scalar values
// don't need to be live across the write barrier call.
// TODO: if the writebarrier pass knows how to reorder stores,
// we can do a single store here as long as skip==0.
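// For example, storing a string writes the len word first and the data
// pointer second, so only the pointer store goes through the write barrier
// path and the length need not be kept live across it.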
s.storeTypeScalars(t, left, right, skip)
if skip&skipPtr == 0 && t.HasPointers() {
s.storeTypePtrs(t, left, right)
}
}
// do *left = right for all scalar (non-pointer) parts of t.
func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
switch {
case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
s.store(t, left, right)
case t.IsPtrShaped():
if t.IsPtr() && t.Elem().NotInHeap() {
s.store(t, left, right) // see issue 42032
}
// otherwise, no scalar fields.
case t.IsString():
if skip&skipLen != 0 {
return
}
len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
s.store(types.Types[types.TINT], lenAddr, len)
case t.IsSlice():
if skip&skipLen == 0 {
len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
s.store(types.Types[types.TINT], lenAddr, len)
}
if skip&skipCap == 0 {
cap := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], right)
capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
s.store(types.Types[types.TINT], capAddr, cap)
}
case t.IsInterface():
// itab field doesn't need a write barrier (even though it is a pointer).
itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
s.store(types.Types[types.TUINTPTR], left, itab)
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
s.storeTypeScalars(ft, addr, val, 0)
}
case t.IsArray() && t.NumElem() == 0:
// nothing
case t.IsArray() && t.NumElem() == 1:
s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
default:
s.Fatalf("bad write barrier type %v", t)
}
}
// do *left = right for all pointer parts of t.
func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
switch {
case t.IsPtrShaped():
if t.IsPtr() && t.Elem().NotInHeap() {
break // see issue 42032
}
s.store(t, left, right)
case t.IsString():
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
s.store(s.f.Config.Types.BytePtr, left, ptr)
case t.IsSlice():
elType := types.NewPtr(t.Elem())
ptr := s.newValue1(ssa.OpSlicePtr, elType, right)
s.store(elType, left, ptr)
case t.IsInterface():
// itab field is treated as a scalar.
idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
s.store(s.f.Config.Types.BytePtr, idataAddr, idata)
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
if !ft.HasPointers() {
continue
}
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
s.storeTypePtrs(ft, addr, val)
}
case t.IsArray() && t.NumElem() == 0:
// nothing
case t.IsArray() && t.NumElem() == 1:
s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
default:
s.Fatalf("bad write barrier type %v", t)
}
}
// putArg evaluates n for the purpose of passing it as an argument to a function and returns the value for the call.
func (s *state) putArg(n ir.Node, t *types.Type) *ssa.Value {
var a *ssa.Value
if !TypeOK(t) {
a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
} else {
a = s.expr(n)
}
return a
}
func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
pt := types.NewPtr(t)
var addr *ssa.Value
if base == s.sp {
// Use special routine that avoids allocation on duplicate offsets.
addr = s.constOffPtrSP(pt, off)
} else {
addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
}
if !TypeOK(t) {
a := s.addr(n)
s.move(t, addr, a)
return
}
a := s.expr(n)
s.storeType(t, addr, a, 0, false)
}
// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
// i,j,k may be nil, in which case they are set to their default value.
// v may be a slice, string or pointer to an array.
func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) {
t := v.Type
var ptr, len, cap *ssa.Value
switch {
case t.IsSlice():
ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v)
len = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
cap = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], v)
case t.IsString():
ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[types.TUINT8]), v)
len = s.newValue1(ssa.OpStringLen, types.Types[types.TINT], v)
cap = len
case t.IsPtr():
if !t.Elem().IsArray() {
s.Fatalf("bad ptr to array in slice %v\n", t)
}
s.nilCheck(v)
ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
len = s.constInt(types.Types[types.TINT], t.Elem().NumElem())
cap = len
default:
s.Fatalf("bad type in slice %v\n", t)
}
// Set default values
if i == nil {
i = s.constInt(types.Types[types.TINT], 0)
}
if j == nil {
j = len
}
three := true
if k == nil {
three = false
k = cap
}
// Panic if slice indices are not in bounds.
// Make sure we check these in reverse order so that we're always
// comparing against a value known to be nonnegative. See issue 28797.
if three {
if k != cap {
kind := ssa.BoundsSlice3Alen
if t.IsSlice() {
kind = ssa.BoundsSlice3Acap
}
k = s.boundsCheck(k, cap, kind, bounded)
}
if j != k {
j = s.boundsCheck(j, k, ssa.BoundsSlice3B, bounded)
}
i = s.boundsCheck(i, j, ssa.BoundsSlice3C, bounded)
} else {
if j != k {
kind := ssa.BoundsSliceAlen
if t.IsSlice() {
kind = ssa.BoundsSliceAcap
}
j = s.boundsCheck(j, k, kind, bounded)
}
i = s.boundsCheck(i, j, ssa.BoundsSliceB, bounded)
}
// Word-sized integer operations.
subOp := s.ssaOp(ir.OSUB, types.Types[types.TINT])
mulOp := s.ssaOp(ir.OMUL, types.Types[types.TINT])
andOp := s.ssaOp(ir.OAND, types.Types[types.TINT])
// Calculate the length (rlen) and capacity (rcap) of the new slice.
// For strings the capacity of the result is unimportant. However,
// we use rcap to test if we've generated a zero-length slice, so for
// strings rcap is simply the length rlen.
rlen := s.newValue2(subOp, types.Types[types.TINT], j, i)
rcap := rlen
if j != k && !t.IsString() {
rcap = s.newValue2(subOp, types.Types[types.TINT], k, i)
}
if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
// No pointer arithmetic necessary.
return ptr, rlen, rcap
}
// Calculate the base pointer (rptr) for the new slice.
//
// Generate the following code assuming that indexes are in bounds.
// The masking is to make sure that we don't generate a slice
// that points to the next object in memory. We cannot just set
// the pointer to nil because then we would create a nil slice or
// string.
//
// rcap = k - i
// rlen = j - i
// rptr = ptr + (mask(rcap) & (i * stride))
//
// Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width
// of the element type.
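// For example, for v[4:4] with len(v) == cap(v) == 4, rcap is 0, so mask(rcap)
// zeroes the delta and rptr stays at the original base pointer instead of
// pointing one element past the end of the backing array.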
stride := s.constInt(types.Types[types.TINT], ptr.Type.Elem().Width)
// The delta is the number of bytes to offset ptr by.
delta := s.newValue2(mulOp, types.Types[types.TINT], i, stride)
// If we're slicing to the point where the capacity is zero,
// zero out the delta.
mask := s.newValue1(ssa.OpSlicemask, types.Types[types.TINT], rcap)
delta = s.newValue2(andOp, types.Types[types.TINT], delta, mask)
// Compute rptr = ptr + delta.
rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta)
return rptr, rlen, rcap
}
type u642fcvtTab struct {
leq, cvt2F, and, rsh, or, add ssa.Op
one func(*state, *types.Type, int64) *ssa.Value
}
var u64_f64 = u642fcvtTab{
leq: ssa.OpLeq64,
cvt2F: ssa.OpCvt64to64F,
and: ssa.OpAnd64,
rsh: ssa.OpRsh64Ux64,
or: ssa.OpOr64,
add: ssa.OpAdd64F,
one: (*state).constInt64,
}
var u64_f32 = u642fcvtTab{
leq: ssa.OpLeq64,
cvt2F: ssa.OpCvt64to32F,
and: ssa.OpAnd64,
rsh: ssa.OpRsh64Ux64,
or: ssa.OpOr64,
add: ssa.OpAdd32F,
one: (*state).constInt64,
}
func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
}
func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
}
func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = (floatY) x
// } else {
// y = uintX(x) ; y = x & 1
// z = uintX(x) ; z = z >> 1
// z = z | y
// result = floatY(z)
// result = result + result
// }
//
// Code borrowed from old code generator.
// What's going on: large 64-bit "unsigned" looks like
// negative number to hardware's integer-to-float
// conversion. However, because the mantissa is only
// 63 bits, we don't need the LSB, so instead we do an
// unsigned right shift (divide by two), convert, and
// double. However, before we do that, we need to be
// sure that we do not lose a "1" if that made the
// difference in the resulting rounding. Therefore, we
// preserve it, and OR (not ADD) it back in. The case
// that matters is when the eleven discarded bits are
// equal to 10000000001; that rounds up, and the 1 cannot
// be lost else it would round down if the LSB of the
// candidate mantissa is 0.
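// A plain-Go analogue of the else branch (hypothetical, for illustration):
//   z := x>>1 | x&1        // halve, preserving the sticky low bit
//   r := float64(int64(z)) // z now fits in 63 bits, so it converts as a nonnegative value
//   r = r + r              // double back to the original magnitude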
cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvt2F, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
one := cvttab.one(s, ft, 1)
y := s.newValue2(cvttab.and, ft, x, one)
z := s.newValue2(cvttab.rsh, ft, x, one)
z = s.newValue2(cvttab.or, ft, z, y)
a := s.newValue1(cvttab.cvt2F, tt, z)
a1 := s.newValue2(cvttab.add, tt, a, a)
s.vars[n] = a1
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type())
}
type u322fcvtTab struct {
cvtI2F, cvtF2F ssa.Op
}
var u32_f64 = u322fcvtTab{
cvtI2F: ssa.OpCvt32to64F,
cvtF2F: ssa.OpCopy,
}
var u32_f32 = u322fcvtTab{
cvtI2F: ssa.OpCvt32to32F,
cvtF2F: ssa.OpCvt64Fto32F,
}
func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
}
func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
}
func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = floatY(x)
// } else {
// result = floatY(float64(x) + (1<<32))
// }
cmp := s.newValue2(ssa.OpLeq32, types.Types[types.TBOOL], s.zeroVal(ft), x)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvtI2F, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[types.TFLOAT64], x)
twoToThe32 := s.constFloat64(types.Types[types.TFLOAT64], float64(1<<32))
a2 := s.newValue2(ssa.OpAdd64F, types.Types[types.TFLOAT64], a1, twoToThe32)
a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
s.vars[n] = a3
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type())
}
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
if !n.X.Type().IsMap() && !n.X.Type().IsChan() {
s.Fatalf("node must be a map or a channel")
}
// if n == nil {
// return 0
// } else {
// // len
// return *((*int)n)
// // cap
// return *(((*int)n)+1)
// }
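// In other words, len loads the first word of the runtime header
// (count for a map, qcount for a channel) and cap loads the word after
// it (dataqsiz); cap only applies to channels here. The field names
// refer to the runtime's hmap/hchan layouts.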
lenType := n.Type()
nilValue := s.constNil(types.Types[types.TUINTPTR])
cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchUnlikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
// length/capacity of a nil map/chan is zero
b.AddEdgeTo(bThen)
s.startBlock(bThen)
s.vars[n] = s.zeroVal(lenType)
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
switch n.Op() {
case ir.OLEN:
// length is stored in the first word for map/chan
s.vars[n] = s.load(lenType, x)
case ir.OCAP:
// capacity is stored in the second word for chan
sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
s.vars[n] = s.load(lenType, sw)
default:
s.Fatalf("op must be OLEN or OCAP")
}
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, lenType)
}
type f2uCvtTab struct {
ltf, cvt2U, subf, or ssa.Op
floatValue func(*state, *types.Type, float64) *ssa.Value
intValue func(*state, *types.Type, int64) *ssa.Value
cutoff uint64
}
var f32_u64 = f2uCvtTab{
ltf: ssa.OpLess32F,
cvt2U: ssa.OpCvt32Fto64,
subf: ssa.OpSub32F,
or: ssa.OpOr64,
floatValue: (*state).constFloat32,
intValue: (*state).constInt64,
cutoff: 1 << 63,
}
var f64_u64 = f2uCvtTab{
ltf: ssa.OpLess64F,
cvt2U: ssa.OpCvt64Fto64,
subf: ssa.OpSub64F,
or: ssa.OpOr64,
floatValue: (*state).constFloat64,
intValue: (*state).constInt64,
cutoff: 1 << 63,
}
var f32_u32 = f2uCvtTab{
ltf: ssa.OpLess32F,
cvt2U: ssa.OpCvt32Fto32,
subf: ssa.OpSub32F,
or: ssa.OpOr32,
floatValue: (*state).constFloat32,
intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
cutoff: 1 << 31,
}
var f64_u32 = f2uCvtTab{
ltf: ssa.OpLess64F,
cvt2U: ssa.OpCvt64Fto32,
subf: ssa.OpSub64F,
or: ssa.OpOr32,
floatValue: (*state).constFloat64,
intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
cutoff: 1 << 31,
}
func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u64, n, x, ft, tt)
}
func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u64, n, x, ft, tt)
}
func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u32, n, x, ft, tt)
}
func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u32, n, x, ft, tt)
}
func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// cutoff:=1<<(intY_Size-1)
// if x < floatX(cutoff) {
// result = uintY(x)
// } else {
// y = x - floatX(cutoff)
// z = uintY(y)
// result = z | -(cutoff)
// }
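// For example, converting the float64 value 1.5 * 2^63 (>= the cutoff
// 2^63): y = x - 2^63 = 2^62 exactly, z = uint64(y) = 1<<62, and OR-ing
// in -cutoff (bit pattern 1<<63) yields 1<<62 | 1<<63, the correct
// uint64 result.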
cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvt2U, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
y := s.newValue2(cvttab.subf, ft, x, cutoff)
y = s.newValue1(cvttab.cvt2U, tt, y)
z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
a1 := s.newValue2(cvttab.or, tt, y, z)
s.vars[n] = a1
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type())
}
// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
iface := s.expr(n.X) // input interface
target := s.reflectType(n.Type()) // target type
byteptr := s.f.Config.Types.BytePtr
if n.Type().IsInterface() {
if n.Type().IsEmptyInterface() {
// Converting to an empty interface.
// Input could be an empty or nonempty interface.
if base.Debug.TypeAssert > 0 {
base.WarnfAt(n.Pos(), "type assertion inlined")
}
// Get itab/type field from input.
itab := s.newValue1(ssa.OpITab, byteptr, iface)
// Conversion succeeds iff that field is not nil.
cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
if n.X.Type().IsEmptyInterface() && commaok {
// Converting empty interface to empty interface with ,ok is just a nil check.
return iface, cond
}
// Branch on nilness.
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
b.Likely = ssa.BranchLikely
bOk := s.f.NewBlock(ssa.BlockPlain)
bFail := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bOk)
b.AddEdgeTo(bFail)
if !commaok {
// On failure, panic by calling panicnildottype.
s.startBlock(bFail)
s.rtcall(ir.Syms.Panicnildottype, false, nil, target)
// On success, return (perhaps modified) input interface.
s.startBlock(bOk)
if n.X.Type().IsEmptyInterface() {
res = iface // Use input interface unchanged.
return
}
// Load type out of itab, build interface with existing idata.
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
typ := s.load(byteptr, off)
idata := s.newValue1(ssa.OpIData, byteptr, iface)
res = s.newValue2(ssa.OpIMake, n.Type(), typ, idata)
return
}
s.startBlock(bOk)
// nonempty -> empty
// Need to load type from itab
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
s.vars[typVar] = s.load(byteptr, off)
s.endBlock()
// itab is nil, might as well use that as the nil result.
s.startBlock(bFail)
s.vars[typVar] = itab
s.endBlock()
// Merge point.
bEnd := s.f.NewBlock(ssa.BlockPlain)
bOk.AddEdgeTo(bEnd)
bFail.AddEdgeTo(bEnd)
s.startBlock(bEnd)
idata := s.newValue1(ssa.OpIData, byteptr, iface)
res = s.newValue2(ssa.OpIMake, n.Type(), s.variable(typVar, byteptr), idata)
resok = cond
delete(s.vars, typVar)
return
}
// converting to a nonempty interface needs a runtime call.
if base.Debug.TypeAssert > 0 {
base.WarnfAt(n.Pos(), "type assertion not inlined")
}
if !commaok {
fn := ir.Syms.AssertI2I
if n.X.Type().IsEmptyInterface() {
fn = ir.Syms.AssertE2I
}
data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface)
tab := s.newValue1(ssa.OpITab, byteptr, iface)
tab = s.rtcall(fn, true, []*types.Type{byteptr}, target, tab)[0]
return s.newValue2(ssa.OpIMake, n.Type(), tab, data), nil
}
fn := ir.Syms.AssertI2I2
if n.X.Type().IsEmptyInterface() {
fn = ir.Syms.AssertE2I2
}
res = s.rtcall(fn, true, []*types.Type{n.Type()}, target, iface)[0]
resok = s.newValue2(ssa.OpNeqInter, types.Types[types.TBOOL], res, s.constInterface(n.Type()))
return
}
if base.Debug.TypeAssert > 0 {
base.WarnfAt(n.Pos(), "type assertion inlined")
}
// Converting to a concrete type.
direct := types.IsDirectIface(n.Type())
itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
var targetITab *ssa.Value
if n.X.Type().IsEmptyInterface() {
// Looking for pointer to target type.
targetITab = target
} else {
// Looking for pointer to itab for target type and source interface.
targetITab = s.expr(n.Itab)
}
var tmp ir.Node // temporary for use with large types
var addr *ssa.Value // address of tmp
if commaok && !TypeOK(n.Type()) {
// unSSAable type, use temporary.
// TODO: get rid of some of these temporaries.
tmp, addr = s.temp(n.Pos(), n.Type())
}
cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, targetITab)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
b.Likely = ssa.BranchLikely
bOk := s.f.NewBlock(ssa.BlockPlain)
bFail := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bOk)
b.AddEdgeTo(bFail)
if !commaok {
// on failure, panic by calling panicdottype
s.startBlock(bFail)
taddr := s.reflectType(n.X.Type())
if n.X.Type().IsEmptyInterface() {
s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
} else {
s.rtcall(ir.Syms.PanicdottypeI, false, nil, itab, target, taddr)
}
// on success, return data from interface
s.startBlock(bOk)
if direct {
return s.newValue1(ssa.OpIData, n.Type(), iface), nil
}
p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
return s.load(n.Type(), p), nil
}
// commaok is the more complicated case because we have
// a control flow merge point.
bEnd := s.f.NewBlock(ssa.BlockPlain)
// Note that we need a new valVar each time (unlike okVar where we can
// reuse the variable) because it might have a different type every time.
valVar := ssaMarker("val")
// type assertion succeeded
s.startBlock(bOk)
if tmp == nil {
if direct {
s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type(), iface)
} else {
p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
s.vars[valVar] = s.load(n.Type(), p)
}
} else {
p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
s.move(n.Type(), addr, p)
}
s.vars[okVar] = s.constBool(true)
s.endBlock()
bOk.AddEdgeTo(bEnd)
// type assertion failed
s.startBlock(bFail)
if tmp == nil {
s.vars[valVar] = s.zeroVal(n.Type())
} else {
s.zero(n.Type(), addr)
}
s.vars[okVar] = s.constBool(false)
s.endBlock()
bFail.AddEdgeTo(bEnd)
// merge point
s.startBlock(bEnd)
if tmp == nil {
res = s.variable(valVar, n.Type())
delete(s.vars, valVar)
} else {
res = s.load(n.Type(), addr)
s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp.(*ir.Name), s.mem())
}
resok = s.variable(okVar, types.Types[types.TBOOL])
delete(s.vars, okVar)
return res, resok
}
// temp allocates a temp of type t at position pos
func (s *state) temp(pos src.XPos, t *types.Type) (*ir.Name, *ssa.Value) {
tmp := typecheck.TempAt(pos, s.curfn, t)
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
addr := s.addr(tmp)
return tmp, addr
}
// variable returns the value of a variable at the current location.
func (s *state) variable(n ir.Node, t *types.Type) *ssa.Value {
v := s.vars[n]
if v != nil {
return v
}
v = s.fwdVars[n]
if v != nil {
return v
}
if s.curBlock == s.f.Entry {
// No variable should be live at entry.
s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, n, v)
}
// Make a FwdRef, which records a value that's live on block input.
// We'll find the matching definition as part of insertPhis.
v = s.newValue0A(ssa.OpFwdRef, t, fwdRefAux{N: n})
s.fwdVars[n] = v
if n.Op() == ir.ONAME {
s.addNamedValue(n.(*ir.Name), v)
}
return v
}
func (s *state) mem() *ssa.Value {
return s.variable(memVar, types.TypeMem)
}
func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) {
if n.Class == ir.Pxxx {
// Don't track our marker nodes (memVar etc.).
return
}
if ir.IsAutoTmp(n) {
// Don't track temporary variables.
return
}
if n.Class == ir.PPARAMOUT {
// Don't track named output values. This prevents return values
// from being assigned too early. See #14591 and #14762. TODO: allow this.
return
}
loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0}
values, ok := s.f.NamedValues[loc]
if !ok {
s.f.Names = append(s.f.Names, &loc)
s.f.CanonicalLocalSlots[loc] = &loc
}
s.f.NamedValues[loc] = append(values, v)
}
// Branch is an unresolved branch.
type Branch struct {
P *obj.Prog // branch instruction
B *ssa.Block // target
}
// State contains state needed during Prog generation.
type State struct {
ABI obj.ABI
pp *objw.Progs
// Branches remembers all the branch instructions we've seen
// and where they would like to go.
Branches []Branch
// bstart remembers where each block starts (indexed by block ID)
bstart []*obj.Prog
maxarg int64 // largest frame size for arguments to calls made by the function
// Map from GC safe points to liveness index, generated by
// liveness analysis.
livenessMap liveness.Map
// partLiveArgs includes arguments that may be partially live, for which we
// need to generate instructions that spill the argument registers.
partLiveArgs map[*ir.Name]bool
// lineRunStart records the beginning of the current run of instructions
// within a single block sharing the same line number
// Used to move statement marks to the beginning of such runs.
lineRunStart *obj.Prog
// wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
OnWasmStackSkipped int
}
func (s *State) FuncInfo() *obj.FuncInfo {
return s.pp.CurFunc.LSym.Func()
}
// Prog appends a new Prog.
func (s *State) Prog(as obj.As) *obj.Prog {
p := s.pp.Prog(as)
if objw.LosesStmtMark(as) {
return p
}
// Float a statement start to the beginning of any same-line run.
// lineRunStart is reset at block boundaries, which appears to work well.
if s.lineRunStart == nil || s.lineRunStart.Pos.Line() != p.Pos.Line() {
s.lineRunStart = p
} else if p.Pos.IsStmt() == src.PosIsStmt {
s.lineRunStart.Pos = s.lineRunStart.Pos.WithIsStmt()
p.Pos = p.Pos.WithNotStmt()
}
return p
}
// Pc returns the current Prog.
func (s *State) Pc() *obj.Prog {
return s.pp.Next
}
// SetPos sets the current source position.
func (s *State) SetPos(pos src.XPos) {
s.pp.Pos = pos
}
// Br emits a single branch instruction and returns the instruction.
// Not all architectures need the returned instruction, but otherwise
// the boilerplate is common to all.
func (s *State) Br(op obj.As, target *ssa.Block) *obj.Prog {
p := s.Prog(op)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, Branch{P: p, B: target})
return p
}
// DebugFriendlySetPosFrom adjusts Pos.IsStmt subject to heuristics
// that reduce "jumpy" line number churn when debugging.
// Spill/fill/copy instructions from the register allocator,
// phi functions, and instructions with a no-pos position
// are examples of instructions that can cause churn.
func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
switch v.Op {
case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
// These are not statements
s.SetPos(v.Pos.WithNotStmt())
default:
p := v.Pos
if p != src.NoXPos {
// If the position is defined, update the position.
// Also convert default IsStmt to NotStmt; only
// explicit statement boundaries should appear
// in the generated code.
if p.IsStmt() != src.PosIsStmt {
p = p.WithNotStmt()
// Calls use the pos attached to v, but copy the statement mark from State
}
s.SetPos(p)
} else {
s.SetPos(s.pp.Pos.WithNotStmt())
}
}
}
// emitArgInfo emits argument info (locations on stack) for traceback.
func emitArgInfo(e *ssafn, f *ssa.Func, pp *objw.Progs) {
ft := e.curfn.Type()
if ft.NumRecvs() == 0 && ft.NumParams() == 0 {
return
}
x := EmitArgInfo(e.curfn, f.OwnAux.ABIInfo())
e.curfn.LSym.Func().ArgInfo = x
// Emit a funcdata pointing at the arg info data.
p := pp.Prog(obj.AFUNCDATA)
p.From.SetConst(objabi.FUNCDATA_ArgInfo)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = x
}
// EmitArgInfo emits argument info (locations on stack) of f for traceback and returns the generated symbol.
func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym {
x := base.Ctxt.Lookup(fmt.Sprintf("%s.arginfo%d", f.LSym.Name, f.ABI))
PtrSize := int64(types.PtrSize)
isAggregate := func(t *types.Type) bool {
return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice()
}
// Populate the data.
// The data is a stream of bytes, which contains the offsets and sizes of the
// non-aggregate arguments or non-aggregate fields/elements of aggregate-typed
// arguments, along with special "operators". Specifically,
// - for each non-aggregate arg/field/element, its offset from FP (1 byte) and
// size (1 byte)
// - special operators:
// - 0xff - end of sequence
// - 0xfe - print { (at the start of an aggregate-typed argument)
// - 0xfd - print } (at the end of an aggregate-typed argument)
// - 0xfc - print ... (more args/fields/elements)
// - 0xfb - print _ (offset too large)
// These constants need to be in sync with runtime.traceback.go:printArgs.
const (
_endSeq = 0xff
_startAgg = 0xfe
_endAgg = 0xfd
_dotdotdot = 0xfc
_offsetTooLarge = 0xfb
_special = 0xf0 // above this are operators, below this are ordinary offsets
)
const (
limit = 10 // print no more than 10 args/components
maxDepth = 5 // no more than 5 layers of nesting
// maxLen is a (conservative) upper bound of the byte stream length. For
// each arg/component, it has no more than 2 bytes of data (size, offset),
// and no more than one {, }, ... at each level (it cannot have both the
// data and ... unless it is the last one, just be conservative). Plus 1
// for _endSeq.
maxLen = (maxDepth*3+2)*limit + 1
)
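// For illustration (assuming 8-byte pointers and stack-assigned frame
// offsets 0 and 8): a function taking (x int64, s string) would encode
// as 00 08 (x), fe 08 08 10 08 fd (the string's pointer and length
// fields wrapped in braces), ff (end of sequence).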
wOff := 0
n := 0
writebyte := func(o uint8) { wOff = objw.Uint8(x, wOff, o) }
// Write one non-aggregate arg/field/element if there is room.
// Returns whether to continue.
write1 := func(sz, offset int64) bool {
if n >= limit {
return false
}
if offset >= _special {
writebyte(_offsetTooLarge)
} else {
writebyte(uint8(offset))
writebyte(uint8(sz))
}
n++
return true
}
// Visit t recursively and write it out.
// Returns whether to continue visiting.
var visitType func(baseOffset int64, t *types.Type, depth int) bool
visitType = func(baseOffset int64, t *types.Type, depth int) bool {
if n >= limit {
return false
}
if !isAggregate(t) {
return write1(t.Size(), baseOffset)
}
writebyte(_startAgg)
depth++
if depth >= maxDepth {
writebyte(_dotdotdot)
writebyte(_endAgg)
n++
return true
}
var r bool
switch {
case t.IsInterface(), t.IsString():
r = write1(PtrSize, baseOffset) &&
write1(PtrSize, baseOffset+PtrSize)
case t.IsSlice():
r = write1(PtrSize, baseOffset) &&
write1(PtrSize, baseOffset+PtrSize) &&
write1(PtrSize, baseOffset+PtrSize*2)
case t.IsComplex():
r = write1(t.Size()/2, baseOffset) &&
write1(t.Size()/2, baseOffset+t.Size()/2)
case t.IsArray():
r = true
if t.NumElem() == 0 {
n++ // {} counts as a component
break
}
for i := int64(0); i < t.NumElem(); i++ {
if !visitType(baseOffset, t.Elem(), depth) {
r = false
break
}
baseOffset += t.Elem().Size()
}
case t.IsStruct():
r = true
if t.NumFields() == 0 {
n++ // {} counts as a component
break
}
for _, field := range t.Fields().Slice() {
if !visitType(baseOffset+field.Offset, field.Type, depth) {
r = false
break
}
}
}
if !r {
writebyte(_dotdotdot)
}
writebyte(_endAgg)
return r
}
c := true
for _, a := range abiInfo.InParams() {
if !c {
writebyte(_dotdotdot)
break
}
c = visitType(a.FrameOffset(abiInfo), a.Type, 0)
}
writebyte(_endSeq)
if wOff > maxLen {
base.Fatalf("ArgInfo too large")
}
return x
}
// genssa appends entries to pp for each instruction in f.
func genssa(f *ssa.Func, pp *objw.Progs) {
var s State
s.ABI = f.OwnAux.Fn.ABI()
e := f.Frontend().(*ssafn)
s.livenessMap, s.partLiveArgs = liveness.Compute(e.curfn, f, e.stkptrsize, pp)
emitArgInfo(e, f, pp)
openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo
if openDeferInfo != nil {
// This function uses open-coded defers -- write out the funcdata
// info that we computed at the end of genssa.
p := pp.Prog(obj.AFUNCDATA)
p.From.SetConst(objabi.FUNCDATA_OpenCodedDeferInfo)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = openDeferInfo
}
// Remember where each block starts.
s.bstart = make([]*obj.Prog, f.NumBlocks())
s.pp = pp
var progToValue map[*obj.Prog]*ssa.Value
var progToBlock map[*obj.Prog]*ssa.Block
var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
if f.PrintOrHtmlSSA {
progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
f.Logf("genssa %s\n", f.Name)
progToBlock[s.pp.Next] = f.Blocks[0]
}
if base.Ctxt.Flag_locationlists {
if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
}
valueToProgAfter = f.Cache.ValueToProgAfter[:f.NumValues()]
for i := range valueToProgAfter {
valueToProgAfter[i] = nil
}
}
// If the very first instruction is not tagged as a statement,
// debuggers may attribute it to the previous function in the program.
firstPos := src.NoXPos
for _, v := range f.Entry.Values {
if v.Pos.IsStmt() == src.PosIsStmt {
firstPos = v.Pos
v.Pos = firstPos.WithDefaultStmt()
break
}
}
// inlMarks has an entry for each Prog that implements an inline mark.
// It maps from that Prog to the global inlining id of the inlined body
// which should unwind to this Prog's location.
var inlMarks map[*obj.Prog]int32
var inlMarkList []*obj.Prog
// inlMarksByPos maps from a (column 1) source position to the set of
// Progs that are in the set above and have that source position.
var inlMarksByPos map[src.XPos][]*obj.Prog
// Emit basic blocks
for i, b := range f.Blocks {
s.bstart[b.ID] = s.pp.Next
s.lineRunStart = nil
// Attach a "default" liveness info. Normally this will be
// overwritten in the Values loop below for each Value. But
// for an empty block this will be used for its control
// instruction. We won't use the actual liveness map on a
// control instruction. Just mark it something that is
// preemptible, unless this function is "all unsafe".
s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)}
// Emit values in block
Arch.SSAMarkMoves(&s, b)
for _, v := range b.Values {
x := s.pp.Next
s.DebugFriendlySetPosFrom(v)
if v.Op.ResultInArg0() && v.ResultReg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
switch v.Op {
case ssa.OpInitMem:
// memory arg needs no code
case ssa.OpArg:
// input args need no code
case ssa.OpSP, ssa.OpSB:
// nothing to do
case ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN, ssa.OpMakeResult:
// nothing to do
case ssa.OpGetG:
// nothing to do when there's a g register,
// and checkLower complains if there's not
case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpVarKill:
// nothing to do; already used by liveness
case ssa.OpPhi:
CheckLoweredPhi(v)
case ssa.OpConvert:
// nothing to do; no-op conversion for liveness
if v.Args[0].Reg() != v.Reg() {
v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString())
}
case ssa.OpInlMark:
p := Arch.Ginsnop(s.pp)
if inlMarks == nil {
inlMarks = map[*obj.Prog]int32{}
inlMarksByPos = map[src.XPos][]*obj.Prog{}
}
inlMarks[p] = v.AuxInt32()
inlMarkList = append(inlMarkList, p)
pos := v.Pos.AtColumn1()
inlMarksByPos[pos] = append(inlMarksByPos[pos], p)
default:
// Special case for first line in function; move it to the start (which cannot be a register-valued instruction)
if firstPos != src.NoXPos && v.Op != ssa.OpArgIntReg && v.Op != ssa.OpArgFloatReg && v.Op != ssa.OpLoadReg && v.Op != ssa.OpStoreReg {
s.SetPos(firstPos)
firstPos = src.NoXPos
}
// Attach this safe point to the next
// instruction.
s.pp.NextLive = s.livenessMap.Get(v)
// let the backend handle it
Arch.SSAGenValue(&s, v)
}
if base.Ctxt.Flag_locationlists {
valueToProgAfter[v.ID] = s.pp.Next
}
if f.PrintOrHtmlSSA {
for ; x != s.pp.Next; x = x.Link {
progToValue[x] = v
}
}
}
// If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused.
if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
p := Arch.Ginsnop(s.pp)
p.Pos = p.Pos.WithIsStmt()
if b.Pos == src.NoXPos {
b.Pos = p.Pos // It needs a file, otherwise a no-file non-zero line causes confusion. See #35652.
if b.Pos == src.NoXPos {
b.Pos = pp.Text.Pos // Sometimes p.Pos is empty. See #35695.
}
}
b.Pos = b.Pos.WithBogusLine() // Debuggers are not good about infinite loops, force a change in line number
}
// Emit control flow instructions for block
var next *ssa.Block
if i < len(f.Blocks)-1 && base.Flag.N == 0 {
// If -N, leave next==nil so every block with successors
// ends in a JMP (except call blocks - plive doesn't like
// select{send,recv} followed by a JMP call). Helps keep
// line numbers for otherwise empty blocks.
next = f.Blocks[i+1]
}
x := s.pp.Next
s.SetPos(b.Pos)
Arch.SSAGenBlock(&s, b, next)
if f.PrintOrHtmlSSA {
for ; x != s.pp.Next; x = x.Link {
progToBlock[x] = b
}
}
}
if f.Blocks[len(f.Blocks)-1].Kind == ssa.BlockExit {
// We need the return address of a panic call to
// still be inside the function in question. So if
// it ends in a call which doesn't return, add a
// nop (which will never execute) after the call.
Arch.Ginsnop(pp)
}
if openDeferInfo != nil {
// When doing open-coded defers, generate a disconnected call to
// deferreturn and a return. This will be used during panic
// recovery to unwind the stack and return back to the runtime.
s.pp.NextLive = s.livenessMap.DeferReturn
p := pp.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Deferreturn
// Load results into registers, so that when a deferred function
// recovers a panic, it will return to the caller with the right results.
// The results are already in memory, because they are not SSA'd
// when the function has defers (see canSSAName).
if f.OwnAux.ABIInfo().OutRegistersUsed() != 0 {
Arch.LoadRegResults(&s, f)
}
pp.Prog(obj.ARET)
}
if inlMarks != nil {
// We have some inline marks. Try to find other instructions we're
// going to emit anyway, and use those instructions instead of the
// inline marks.
for p := pp.Text; p != nil; p = p.Link {
if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || Arch.LinkArch.Family == sys.Wasm {
// Don't use 0-sized instructions as inline marks, because we need
// to identify inline mark instructions by pc offset.
// (Some of these instructions are sometimes zero-sized, sometimes not.
// We must not use anything that even might be zero-sized.)
// TODO: are there others?
continue
}
if _, ok := inlMarks[p]; ok {
// Don't use inline marks themselves. We don't know
// whether they will be zero-sized or not yet.
continue
}
pos := p.Pos.AtColumn1()
s := inlMarksByPos[pos]
if len(s) == 0 {
continue
}
for _, m := range s {
// We found an instruction with the same source position as
// some of the inline marks.
// Use this instruction instead.
p.Pos = p.Pos.WithIsStmt() // promote position to a statement
pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m])
// Make the inline mark a real nop, so it doesn't generate any code.
m.As = obj.ANOP
m.Pos = src.NoXPos
m.From = obj.Addr{}
m.To = obj.Addr{}
}
delete(inlMarksByPos, pos)
}
// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
for _, p := range inlMarkList {
if p.As != obj.ANOP {
pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p])
}
}
}
if base.Ctxt.Flag_locationlists {
var debugInfo *ssa.FuncDebug
if e.curfn.ABI == obj.ABIInternal && base.Flag.N != 0 {
debugInfo = ssa.BuildFuncDebugNoOptimized(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset)
} else {
debugInfo = ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset)
}
e.curfn.DebugInfo = debugInfo
bstart := s.bstart
idToIdx := make([]int, f.NumBlocks())
for i, b := range f.Blocks {
idToIdx[b.ID] = i
}
// Note that at this moment, Prog.Pc is a sequence number; it's
// not a real PC until after assembly, so this mapping has to
// be done later.
debugInfo.GetPC = func(b, v ssa.ID) int64 {
switch v {
case ssa.BlockStart.ID:
if b == f.Entry.ID {
return 0 // Start at the very beginning, at the assembler-generated prologue.
// this should only happen for function args (ssa.OpArg)
}
return bstart[b].Pc
case ssa.BlockEnd.ID:
blk := f.Blocks[idToIdx[b]]
nv := len(blk.Values)
return valueToProgAfter[blk.Values[nv-1].ID].Pc
case ssa.FuncEnd.ID:
return e.curfn.LSym.Size
default:
return valueToProgAfter[v].Pc
}
}
}
// Resolve branches, and relax DefaultStmt into NotStmt
for _, br := range s.Branches {
br.P.To.SetTarget(s.bstart[br.B.ID])
if br.P.Pos.IsStmt() != src.PosIsStmt {
br.P.Pos = br.P.Pos.WithNotStmt()
} else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt {
br.P.Pos = br.P.Pos.WithNotStmt()
}
}
if e.log { // spew to stdout
filename := ""
for p := pp.Text; p != nil; p = p.Link {
if p.Pos.IsKnown() && p.InnermostFilename() != filename {
filename = p.InnermostFilename()
f.Logf("# %s\n", filename)
}
var s string
if v, ok := progToValue[p]; ok {
s = v.String()
} else if b, ok := progToBlock[p]; ok {
s = b.String()
} else {
s = " " // most value and branch strings are 2-3 characters long
}
f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
}
}
if f.HTMLWriter != nil { // spew to ssa.html
var buf bytes.Buffer
buf.WriteString("<code>")
buf.WriteString("<dl class=\"ssa-gen\">")
filename := ""
for p := pp.Text; p != nil; p = p.Link {
// Don't spam every line with the file name, which is often huge.
// Only print changes, and "unknown" is not a change.
if p.Pos.IsKnown() && p.InnermostFilename() != filename {
filename = p.InnermostFilename()
buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">")
buf.WriteString(html.EscapeString("# " + filename))
buf.WriteString("</dd>")
}
buf.WriteString("<dt class=\"ssa-prog-src\">")
if v, ok := progToValue[p]; ok {
buf.WriteString(v.HTML())
} else if b, ok := progToBlock[p]; ok {
buf.WriteString("<b>" + b.HTML() + "</b>")
}
buf.WriteString("</dt>")
buf.WriteString("<dd class=\"ssa-prog\">")
buf.WriteString(fmt.Sprintf("%.5d <span class=\"l%v line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), p.InnermostLineNumberHTML(), html.EscapeString(p.InstructionString())))
buf.WriteString("</dd>")
}
buf.WriteString("</dl>")
buf.WriteString("</code>")
f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String())
}
defframe(&s, e, f)
f.HTMLWriter.Close()
f.HTMLWriter = nil
}
func defframe(s *State, e *ssafn, f *ssa.Func) {
pp := s.pp
frame := types.Rnd(s.maxarg+e.stksize, int64(types.RegSize))
if Arch.PadFrame != nil {
frame = Arch.PadFrame(frame)
}
// Fill in argument and frame size.
pp.Text.To.Type = obj.TYPE_TEXTSIZE
pp.Text.To.Val = int32(types.Rnd(f.OwnAux.ArgWidth(), int64(types.RegSize)))
pp.Text.To.Offset = frame
p := pp.Text
// Insert code to spill argument registers if the named slot may be partially
// live. That is, the named slot is considered live by liveness analysis,
// (because a part of it is live), but we may not spill all parts into the
// slot. This can only happen with aggregate-typed arguments that are SSA-able
// and not address-taken (for non-SSA-able or address-taken arguments we always
// spill upfront).
// Note: spilling is unnecessary in the -N/no-optimize case, since all values
// will be considered non-SSAable and spilled up front.
// TODO(register args) Make liveness more fine-grained to that partial spilling is okay.
if f.OwnAux.ABIInfo().InRegistersUsed() != 0 && base.Flag.N == 0 {
// First, see if it is already spilled before it may be live. Look for a spill
// in the entry block up to the first safepoint.
type nameOff struct {
n *ir.Name
off int64
}
partLiveArgsSpilled := make(map[nameOff]bool)
for _, v := range f.Entry.Values {
if v.Op.IsCall() {
break
}
if v.Op != ssa.OpStoreReg || v.Args[0].Op != ssa.OpArgIntReg {
continue
}
n, off := ssa.AutoVar(v)
if n.Class != ir.PPARAM || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] {
continue
}
partLiveArgsSpilled[nameOff{n, off}] = true
}
// Then, insert code to spill registers if not already.
for _, a := range f.OwnAux.ABIInfo().InParams() {
n, ok := a.Name.(*ir.Name)
if !ok || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] || len(a.Registers) <= 1 {
continue
}
rts, offs := a.RegisterTypesAndOffsets()
for i := range a.Registers {
if !rts[i].HasPointers() {
continue
}
if partLiveArgsSpilled[nameOff{n, offs[i]}] {
continue // already spilled
}
reg := ssa.ObjRegForAbiReg(a.Registers[i], f.Config)
p = Arch.SpillArgReg(pp, p, f, rts[i], reg, n, offs[i])
}
}
}
// Insert code to zero ambiguously live variables so that the
// garbage collector only sees initialized values when it
// looks for pointers.
var lo, hi int64
// Opaque state for backend to use. Current backends use it to
// keep track of which helper registers have been zeroed.
var state uint32
// Iterate through declarations. Autos are sorted in decreasing
// frame offset order.
for _, n := range e.curfn.Dcl {
if !n.Needzero() {
continue
}
if n.Class != ir.PAUTO {
e.Fatalf(n.Pos(), "needzero class %d", n.Class)
}
if n.Type().Size()%int64(types.PtrSize) != 0 || n.FrameOffset()%int64(types.PtrSize) != 0 || n.Type().Size() == 0 {
e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset_)
}
if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*types.RegSize) {
// Merge with range we already have.
lo = n.FrameOffset()
continue
}
// Zero old range
p = Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
// Set new range.
lo = n.FrameOffset()
hi = lo + n.Type().Size()
}
// Zero final range.
Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
}
// IndexJump is used for generating consecutive jump instructions to model a specific branching.
type IndexJump struct {
Jump obj.As
Index int
}
func (s *State) oneJump(b *ssa.Block, jump *IndexJump) {
p := s.Br(jump.Jump, b.Succs[jump.Index].Block())
p.Pos = b.Pos
}
// CombJump generates combinational instructions (2 at present) for a block jump,
// so that the behaviour of non-standard condition codes can be simulated.
func (s *State) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) {
switch next {
case b.Succs[0].Block():
s.oneJump(b, &jumps[0][0])
s.oneJump(b, &jumps[0][1])
case b.Succs[1].Block():
s.oneJump(b, &jumps[1][0])
s.oneJump(b, &jumps[1][1])
default:
var q *obj.Prog
if b.Likely != ssa.BranchUnlikely {
s.oneJump(b, &jumps[1][0])
s.oneJump(b, &jumps[1][1])
q = s.Br(obj.AJMP, b.Succs[1].Block())
} else {
s.oneJump(b, &jumps[0][0])
s.oneJump(b, &jumps[0][1])
q = s.Br(obj.AJMP, b.Succs[0].Block())
}
q.Pos = b.Pos
}
}
// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
func AddAux(a *obj.Addr, v *ssa.Value) {
AddAux2(a, v, v.AuxInt)
}
func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
v.Fatalf("bad AddAux addr %v", a)
}
// add integer offset
a.Offset += offset
// If no additional symbol offset, we're done.
if v.Aux == nil {
return
}
// Add symbol's offset from its base register.
switch n := v.Aux.(type) {
case *ssa.AuxCall:
a.Name = obj.NAME_EXTERN
a.Sym = n.Fn
case *obj.LSym:
a.Name = obj.NAME_EXTERN
a.Sym = n
case *ir.Name:
if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
a.Name = obj.NAME_PARAM
a.Sym = ir.Orig(n).(*ir.Name).Linksym()
a.Offset += n.FrameOffset()
break
}
a.Name = obj.NAME_AUTO
if n.Class == ir.PPARAMOUT {
a.Sym = ir.Orig(n).(*ir.Name).Linksym()
} else {
a.Sym = n.Linksym()
}
a.Offset += n.FrameOffset()
default:
v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
}
}
// extendIndex extends idx to a full int width.
// The generated code panics with the given kind if idx does not fit in an int (only on 32-bit archs).
func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
size := idx.Type.Size()
if size == s.config.PtrSize {
return idx
}
if size > s.config.PtrSize {
// truncate 64-bit indexes on 32-bit pointer archs. Test the
// high word and branch to out-of-bounds failure if it is not 0.
var lo *ssa.Value
if idx.Type.IsSigned() {
lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TINT], idx)
} else {
lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TUINT], idx)
}
if bounded || base.Flag.B != 0 {
return lo
}
bNext := s.f.NewBlock(ssa.BlockPlain)
bPanic := s.f.NewBlock(ssa.BlockExit)
hi := s.newValue1(ssa.OpInt64Hi, types.Types[types.TUINT32], idx)
cmp := s.newValue2(ssa.OpEq32, types.Types[types.TBOOL], hi, s.constInt32(types.Types[types.TUINT32], 0))
if !idx.Type.IsSigned() {
switch kind {
case ssa.BoundsIndex:
kind = ssa.BoundsIndexU
case ssa.BoundsSliceAlen:
kind = ssa.BoundsSliceAlenU
case ssa.BoundsSliceAcap:
kind = ssa.BoundsSliceAcapU
case ssa.BoundsSliceB:
kind = ssa.BoundsSliceBU
case ssa.BoundsSlice3Alen:
kind = ssa.BoundsSlice3AlenU
case ssa.BoundsSlice3Acap:
kind = ssa.BoundsSlice3AcapU
case ssa.BoundsSlice3B:
kind = ssa.BoundsSlice3BU
case ssa.BoundsSlice3C:
kind = ssa.BoundsSlice3CU
}
}
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
b.AddEdgeTo(bNext)
b.AddEdgeTo(bPanic)
s.startBlock(bPanic)
mem := s.newValue4I(ssa.OpPanicExtend, types.TypeMem, int64(kind), hi, lo, len, s.mem())
s.endBlock().SetControl(mem)
s.startBlock(bNext)
return lo
}
// Extend value to the required size
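// The switch below keys on 10*size + PtrSize: e.g. a 1-byte index on a
// 4-byte-pointer arch gives 14 (extend 8 -> 32), and a 4-byte index on
// an 8-byte-pointer arch gives 48 (extend 32 -> 64).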
var op ssa.Op
if idx.Type.IsSigned() {
switch 10*size + s.config.PtrSize {
case 14:
op = ssa.OpSignExt8to32
case 18:
op = ssa.OpSignExt8to64
case 24:
op = ssa.OpSignExt16to32
case 28:
op = ssa.OpSignExt16to64
case 48:
op = ssa.OpSignExt32to64
default:
s.Fatalf("bad signed index extension %s", idx.Type)
}
} else {
switch 10*size + s.config.PtrSize {
case 14:
op = ssa.OpZeroExt8to32
case 18:
op = ssa.OpZeroExt8to64
case 24:
op = ssa.OpZeroExt16to32
case 28:
op = ssa.OpZeroExt16to64
case 48:
op = ssa.OpZeroExt32to64
default:
s.Fatalf("bad unsigned index extension %s", idx.Type)
}
}
return s.newValue1(op, types.Types[types.TINT], idx)
}
// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
// Called during ssaGenValue.
func CheckLoweredPhi(v *ssa.Value) {
if v.Op != ssa.OpPhi {
v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
}
if v.Type.IsMemory() {
return
}
f := v.Block.Func
loc := f.RegAlloc[v.ID]
for _, a := range v.Args {
if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
}
}
}
// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block,
// except for incoming in-register arguments.
// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
// That register contains the closure pointer on closure entry.
func CheckLoweredGetClosurePtr(v *ssa.Value) {
entry := v.Block.Func.Entry
if entry != v.Block {
base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
}
for _, w := range entry.Values {
if w == v {
break
}
switch w.Op {
case ssa.OpArgIntReg, ssa.OpArgFloatReg:
// okay
default:
base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
}
}
}
// CheckArgReg ensures that v is in the function's entry block.
func CheckArgReg(v *ssa.Value) {
entry := v.Block.Func.Entry
if entry != v.Block {
base.Fatalf("in %s, badly placed ArgIReg or ArgFReg: %v %v", v.Block.Func.Name, v.Block, v)
}
}
func AddrAuto(a *obj.Addr, v *ssa.Value) {
n, off := ssa.AutoVar(v)
a.Type = obj.TYPE_MEM
a.Sym = n.Linksym()
a.Reg = int16(Arch.REGSP)
a.Offset = n.FrameOffset() + off
if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
a.Name = obj.NAME_PARAM
} else {
a.Name = obj.NAME_AUTO
}
}
// Call returns a new CALL instruction for the SSA value v.
// It uses PrepareCall to prepare the call.
func (s *State) Call(v *ssa.Value) *obj.Prog {
pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness of the call comes from ssaGenState
s.PrepareCall(v)
p := s.Prog(obj.ACALL)
if pPosIsStmt == src.PosIsStmt {
p.Pos = v.Pos.WithIsStmt()
} else {
p.Pos = v.Pos.WithNotStmt()
}
if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = sym.Fn
} else {
// TODO(mdempsky): Can these differences be eliminated?
switch Arch.LinkArch.Family {
case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm:
p.To.Type = obj.TYPE_REG
case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
p.To.Type = obj.TYPE_MEM
default:
base.Fatalf("unknown indirect call family")
}
p.To.Reg = v.Args[0].Reg()
}
return p
}
// PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
// It must be called immediately before emitting the actual CALL instruction,
// since it emits PCDATA for the stack map at the call (calls are safe points).
func (s *State) PrepareCall(v *ssa.Value) {
idx := s.livenessMap.Get(v)
if !idx.StackMapValid() {
// See Liveness.hasStackMap.
if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == ir.Syms.Typedmemclr || sym.Fn == ir.Syms.Typedmemmove) {
base.Fatalf("missing stack map index for %v", v.LongString())
}
}
call, ok := v.Aux.(*ssa.AuxCall)
if ok && call.Fn == ir.Syms.Deferreturn {
// Deferred calls will appear to be returning to
// the CALL deferreturn(SB) that we are about to emit.
// However, the stack trace code will show the line
// of the instruction byte before the return PC.
// To avoid that being an unrelated instruction,
// insert an actual hardware NOP that will have the right line number.
// This is different from obj.ANOP, which is a virtual no-op
// that doesn't make it into the instruction stream.
Arch.Ginsnopdefer(s.pp)
}
if ok {
// Record call graph information for nowritebarrierrec
// analysis.
if nowritebarrierrecCheck != nil {
nowritebarrierrecCheck.recordCall(s.pp.CurFunc, call.Fn, v.Pos)
}
}
if s.maxarg < v.AuxInt {
s.maxarg = v.AuxInt
}
}
// UseArgs records the fact that an instruction needs a certain amount of
// callee args space for its use.
func (s *State) UseArgs(n int64) {
if s.maxarg < n {
s.maxarg = n
}
}
// fieldIdx finds the index of the field referred to by the ODOT node n.
func fieldIdx(n *ir.SelectorExpr) int {
t := n.X.Type()
if !t.IsStruct() {
panic("ODOT's LHS is not a struct")
}
for i, f := range t.Fields().Slice() {
if f.Sym == n.Sel {
if f.Offset != n.Offset() {
panic("field offset doesn't match")
}
return i
}
}
panic(fmt.Sprintf("can't find field in expr %v\n", n))
// TODO: keep the result of this function somewhere in the ODOT Node
// so we don't have to recompute it each time we need it.
}
// ssafn holds frontend information about a function that the backend is processing.
// It also exports a bunch of compiler services for the ssa backend.
type ssafn struct {
curfn *ir.Func
strings map[string]*obj.LSym // map from constant string to data symbols
stksize int64 // stack size for current frame
stkptrsize int64 // prefix of stack containing pointers
log bool // print ssa debug to the stdout
}
// StringData returns a symbol which
// is the data component of a global string constant containing s.
func (e *ssafn) StringData(s string) *obj.LSym {
if aux, ok := e.strings[s]; ok {
return aux
}
if e.strings == nil {
e.strings = make(map[string]*obj.LSym)
}
data := staticdata.StringSym(e.curfn.Pos(), s)
e.strings[s] = data
return data
}
func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name {
return typecheck.TempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
}
func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
return reflectdata.ITabSym(it, offset)
}
// SplitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
node := parent.N
if node.Class != ir.PAUTO || node.Addrtaken() {
// addressed things and non-autos retain their parents (i.e., cannot truly be split)
return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
}
s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg}
n := ir.NewNameAt(parent.N.Pos(), s)
s.Def = n
ir.AsNode(s.Def).Name().SetUsed(true)
n.SetType(t)
n.Class = ir.PAUTO
n.SetEsc(ir.EscNever)
n.Curfn = e.curfn
e.curfn.Dcl = append(e.curfn.Dcl, n)
types.CalcSize(t)
return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
}
func (e *ssafn) CanSSA(t *types.Type) bool {
return TypeOK(t)
}
func (e *ssafn) Line(pos src.XPos) string {
return base.FmtPos(pos)
}
// Logf logs a message from the compiler.
func (e *ssafn) Logf(msg string, args ...interface{}) {
if e.log {
fmt.Printf(msg, args...)
}
}
func (e *ssafn) Log() bool {
return e.log
}
// Fatal reports a compiler error and exits.
func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
base.Pos = pos
nargs := append([]interface{}{ir.FuncName(e.curfn)}, args...)
base.Fatalf("'%s': "+msg, nargs...)
}
// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
base.WarnfAt(pos, fmt_, args...)
}
func (e *ssafn) Debug_checknil() bool {
return base.Debug.Nil != 0
}
func (e *ssafn) UseWriteBarrier() bool {
return base.Flag.WB
}
func (e *ssafn) Syslook(name string) *obj.LSym {
switch name {
case "goschedguarded":
return ir.Syms.Goschedguarded
case "writeBarrier":
return ir.Syms.WriteBarrier
case "gcWriteBarrier":
return ir.Syms.GCWriteBarrier
case "typedmemmove":
return ir.Syms.Typedmemmove
case "typedmemclr":
return ir.Syms.Typedmemclr
}
e.Fatalf(src.NoXPos, "unknown Syslook func %v", name)
return nil
}
func (e *ssafn) SetWBPos(pos src.XPos) {
e.curfn.SetWBPos(pos)
}
func (e *ssafn) MyImportPath() string {
return base.Ctxt.Pkgpath
}
func clobberBase(n ir.Node) ir.Node {
if n.Op() == ir.ODOT {
n := n.(*ir.SelectorExpr)
if n.X.Type().NumFields() == 1 {
return clobberBase(n.X)
}
}
if n.Op() == ir.OINDEX {
n := n.(*ir.IndexExpr)
if n.X.Type().IsArray() && n.X.Type().NumElem() == 1 {
return clobberBase(n.X)
}
}
return n
}
// callTargetLSym returns the correct LSym to call 'callee' using its ABI.
func callTargetLSym(callee *ir.Name) *obj.LSym {
if callee.Func == nil {
// TODO(austin): This happens in a few cases of
// compiler-generated functions. These are all
// ABIInternal. It would be better if callee.Func was
// never nil and we didn't need this case.
return callee.Linksym()
}
return callee.LinksymABI(callee.Func.ABI)
}
func min8(a, b int8) int8 {
if a < b {
return a
}
return b
}
func max8(a, b int8) int8 {
if a > b {
return a
}
return b
}
// deferstruct makes a runtime._defer structure, with additional space for
// stksize bytes of args.
func deferstruct(stksize int64) *types.Type {
makefield := func(name string, typ *types.Type) *types.Field {
// Unlike the global makefield function, this one needs to set Pkg
// because these types might be compared (in SSA CSE sorting).
// TODO: unify this makefield and the global one above.
sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
return types.NewField(src.NoXPos, sym, typ)
}
argtype := types.NewArray(types.Types[types.TUINT8], stksize)
argtype.Width = stksize
argtype.Align = 1
// These fields must match the ones in runtime/runtime2.go:_defer and
// cmd/compile/internal/gc/ssa.go:(*state).call.
fields := []*types.Field{
makefield("siz", types.Types[types.TUINT32]),
makefield("started", types.Types[types.TBOOL]),
makefield("heap", types.Types[types.TBOOL]),
makefield("openDefer", types.Types[types.TBOOL]),
makefield("sp", types.Types[types.TUINTPTR]),
makefield("pc", types.Types[types.TUINTPTR]),
// Note: the types here don't really matter. Defer structures
// are always scanned explicitly during stack copying and GC,
// so we make them uintptr type even though they are real pointers.
makefield("fn", types.Types[types.TUINTPTR]),
makefield("_panic", types.Types[types.TUINTPTR]),
makefield("link", types.Types[types.TUINTPTR]),
makefield("framepc", types.Types[types.TUINTPTR]),
makefield("varp", types.Types[types.TUINTPTR]),
makefield("fd", types.Types[types.TUINTPTR]),
makefield("args", argtype),
}
// build struct holding the above fields
s := types.NewStruct(types.NoPkg, fields)
s.SetNoalg(true)
types.CalcStructSize(s)
return s
}
// SpillSlotAddr uses LocalSlot information to initialize an obj.Addr.
// The resulting addr is used in a non-standard context -- in the prologue
// of a function, before the frame has been constructed, so the standard
// addressing for the parameters will be wrong.
func SpillSlotAddr(spill ssa.Spill, baseReg int16, extraOffset int64) obj.Addr {
return obj.Addr{
Name: obj.NAME_NONE,
Type: obj.TYPE_MEM,
Reg: baseReg,
Offset: spill.Offset + extraOffset,
}
}
var (
BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
)
// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
var GCWriteBarrierReg map[int16]*obj.LSym
| ["\"GOSSAFUNC\"", "\"GOSSADIR\""] | [] | ["GOSSADIR", "GOSSAFUNC"] | [] | ["GOSSADIR", "GOSSAFUNC"] | go | 2 | 0 | 
iot/api-client/beta-features/manager/src/main/java/com/example/cloud/iot/examples/DeviceGatewayExampleOptions.java | /*
* Copyright 2018 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.cloud.iot.examples;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
/** Command line options for the Device Gateway example. */
public class DeviceGatewayExampleOptions {
String algorithm;
String cloudRegion = "us-central1";
String command = "help";
String deviceId; // Default to UUID?
String gatewayId;
String projectId;
String publicKeyFile;
String privateKeyFile;
String registryName;
String telemetryData = "Specify with -telemetry_data";
String mqttBridgeHostname = "mqtt.googleapis.com";
short mqttBridgePort = 8883;
int numMessages = 10;
int tokenExpMins = 60;
String messageType = "event";
static final Options options = new Options();
/** Construct a DeviceGatewayExampleOptions class from command line flags. */
public static DeviceGatewayExampleOptions fromFlags(String[] args) {
// Required arguments
options.addOption(
Option.builder()
.type(String.class)
.longOpt("command")
.hasArg()
.desc(
"Command to run:"
+ "\n\tbind-device-to-gateway"
+ "\n\tcreate-gateway"
+ "\n\tlist-gateways"
+ "\n\tlist-devices-for-gateway"
+ "\n\tlisten-for-config-messages"
+ "\n\tsend-data-from-bound-device"
+ "\n\tunbind-device-from-gateway")
.required()
.build());
// Optional arguments.
options.addOption(
Option.builder()
.type(String.class)
.longOpt("algorithm")
.hasArg()
.desc("Algorithm used for public/private keys.")
.build());
options.addOption(
Option.builder()
.type(String.class)
.longOpt("cloud_region")
.hasArg()
.desc("GCP cloud region.")
.build());
options.addOption(
Option.builder()
.type(String.class)
.longOpt("device_id")
.hasArg()
.desc("The identifier for the device bound to the gateway.")
.build());
options.addOption(
Option.builder()
.type(String.class)
.longOpt("gateway_id")
.hasArg()
.desc("The identifier for the Gateway.")
.build());
options.addOption(
Option.builder()
.type(String.class)
.longOpt("project_id")
.hasArg()
.desc("GCP cloud project name.")
.build());
options.addOption(
Option.builder()
.type(String.class)
.longOpt("private_key_file")
.hasArg()
.desc("Private key file used for connecting devices and gateways.")
.build());
options.addOption(
Option.builder()
.type(String.class)
.longOpt("public_key_file")
.hasArg()
.desc("Public key file used for registering devices and gateways.")
.build());
options.addOption(
Option.builder()
.type(String.class)
.longOpt("registry_name")
.hasArg()
.desc("Name for your Device Registry.")
.build());
options.addOption(
Option.builder()
.type(String.class)
.longOpt("telemetry_data")
.hasArg()
.desc("The telemetry data (string or JSON) to send on behalf of the delegated device.")
.build());
options.addOption(
Option.builder()
.type(Number.class)
.longOpt("num_messages")
.hasArg()
.desc("Number of messages to publish.")
.build());
options.addOption(
Option.builder()
.type(String.class)
.longOpt("mqtt_bridge_hostname")
.hasArg()
.desc("MQTT bridge hostname.")
.build());
options.addOption(
Option.builder()
.type(Number.class)
.longOpt("token_exp_minutes")
.hasArg()
.desc("Minutes to JWT token refresh (token expiration time).")
.build());
options.addOption(
Option.builder()
.type(Number.class)
.longOpt("mqtt_bridge_port")
.hasArg()
.desc("MQTT bridge port.")
.build());
options.addOption(
Option.builder()
.type(String.class)
.longOpt("message_type")
.hasArg()
.desc("Indicates whether the message is a telemetry event or a device state message")
.build());
CommandLineParser parser = new DefaultParser();
CommandLine commandLine;
try {
commandLine = parser.parse(options, args);
DeviceGatewayExampleOptions res = new DeviceGatewayExampleOptions();
res.command = commandLine.getOptionValue("command");
if (res.command.equals("help") || res.command.equals("")) {
throw new ParseException("Invalid command, showing help.");
}
if (commandLine.hasOption("algorithm")) {
res.algorithm = commandLine.getOptionValue("algorithm");
}
if (commandLine.hasOption("cloud_region")) {
res.cloudRegion = commandLine.getOptionValue("cloud_region");
}
if (commandLine.hasOption("telemetry_data")) {
res.telemetryData = commandLine.getOptionValue("telemetry_data");
}
if (commandLine.hasOption("device_id")) {
res.deviceId = commandLine.getOptionValue("device_id");
}
if (commandLine.hasOption("gateway_id")) {
res.gatewayId = commandLine.getOptionValue("gateway_id");
}
if (commandLine.hasOption("project_id")) {
res.projectId = commandLine.getOptionValue("project_id");
} else {
try {
res.projectId = System.getenv("GOOGLE_CLOUD_PROJECT");
} catch (NullPointerException npe) {
res.projectId = System.getenv("GCLOUD_PROJECT");
}
}
if (commandLine.hasOption("private_key_file")) {
res.privateKeyFile = commandLine.getOptionValue("private_key_file");
}
if (commandLine.hasOption("public_key_file")) {
res.publicKeyFile = commandLine.getOptionValue("public_key_file");
}
if (commandLine.hasOption("registry_name")) {
res.registryName = commandLine.getOptionValue("registry_name");
}
if (commandLine.hasOption("num_messages")) {
res.numMessages = ((Number) commandLine.getParsedOptionValue("num_messages")).intValue();
}
if (commandLine.hasOption("token_exp_minutes")) {
res.tokenExpMins =
((Number) commandLine.getParsedOptionValue("token_exp_minutes")).intValue();
}
if (commandLine.hasOption("mqtt_bridge_hostname")) {
res.mqttBridgeHostname = commandLine.getOptionValue("mqtt_bridge_hostname");
}
if (commandLine.hasOption("mqtt_bridge_port")) {
res.mqttBridgePort =
((Number) commandLine.getParsedOptionValue("mqtt_bridge_port")).shortValue();
}
if (commandLine.hasOption("message_type")) {
res.messageType = commandLine.getOptionValue("message_type");
}
return res;
} catch (ParseException e) {
String header = "Cloud IoT Core Commandline Example (Device Gateways): \n\n";
String footer = "\nhttps://cloud.google.com/iot-core";
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("DeviceGatewayExample", header, options, footer,
true);
System.err.println(e.getMessage());
return null;
}
}
}
| ["\"GOOGLE_CLOUD_PROJECT\"", "\"GCLOUD_PROJECT\""] | [] | ["GCLOUD_PROJECT", "GOOGLE_CLOUD_PROJECT"] | [] | ["GCLOUD_PROJECT", "GOOGLE_CLOUD_PROJECT"] | java | 2 | 0 |
conftest.py | import os
import copy
import pytest
import time
import shutil
import tempfile
import logging
from _pytest.logging import caplog as _caplog
from contextlib import suppress
from panoptes.utils.logging import logger
from panoptes.utils.database import PanDB
from panoptes.utils.config.client import get_config
from panoptes.utils.config.client import set_config
from panoptes.utils.config.server import config_server
# Doctest modules
import numpy as np
from matplotlib import pyplot as plt
_all_databases = ['file', 'memory']
logger.enable('panoptes')
logger.level("testing", no=15, icon="🤖", color="<YELLOW><black>")
log_file_path = os.path.join(
os.getenv('PANLOG', '/var/panoptes/logs'),
'panoptes-testing.log'
)
log_fmt = "<lvl>{level:.1s}</lvl> " \
"<light-blue>{time:MM-DD HH:mm:ss.ss!UTC}</>" \
"<blue>({time:HH:mm:ss.ss})</> " \
"| <c>{name} {function}:{line}</c> | " \
"<lvl>{message}</lvl>\n"
startup_message = ' STARTING NEW PYTEST RUN '
logger.add(log_file_path,
enqueue=True, # multiprocessing
format=log_fmt,
colorize=True,
backtrace=True,
diagnose=True,
catch=True,
# Start new log file for each testing run.
rotation=lambda msg, _: startup_message in msg,
level='TRACE')
logger.log('testing', '*' * 25 + startup_message + '*' * 25)
def pytest_addoption(parser):
db_names = ",".join(_all_databases) + ' (or all for all databases)'
group = parser.getgroup("PANOPTES pytest options")
group.addoption(
"--astrometry",
action="store_true",
default=False,
help="If tests that require solving should be run")
group.addoption(
"--theskyx",
action="store_true",
default=False,
help="If running tests alongside a running TheSkyX program.")
group.addoption(
"--test-databases",
nargs="+",
default=['file'],
help=("Test databases in the list. List items can include: " + db_names +
". Note that travis-ci will test all of them by default."))
@pytest.fixture(scope='session')
def db_name():
return 'panoptes_testing'
@pytest.fixture(scope='session')
def images_dir(tmpdir_factory):
directory = tmpdir_factory.mktemp('images')
return str(directory)
@pytest.fixture(scope='session')
def config_path():
return os.path.expandvars('${PANDIR}/panoptes-utils/tests/panoptes_utils_testing.yaml')
@pytest.fixture(scope='session', autouse=True)
def static_config_server(config_path, images_dir, db_name):
logger.log('testing', f'Starting static_config_server for testing session')
proc = config_server(
config_file=config_path,
ignore_local=True,
auto_save=False
)
logger.log('testing', f'static_config_server started with {proc.pid=}')
# Give server time to start
while get_config('name') is None: # pragma: no cover
logger.log('testing', f'Waiting for static_config_server {proc.pid=}, sleeping 1 second.')
time.sleep(1)
logger.log('testing', f'Startup config_server name=[{get_config("name")}]')
# Adjust various config items for testing
unit_id = 'PAN000'
logger.log('testing', f'Setting testing name and unit_id to {unit_id}')
set_config('pan_id', unit_id)
logger.log('testing', f'Setting testing database to {db_name}')
set_config('db.name', db_name)
fields_file = 'simulator.yaml'
logger.log('testing', f'Setting testing scheduler fields_file to {fields_file}')
set_config('scheduler.fields_file', fields_file)
logger.log('testing', f'Setting temporary image directory for testing')
set_config('directories.images', images_dir)
yield
logger.log('testing', f'Killing static_config_server started with PID={proc.pid}')
proc.terminate()
@pytest.fixture(scope='function', params=_all_databases)
def db_type(request):
db_list = request.config.option.test_databases
if request.param not in db_list and 'all' not in db_list: # pragma: no cover
pytest.skip(f"Skipping {request.param} DB, set --test-all-databases=True")
PanDB.permanently_erase_database(
request.param, 'panoptes_testing', really='Yes', dangerous='Totally')
return request.param
@pytest.fixture(scope='function')
def db(db_type):
return PanDB(db_type=db_type, db_name='panoptes_testing', connect=True)
@pytest.fixture(scope='function')
def save_environ():
old_env = copy.deepcopy(os.environ)
yield
os.environ = old_env
@pytest.fixture(scope='session')
def data_dir():
return os.path.expandvars('/var/panoptes/panoptes-utils/tests/data')
@pytest.fixture(scope='function')
def unsolved_fits_file(data_dir):
orig_file = os.path.join(data_dir, 'unsolved.fits')
with tempfile.TemporaryDirectory() as tmpdirname:
copy_file = shutil.copy2(orig_file, tmpdirname)
yield copy_file
@pytest.fixture(scope='function')
def solved_fits_file(data_dir):
orig_file = os.path.join(data_dir, 'solved.fits.fz')
with tempfile.TemporaryDirectory() as tmpdirname:
copy_file = shutil.copy2(orig_file, tmpdirname)
yield copy_file
@pytest.fixture(scope='function')
def tiny_fits_file(data_dir):
orig_file = os.path.join(data_dir, 'tiny.fits')
with tempfile.TemporaryDirectory() as tmpdirname:
copy_file = shutil.copy2(orig_file, tmpdirname)
yield copy_file
@pytest.fixture(scope='function')
def noheader_fits_file(data_dir):
orig_file = os.path.join(data_dir, 'noheader.fits')
with tempfile.TemporaryDirectory() as tmpdirname:
copy_file = shutil.copy2(orig_file, tmpdirname)
yield copy_file
@pytest.fixture(scope='function')
def cr2_file(data_dir):
cr2_path = os.path.join(data_dir, 'canon.cr2')
if not os.path.exists(cr2_path):
pytest.skip("No CR2 file found, skipping test.")
with tempfile.TemporaryDirectory() as tmpdirname:
copy_file = shutil.copy2(cr2_path, tmpdirname)
yield copy_file
@pytest.fixture(autouse=True)
def add_doctest_dependencies(doctest_namespace):
doctest_namespace['np'] = np
doctest_namespace['plt'] = plt
@pytest.fixture
def caplog(_caplog):
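    """Propagate loguru records to the standard logging module so that pytest's
    built-in caplog fixture can capture messages emitted via the panoptes logger."""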
    class PropagateHandler(logging.Handler):
        def emit(self, record):
            logging.getLogger(record.name).handle(record)
    logger.enable('panoptes')
    handler_id = logger.add(PropagateHandler(), format="{message}")
yield _caplog
with suppress(ValueError):
logger.remove(handler_id)
| []
| []
| [
"PANLOG"
]
| [] | ["PANLOG"] | python | 1 | 0 | |
tests/system_test/build.py | import logging
import os
import random
import boto3
import argparse
import json
import time
from botocore.exceptions import ClientError
# Step Functions Libraries
import stepfunctions
from stepfunctions import steps
from stepfunctions.inputs import ExecutionInput
from stepfunctions.steps import (
Chain,
ChoiceRule,
ProcessingStep,
TrainingStep,
Task,
LambdaStep
)
from stepfunctions.template import TrainingPipeline
from stepfunctions.workflow import Workflow
# SageMaker Libraries
import sagemaker
from sagemaker.processing import ProcessingInput, ProcessingOutput, Processor
from sagemaker.s3 import S3Uploader
# Client Session
logger = logging.getLogger(__name__)
sagemaker_session = sagemaker.Session()
region = sagemaker_session.boto_region_name
account_id = boto3.client('sts').get_caller_identity()["Account"]
role = sagemaker.session.get_execution_role()
sfn = boto3.client('stepfunctions')
cp = boto3.client('codepipeline')
ssm = boto3.client('ssm')
# Helper Functions
def get_job_id(pipeline_name):
"""
Description:
-----------
Gets the current executionId based on the CodePipeline Stage.
:pipeline_name: CodePipeline Name.
:return: CodePipeline Execution ID for this state.
"""
try:
response = cp.get_pipeline_state(name=pipeline_name)
for stageState in response['stageStates']:
if stageState['stageName'] == 'SystemTest':
for actionState in stageState['actionStates']:
if actionState['actionName'] == 'BuildTestingWorkflow':
return stageState['latestExecution']['pipelineExecutionId']
except ClientError as e:
error_message = e.response["Error"]["Message"]
logger.error(error_message)
raise Exception(error_message)
def get_workflow_role():
"""
Description:
-----------
    Retrieves the Workflow Execution Role ARN from Parameter Store.
    :return: Workflow Execution Role ARN from Parameter Store.
"""
try:
response = ssm.get_parameter(
Name='WorkflowExecRole',
)
return response['Parameter']['Value']
except ClientError as e:
error_message = e.response["Error"]["Message"]
logger.error(error_message)
raise Exception(error_message)
def get_lambda_arn(name):
"""
Description:
-----------
Retrieves the Lambda Function Arn from the Parameter Store.
:name: (str) Name of the Lambda Function to return the ARN for.
:return: Evaluation Lambda ARN from paramater store.
"""
try:
response = ssm.get_parameter(
Name=name
)
return response['Parameter']['Value']
except ClientError as e:
error_message = e.response["Error"]["Message"]
logger.error(error_message)
raise Exception(error_message)
def get_baseline_uri(region):
"""
Description:
-----------
    Builds the container URI for the Baseline Processing Container based on the region.
:region: (str) Current AWS Region
:return: Baseline Container URI.
"""
container_uri_format = (
"{0}.dkr.ecr.{1}.amazonaws.com/sagemaker-model-monitor-analyzer"
)
regions_to_accounts = {
"eu-north-1": "895015795356",
"me-south-1": "607024016150",
"ap-south-1": "126357580389",
"eu-west-3": "680080141114",
"us-east-2": "777275614652",
"eu-west-1": "468650794304",
"eu-central-1": "048819808253",
"sa-east-1": "539772159869",
"ap-east-1": "001633400207",
"us-east-1": "156813124566",
"ap-northeast-2": "709848358524",
"eu-west-2": "749857270468",
"ap-northeast-1": "574779866223",
"us-west-2": "159807026194",
"us-west-1": "890145073186",
"ap-southeast-1": "245545462676",
"ap-southeast-2": "563025443158",
"ca-central-1": "536280801234"
}
container_uri = container_uri_format.format(regions_to_accounts[region], region)
return container_uri
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pipeline-name", type=str, default=os.environ["PIPELINE_NAME"])
parser.add_argument("--image-repo-name", type=str, default=os.environ["IMAGE_REPO_NAME"])
parser.add_argument("--image-tag", type=str, default=os.environ["IMAGE_TAG"])
parser.add_argument("--model-name", type=str, default=os.environ["MODEL_NAME"])
parser.add_argument("--model-package-group-name", type=str, default=os.environ["MODEL_GROUP"])
parser.add_argument("--test-endpoint", type=str, default="{}-dev-endpoint".format(os.environ["MODEL_NAME"]))
parser.add_argument("--pipeline-bucket", type=str, default=os.environ["PIPELINE_BUCKET"])
args, _ = parser.parse_known_args()
# Configure logging to output the line number and message
log_format = "%(levelname)s: [%(filename)s:%(lineno)s] %(message)s"
logging.basicConfig(format=log_format, level=os.environ.get("LOGLEVEL", "INFO").upper())
# Configure workflow variables for current execution
job_id = get_job_id(args.pipeline_name)
baseline_job_name = "{}-baseline-{}".format(args.model_name, job_id)
image_uri = "{}.dkr.ecr.{}.amazonaws.com/{}:{}".format(account_id, region, args.image_repo_name, args.image_tag)
"""
SageMaker expects unique names for each job, model and endpoint.
NOTE: If these names are not unique the execution will fail.
Pass these dynamically for each execution using placeholders.
"""
execution_input = ExecutionInput(
schema={
"ModelName": str,
"ModelGroup": str,
"EndpointName": str,
"BaselineProcessingJobName": str
}
)
# S3 Locations of processing baseline and testing data.
s3_bucket_base_uri = "s3://{}".format(args.pipeline_bucket)
input_data_prefix = os.path.join(s3_bucket_base_uri, job_id, 'input')
output_data_prefix = os.path.join(s3_bucket_base_uri, job_id)
preprocessed_baseline_data = "{}/{}".format(input_data_prefix, 'baseline')
output_baseline_report_s3_uri = "{}/{}".format(output_data_prefix,"baseline_report")
output_model_evaluation_s3_uri = "{}/{}".format(output_data_prefix,"evaluation")
model_s3_uri = "{}/{}/mlops-{}-{}/{}".format(s3_bucket_base_uri, job_id, args.model_name, job_id, "output/model.tar.gz")
# Create the Lambda Function `configure_output` Step
evaluate_endpoint_step = LambdaStep(
"Evaluate SageMaker Hosted Model",
parameters={
"FunctionName": get_lambda_arn('EvaluateEndpoint'),
"Payload": {
"Endpoint_Name": execution_input['EndpointName'],
"Bucket": args.pipeline_bucket,
"Key": "{}/input/testing/test.csv".format(job_id),
"Output_Key": "{}/evaluation".format(job_id)
}
}
)
# Create the Lambda Function `registerModel` Step
register_model_step = LambdaStep(
"Register Production Model",
parameters={
"FunctionName": get_lambda_arn('RegisterModel'),
"Payload": {
"Model_Name": execution_input['ModelName'],
"Group_Name": execution_input['ModelGroup'],
"Model_Uri": model_s3_uri,
"Image_Uri": image_uri,
"Job_Id": job_id,
"Evaluation_Uri": os.path.join(output_model_evaluation_s3_uri, "evaluation.json")
}
}
)
# Create Baseline suggestion step
baseline_step = ProcessingStep(
"Suggest Baseline",
processor=Processor(
image_uri=get_baseline_uri(region),
instance_count=1,
instance_type="ml.m5.xlarge",
volume_size_in_gb=30,
role=role,
max_runtime_in_seconds=1800,
env={
"dataset_format": "{\"csv\": {\"header\": true, \"output_columns_position\": \"START\"}}",
"dataset_source": "/opt/ml/processing/input/baseline_dataset_input",
"output_path": "/opt/ml/processing/output",
"publish_cloudwatch_metrics": "Disabled"
}
),
job_name=execution_input["BaselineProcessingJobName"],
inputs=[
ProcessingInput(
source="{}/{}".format(preprocessed_baseline_data, "baseline.csv"),
destination="/opt/ml/processing/input/baseline_dataset_input",
input_name="baseline_dataset_input"
)
],
outputs=[
ProcessingOutput(
source="/opt/ml/processing/output",
destination=output_baseline_report_s3_uri,
output_name="monitoring_output"
)
]
)
# Create a `Parallel` Step to simultaneously run the `baseline` and `register_model` steps
parallel_step = stepfunctions.steps.states.Parallel(
"Finalize Production Model",
)
parallel_step.add_branch(baseline_step)
parallel_step.add_branch(register_model_step)
# Create `Fail` states to mark the workflow failed in case any of the steps fail
workflow_failed_state = stepfunctions.steps.states.Fail(
"Workflow Failed", cause="WorkflowFailed"
)
    # Create a `Fail` state for when the model evaluation is above the quality threshold
threshold_fail_state = stepfunctions.steps.states.Fail(
"Model Above Quality Threshold"
)
    # Create a `Pass` state for successful evaluation
threshold_pass_state = stepfunctions.steps.states.Pass(
"Model Below Quality Threshold"
)
# Add the baseline step after the `Pass` state
threshold_pass_state.next(parallel_step)
# Create Threshold PASS | Fail Branch Step
check_threshold_step = steps.states.Choice(
"Evaluate Model Quality Threshold"
)
# Set rule to evaluate the results of the Analysis Step with the Threshold value
threshold_rule = steps.choice_rule.ChoiceRule.NumericLessThan(
variable=evaluate_endpoint_step.output()['Payload']['Result'],
value=float(os.environ["THRESHOLD"])
)
# If results less than threshold, workflow is successful
check_threshold_step.add_choice(rule=threshold_rule, next_step=threshold_pass_state)
# If results above threshold, workflow failed
check_threshold_step.default_choice(next_step=threshold_fail_state)
# Define `catch` Step to catch any step failures
catch_state = stepfunctions.steps.states.Catch(
error_equals=["States.TaskFailed"],
next_step=workflow_failed_state,
)
# Add catch block to workflow steps
evaluate_endpoint_step.add_catch(catch_state)
parallel_step.add_catch(catch_state)
# Define the workflow graph
workflow_graph = Chain(
[
evaluate_endpoint_step,
check_threshold_step
]
)
# Define the workflow
workflow = Workflow(
name=os.environ['WORKFLOW_NAME'],
definition=workflow_graph,
role=get_workflow_role()
)
# Create State Machine
try:
logger.info("Creating workflow ...")
workflow.create()
except sfn.exceptions.StateMachineAlreadyExists:
logger.info("Found existing workflow, updating the State Machine definition ...")
else:
# Update workflow
workflow.update(workflow_graph)
# Wait 60 seconds to ensure that definition has been updated before execution
time.sleep(60)
# Create JSON file of the current execution variables
with open("input.json", "w") as json_file:
json.dump(
{
"ModelName": args.model_name,
"ModelGroup": args.model_package_group_name,
"EndpointName": args.test_endpoint,
"BaselineProcessingJobName": baseline_job_name
},
json_file
)
| []
| []
| [
"THRESHOLD",
"IMAGE_REPO_NAME",
"PIPELINE_NAME",
"IMAGE_TAG",
"PIPELINE_BUCKET",
"MODEL_GROUP",
"LOGLEVEL",
"MODEL_NAME",
"WORKFLOW_NAME"
]
| [] | ["THRESHOLD", "IMAGE_REPO_NAME", "PIPELINE_NAME", "IMAGE_TAG", "PIPELINE_BUCKET", "MODEL_GROUP", "LOGLEVEL", "MODEL_NAME", "WORKFLOW_NAME"] | python | 9 | 0 | |
src/vendor/github.com/beego/beego/config.go | // Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package beego
import (
"crypto/tls"
"fmt"
"net/http"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"github.com/beego/beego/config"
"github.com/beego/beego/context"
"github.com/beego/beego/logs"
"github.com/beego/beego/session"
"github.com/beego/beego/utils"
)
// Config is the main struct for BConfig
type Config struct {
AppName string // Application name
RunMode string // Running Mode: dev | prod
RouterCaseSensitive bool
ServerName string
RecoverPanic bool
RecoverFunc func(*context.Context)
CopyRequestBody bool
EnableGzip bool
MaxMemory int64
EnableErrorsShow bool
EnableErrorsRender bool
Listen Listen
WebConfig WebConfig
Log LogConfig
}
// Listen holds for http and https related config
type Listen struct {
Graceful bool // Graceful means use graceful module to start the server
ServerTimeOut int64
ListenTCP4 bool
EnableHTTP bool
HTTPAddr string
HTTPPort int
AutoTLS bool
Domains []string
TLSCacheDir string
EnableHTTPS bool
EnableMutualHTTPS bool
HTTPSAddr string
HTTPSPort int
HTTPSCertFile string
HTTPSKeyFile string
TrustCaFile string
ClientAuth tls.ClientAuthType
EnableAdmin bool
AdminAddr string
AdminPort int
EnableFcgi bool
EnableStdIo bool // EnableStdIo works with EnableFcgi Use FCGI via standard I/O
}
// WebConfig holds web related config
type WebConfig struct {
AutoRender bool
EnableDocs bool
FlashName string
FlashSeparator string
DirectoryIndex bool
StaticDir map[string]string
StaticExtensionsToGzip []string
StaticCacheFileSize int
StaticCacheFileNum int
TemplateLeft string
TemplateRight string
ViewsPath string
EnableXSRF bool
XSRFKey string
XSRFExpire int
XSRFSecure bool
XSRFHttpOnly bool
Session SessionConfig
}
// SessionConfig holds session related config
type SessionConfig struct {
SessionOn bool
SessionProvider string
SessionName string
SessionGCMaxLifetime int64
SessionProviderConfig string
SessionCookieLifeTime int
SessionAutoSetCookie bool
SessionDomain string
SessionDisableHTTPOnly bool // used to allow for cross domain cookies/javascript cookies.
SessionEnableSidInHTTPHeader bool // enable store/get the sessionId into/from http headers
SessionNameInHTTPHeader string
SessionEnableSidInURLQuery bool // enable get the sessionId from Url Query params
SessionCookieSameSite http.SameSite
}
// LogConfig holds Log related config
type LogConfig struct {
AccessLogs bool
EnableStaticLogs bool // log static files requests default: false
AccessLogsFormat string // access log format: JSON_FORMAT, APACHE_FORMAT or empty string
FileLineNum bool
Outputs map[string]string // Store Adaptor : config
}
var (
// BConfig is the default config for Application
BConfig *Config
// AppConfig is the instance of Config, store the config information from file
AppConfig *beegoAppConfig
// AppPath is the absolute path to the app
AppPath string
// GlobalSessions is the instance for the session manager
GlobalSessions *session.Manager
// appConfigPath is the path to the config files
appConfigPath string
// appConfigProvider is the provider for the config, default is ini
appConfigProvider = "ini"
// WorkPath is the absolute path to project root directory
WorkPath string
)
func init() {
BConfig = newBConfig()
var err error
if AppPath, err = filepath.Abs(filepath.Dir(os.Args[0])); err != nil {
panic(err)
}
WorkPath, err = os.Getwd()
if err != nil {
panic(err)
}
var filename = "app.conf"
if os.Getenv("BEEGO_RUNMODE") != "" {
filename = os.Getenv("BEEGO_RUNMODE") + ".app.conf"
}
appConfigPath = filepath.Join(WorkPath, "conf", filename)
if configPath := os.Getenv("BEEGO_CONFIG_PATH"); configPath != "" {
appConfigPath = configPath
}
if !utils.FileExists(appConfigPath) {
appConfigPath = filepath.Join(AppPath, "conf", filename)
if !utils.FileExists(appConfigPath) {
AppConfig = &beegoAppConfig{innerConfig: config.NewFakeConfig()}
return
}
}
if err = parseConfig(appConfigPath); err != nil {
panic(err)
}
}
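// recoverPanic is the default RecoverFunc. It recovers from a panic raised while
// handling a request, logs the error and a stack trace, optionally renders the
// error page in dev mode, and writes an error status to the response.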
func recoverPanic(ctx *context.Context) {
if err := recover(); err != nil {
if err == ErrAbort {
return
}
if !BConfig.RecoverPanic {
panic(err)
}
if BConfig.EnableErrorsShow {
if _, ok := ErrorMaps[fmt.Sprint(err)]; ok {
exception(fmt.Sprint(err), ctx)
return
}
}
var stack string
logs.Critical("the request url is ", ctx.Input.URL())
logs.Critical("Handler crashed with error", err)
for i := 1; ; i++ {
_, file, line, ok := runtime.Caller(i)
if !ok {
break
}
logs.Critical(fmt.Sprintf("%s:%d", file, line))
stack = stack + fmt.Sprintln(fmt.Sprintf("%s:%d", file, line))
}
if BConfig.RunMode == DEV && BConfig.EnableErrorsRender {
showErr(err, ctx, stack)
}
if ctx.Output.Status != 0 {
ctx.ResponseWriter.WriteHeader(ctx.Output.Status)
} else {
ctx.ResponseWriter.WriteHeader(500)
}
}
}
func newBConfig() *Config {
return &Config{
AppName: "beego",
RunMode: PROD,
RouterCaseSensitive: true,
ServerName: "beegoServer:" + VERSION,
RecoverPanic: true,
RecoverFunc: recoverPanic,
CopyRequestBody: false,
EnableGzip: false,
MaxMemory: 1 << 26, // 64MB
EnableErrorsShow: true,
EnableErrorsRender: true,
Listen: Listen{
Graceful: false,
ServerTimeOut: 0,
ListenTCP4: false,
EnableHTTP: true,
AutoTLS: false,
Domains: []string{},
TLSCacheDir: ".",
HTTPAddr: "",
HTTPPort: 8080,
EnableHTTPS: false,
HTTPSAddr: "",
HTTPSPort: 10443,
HTTPSCertFile: "",
HTTPSKeyFile: "",
EnableAdmin: false,
AdminAddr: "",
AdminPort: 8088,
EnableFcgi: false,
EnableStdIo: false,
ClientAuth: tls.RequireAndVerifyClientCert,
},
WebConfig: WebConfig{
AutoRender: true,
EnableDocs: false,
FlashName: "BEEGO_FLASH",
FlashSeparator: "BEEGOFLASH",
DirectoryIndex: false,
StaticDir: map[string]string{"/static": "static"},
StaticExtensionsToGzip: []string{".css", ".js"},
StaticCacheFileSize: 1024 * 100,
StaticCacheFileNum: 1000,
TemplateLeft: "{{",
TemplateRight: "}}",
ViewsPath: "views",
EnableXSRF: false,
XSRFKey: "beegoxsrf",
XSRFExpire: 0,
XSRFSecure: false,
XSRFHttpOnly: false,
Session: SessionConfig{
SessionOn: false,
SessionProvider: "memory",
SessionName: "beegosessionID",
SessionGCMaxLifetime: 3600,
SessionProviderConfig: "",
SessionDisableHTTPOnly: false,
SessionCookieLifeTime: 0, // set cookie default is the browser life
SessionAutoSetCookie: true,
SessionDomain: "",
SessionEnableSidInHTTPHeader: false, // enable store/get the sessionId into/from http headers
SessionNameInHTTPHeader: "Beegosessionid",
SessionEnableSidInURLQuery: false, // enable get the sessionId from Url Query params
SessionCookieSameSite: http.SameSiteDefaultMode,
},
},
Log: LogConfig{
AccessLogs: false,
EnableStaticLogs: false,
AccessLogsFormat: "APACHE_FORMAT",
FileLineNum: true,
Outputs: map[string]string{"console": ""},
},
}
}
// parseConfig currently only supports ini; json support will be added next.
func parseConfig(appConfigPath string) (err error) {
AppConfig, err = newAppConfig(appConfigProvider, appConfigPath)
if err != nil {
return err
}
return assignConfig(AppConfig)
}
func assignConfig(ac config.Configer) error {
for _, i := range []interface{}{BConfig, &BConfig.Listen, &BConfig.WebConfig, &BConfig.Log, &BConfig.WebConfig.Session} {
assignSingleConfig(i, ac)
}
// set the run mode first
if envRunMode := os.Getenv("BEEGO_RUNMODE"); envRunMode != "" {
BConfig.RunMode = envRunMode
} else if runMode := ac.String("RunMode"); runMode != "" {
BConfig.RunMode = runMode
}
if sd := ac.String("StaticDir"); sd != "" {
BConfig.WebConfig.StaticDir = map[string]string{}
sds := strings.Fields(sd)
for _, v := range sds {
if url2fsmap := strings.SplitN(v, ":", 2); len(url2fsmap) == 2 {
BConfig.WebConfig.StaticDir["/"+strings.Trim(url2fsmap[0], "/")] = url2fsmap[1]
} else {
BConfig.WebConfig.StaticDir["/"+strings.Trim(url2fsmap[0], "/")] = url2fsmap[0]
}
}
}
if sgz := ac.String("StaticExtensionsToGzip"); sgz != "" {
extensions := strings.Split(sgz, ",")
fileExts := []string{}
for _, ext := range extensions {
ext = strings.TrimSpace(ext)
if ext == "" {
continue
}
if !strings.HasPrefix(ext, ".") {
ext = "." + ext
}
fileExts = append(fileExts, ext)
}
if len(fileExts) > 0 {
BConfig.WebConfig.StaticExtensionsToGzip = fileExts
}
}
if sfs, err := ac.Int("StaticCacheFileSize"); err == nil {
BConfig.WebConfig.StaticCacheFileSize = sfs
}
if sfn, err := ac.Int("StaticCacheFileNum"); err == nil {
BConfig.WebConfig.StaticCacheFileNum = sfn
}
if lo := ac.String("LogOutputs"); lo != "" {
// if lo is not nil or empty
// means user has set his own LogOutputs
// clear the default setting to BConfig.Log.Outputs
BConfig.Log.Outputs = make(map[string]string)
los := strings.Split(lo, ";")
for _, v := range los {
if logType2Config := strings.SplitN(v, ",", 2); len(logType2Config) == 2 {
BConfig.Log.Outputs[logType2Config[0]] = logType2Config[1]
} else {
continue
}
}
}
// init log
logs.Reset()
for adaptor, config := range BConfig.Log.Outputs {
err := logs.SetLogger(adaptor, config)
if err != nil {
fmt.Fprintln(os.Stderr, fmt.Sprintf("%s with the config %q got err:%s", adaptor, config, err.Error()))
}
}
logs.SetLogFuncCall(BConfig.Log.FileLineNum)
return nil
}
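// assignSingleConfig fills the exported string, int and bool fields of the struct
// pointed to by p from the config, using each field's name as the lookup key and
// the field's current value as the default.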
func assignSingleConfig(p interface{}, ac config.Configer) {
pt := reflect.TypeOf(p)
if pt.Kind() != reflect.Ptr {
return
}
pt = pt.Elem()
if pt.Kind() != reflect.Struct {
return
}
pv := reflect.ValueOf(p).Elem()
for i := 0; i < pt.NumField(); i++ {
pf := pv.Field(i)
if !pf.CanSet() {
continue
}
name := pt.Field(i).Name
switch pf.Kind() {
case reflect.String:
pf.SetString(ac.DefaultString(name, pf.String()))
case reflect.Int, reflect.Int64:
pf.SetInt(ac.DefaultInt64(name, pf.Int()))
case reflect.Bool:
pf.SetBool(ac.DefaultBool(name, pf.Bool()))
case reflect.Struct:
default:
// do nothing here
}
}
}
// LoadAppConfig allow developer to apply a config file
func LoadAppConfig(adapterName, configPath string) error {
absConfigPath, err := filepath.Abs(configPath)
if err != nil {
return err
}
if !utils.FileExists(absConfigPath) {
return fmt.Errorf("the target config file: %s don't exist", configPath)
}
appConfigPath = absConfigPath
appConfigProvider = adapterName
return parseConfig(appConfigPath)
}
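// beegoAppConfig wraps a config.Configer and resolves keys scoped to the current
// RunMode (e.g. "dev::key") before falling back to the bare key.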
type beegoAppConfig struct {
innerConfig config.Configer
}
func newAppConfig(appConfigProvider, appConfigPath string) (*beegoAppConfig, error) {
ac, err := config.NewConfig(appConfigProvider, appConfigPath)
if err != nil {
return nil, err
}
return &beegoAppConfig{ac}, nil
}
func (b *beegoAppConfig) Set(key, val string) error {
if err := b.innerConfig.Set(BConfig.RunMode+"::"+key, val); err != nil {
return b.innerConfig.Set(key, val)
}
return nil
}
func (b *beegoAppConfig) String(key string) string {
if v := b.innerConfig.String(BConfig.RunMode + "::" + key); v != "" {
return v
}
return b.innerConfig.String(key)
}
func (b *beegoAppConfig) Strings(key string) []string {
if v := b.innerConfig.Strings(BConfig.RunMode + "::" + key); len(v) > 0 {
return v
}
return b.innerConfig.Strings(key)
}
func (b *beegoAppConfig) Int(key string) (int, error) {
if v, err := b.innerConfig.Int(BConfig.RunMode + "::" + key); err == nil {
return v, nil
}
return b.innerConfig.Int(key)
}
func (b *beegoAppConfig) Int64(key string) (int64, error) {
if v, err := b.innerConfig.Int64(BConfig.RunMode + "::" + key); err == nil {
return v, nil
}
return b.innerConfig.Int64(key)
}
func (b *beegoAppConfig) Bool(key string) (bool, error) {
if v, err := b.innerConfig.Bool(BConfig.RunMode + "::" + key); err == nil {
return v, nil
}
return b.innerConfig.Bool(key)
}
func (b *beegoAppConfig) Float(key string) (float64, error) {
if v, err := b.innerConfig.Float(BConfig.RunMode + "::" + key); err == nil {
return v, nil
}
return b.innerConfig.Float(key)
}
func (b *beegoAppConfig) DefaultString(key string, defaultVal string) string {
if v := b.String(key); v != "" {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultStrings(key string, defaultVal []string) []string {
if v := b.Strings(key); len(v) != 0 {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultInt(key string, defaultVal int) int {
if v, err := b.Int(key); err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultInt64(key string, defaultVal int64) int64 {
if v, err := b.Int64(key); err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultBool(key string, defaultVal bool) bool {
if v, err := b.Bool(key); err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultFloat(key string, defaultVal float64) float64 {
if v, err := b.Float(key); err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DIY(key string) (interface{}, error) {
return b.innerConfig.DIY(key)
}
func (b *beegoAppConfig) GetSection(section string) (map[string]string, error) {
return b.innerConfig.GetSection(section)
}
func (b *beegoAppConfig) SaveConfigFile(filename string) error {
return b.innerConfig.SaveConfigFile(filename)
}
| [
"\"BEEGO_RUNMODE\"",
"\"BEEGO_RUNMODE\"",
"\"BEEGO_CONFIG_PATH\"",
"\"BEEGO_RUNMODE\""
]
| []
| [
"BEEGO_CONFIG_PATH",
"BEEGO_RUNMODE"
]
| [] | ["BEEGO_CONFIG_PATH", "BEEGO_RUNMODE"] | go | 2 | 0 | |
providers/facebook/facebook_test.go | package facebook_test
import (
"fmt"
"os"
"testing"
"github.com/viddsee/goth"
"github.com/viddsee/goth/providers/facebook"
"github.com/stretchr/testify/assert"
)
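// The provider under test is configured from the FACEBOOK_KEY and FACEBOOK_SECRET
// environment variables.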
func Test_New(t *testing.T) {
t.Parallel()
a := assert.New(t)
provider := facebookProvider()
a.Equal(provider.ClientKey, os.Getenv("FACEBOOK_KEY"))
a.Equal(provider.Secret, os.Getenv("FACEBOOK_SECRET"))
a.Equal(provider.CallbackURL, "/foo")
}
func Test_Implements_Provider(t *testing.T) {
t.Parallel()
a := assert.New(t)
a.Implements((*goth.Provider)(nil), facebookProvider())
}
func Test_BeginAuth(t *testing.T) {
t.Parallel()
a := assert.New(t)
provider := facebookProvider()
session, err := provider.BeginAuth("test_state")
s := session.(*facebook.Session)
a.NoError(err)
a.Contains(s.AuthURL, "facebook.com/dialog/oauth")
a.Contains(s.AuthURL, fmt.Sprintf("client_id=%s", os.Getenv("FACEBOOK_KEY")))
a.Contains(s.AuthURL, "state=test_state")
a.Contains(s.AuthURL, "scope=email")
}
func Test_SessionFromJSON(t *testing.T) {
t.Parallel()
a := assert.New(t)
provider := facebookProvider()
s, err := provider.UnmarshalSession(`{"AuthURL":"http://facebook.com/auth_url","AccessToken":"1234567890"}`)
a.NoError(err)
session := s.(*facebook.Session)
a.Equal(session.AuthURL, "http://facebook.com/auth_url")
a.Equal(session.AccessToken, "1234567890")
}
func facebookProvider() *facebook.Provider {
return facebook.New(os.Getenv("FACEBOOK_KEY"), os.Getenv("FACEBOOK_SECRET"), "/foo", "email")
}
| [
"\"FACEBOOK_KEY\"",
"\"FACEBOOK_SECRET\"",
"\"FACEBOOK_KEY\"",
"\"FACEBOOK_KEY\"",
"\"FACEBOOK_SECRET\""
]
| []
| [
"FACEBOOK_KEY",
"FACEBOOK_SECRET"
]
| [] | ["FACEBOOK_KEY", "FACEBOOK_SECRET"] | go | 2 | 0 | |
vendor/google.golang.org/api/google-api-go-generator/gen.go | // Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"go/format"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"unicode"
"google.golang.org/api/google-api-go-generator/internal/disco"
)
const (
googleDiscoveryURL = "https://www.googleapis.com/discovery/v1/apis"
generatorVersion = "2018018"
)
var (
apiToGenerate = flag.String("api", "*", "The API ID to generate, like 'tasks:v1'. A value of '*' means all.")
useCache = flag.Bool("cache", true, "Use cache of discovered Google API discovery documents.")
genDir = flag.String("gendir", "", "Directory to use to write out generated Go files")
build = flag.Bool("build", false, "Compile generated packages.")
install = flag.Bool("install", false, "Install generated packages.")
apisURL = flag.String("discoveryurl", googleDiscoveryURL, "URL to root discovery document")
publicOnly = flag.Bool("publiconly", true, "Only build public, released APIs. Only applicable for Google employees.")
jsonFile = flag.String("api_json_file", "", "If non-empty, the path to a local file on disk containing the API to generate. Exclusive with setting --api.")
output = flag.String("output", "", "(optional) Path to source output file. If not specified, the API name and version are used to construct an output path (e.g. tasks/v1).")
apiPackageBase = flag.String("api_pkg_base", "google.golang.org/api", "Go package prefix to use for all generated APIs.")
baseURL = flag.String("base_url", "", "(optional) Override the default service API URL. If empty, the service's root URL will be used.")
headerPath = flag.String("header_path", "", "If non-empty, prepend the contents of this file to generated services.")
gensupportPkg = flag.String("gensupport_pkg", "google.golang.org/api/gensupport", "Go package path of the 'api/gensupport' support package.")
googleapiPkg = flag.String("googleapi_pkg", "google.golang.org/api/googleapi", "Go package path of the 'api/googleapi' support package.")
serviceTypes = []string{"Service", "APIService"}
)
// API represents an API to generate, as well as its state while it's
// generating.
type API struct {
// Fields needed before generating code, to select and find the APIs
// to generate.
// These fields usually come from the "directory item" JSON objects
// that are provided by the googleDiscoveryURL. We unmarshal a directory
// item directly into this struct.
ID string `json:"id"`
Name string `json:"name"`
Version string `json:"version"`
DiscoveryLink string `json:"discoveryRestUrl"` // absolute
doc *disco.Document
// TODO(jba): remove m when we've fully converted to using disco.
m map[string]interface{}
forceJSON []byte // if non-nil, the JSON schema file. else fetched.
usedNames namePool
schemas map[string]*Schema // apiName -> schema
responseTypes map[string]bool
p func(format string, args ...interface{}) // print raw
pn func(format string, args ...interface{}) // print with newline
}
func (a *API) sortedSchemaNames() (names []string) {
for name := range a.schemas {
names = append(names, name)
}
sort.Strings(names)
return
}
func (a *API) Schema(name string) *Schema {
return a.schemas[name]
}
type generateError struct {
api *API
error error
}
func (e *generateError) Error() string {
return fmt.Sprintf("API %s failed to generate code: %v", e.api.ID, e.error)
}
type compileError struct {
api *API
output string
}
func (e *compileError) Error() string {
return fmt.Sprintf("API %s failed to compile:\n%v", e.api.ID, e.output)
}
func main() {
flag.Parse()
if *install {
*build = true
}
var (
apiIds = []string{}
matches = []*API{}
errors = []error{}
)
for _, api := range getAPIs() {
apiIds = append(apiIds, api.ID)
if !api.want() {
continue
}
matches = append(matches, api)
log.Printf("Generating API %s", api.ID)
err := api.WriteGeneratedCode()
if err != nil && err != errNoDoc {
errors = append(errors, &generateError{api, err})
continue
}
if *build && err == nil {
var args []string
if *install {
args = append(args, "install")
} else {
args = append(args, "build")
}
args = append(args, api.Target())
out, err := exec.Command("go", args...).CombinedOutput()
if err != nil {
errors = append(errors, &compileError{api, string(out)})
}
}
}
if len(matches) == 0 {
log.Fatalf("No APIs matched %q; options are %v", *apiToGenerate, apiIds)
}
if len(errors) > 0 {
log.Printf("%d API(s) failed to generate or compile:", len(errors))
for _, ce := range errors {
log.Printf(ce.Error())
}
os.Exit(1)
}
}
func (a *API) want() bool {
if *jsonFile != "" {
// Return true early, before calling a.JSONFile()
// which will require a GOPATH be set. This is for
// integration with Google's build system genrules
// where there is no GOPATH.
return true
}
// Skip this API if we're in cached mode and the files don't exist on disk.
if *useCache {
if _, err := os.Stat(a.JSONFile()); os.IsNotExist(err) {
return false
}
}
return *apiToGenerate == "*" || *apiToGenerate == a.ID
}
func getAPIs() []*API {
if *jsonFile != "" {
return getAPIsFromFile()
}
var bytes []byte
var source string
apiListFile := filepath.Join(genDirRoot(), "api-list.json")
if *useCache {
if !*publicOnly {
log.Fatalf("-cache=true not compatible with -publiconly=false")
}
var err error
bytes, err = ioutil.ReadFile(apiListFile)
if err != nil {
log.Fatal(err)
}
source = apiListFile
} else {
bytes = slurpURL(*apisURL)
if *publicOnly {
if err := writeFile(apiListFile, bytes); err != nil {
log.Fatal(err)
}
}
source = *apisURL
}
apis, err := unmarshalAPIs(bytes)
if err != nil {
log.Fatalf("error decoding JSON in %s: %v", source, err)
}
if !*publicOnly && *apiToGenerate != "*" {
apis = append(apis, apiFromID(*apiToGenerate))
}
return apis
}
func unmarshalAPIs(bytes []byte) ([]*API, error) {
var itemObj struct{ Items []*API }
if err := json.Unmarshal(bytes, &itemObj); err != nil {
return nil, err
}
return itemObj.Items, nil
}
func apiFromID(apiID string) *API {
parts := strings.Split(apiID, ":")
if len(parts) != 2 {
log.Fatalf("malformed API name: %q", apiID)
}
return &API{
ID: apiID,
Name: parts[0],
Version: parts[1],
}
}
// getAPIsFromFile handles the case of generating exactly one API
// from the flag given in --api_json_file
func getAPIsFromFile() []*API {
if *apiToGenerate != "*" {
log.Fatalf("Can't set --api with --api_json_file.")
}
if !*publicOnly {
log.Fatalf("Can't set --publiconly with --api_json_file.")
}
a, err := apiFromFile(*jsonFile)
if err != nil {
log.Fatal(err)
}
return []*API{a}
}
func apiFromFile(file string) (*API, error) {
jsonBytes, err := ioutil.ReadFile(file)
if err != nil {
return nil, fmt.Errorf("Error reading %s: %v", file, err)
}
doc, err := disco.NewDocument(jsonBytes)
if err != nil {
return nil, fmt.Errorf("reading document from %q: %v", file, err)
}
a := &API{
ID: doc.ID,
Name: doc.Name,
Version: doc.Version,
forceJSON: jsonBytes,
doc: doc,
}
return a, nil
}
func writeFile(file string, contents []byte) error {
// Don't write it if the contents are identical.
existing, err := ioutil.ReadFile(file)
if err == nil && (bytes.Equal(existing, contents) || basicallyEqual(existing, contents)) {
return nil
}
outdir := filepath.Dir(file)
if err = os.MkdirAll(outdir, 0755); err != nil {
return fmt.Errorf("failed to Mkdir %s: %v", outdir, err)
}
return ioutil.WriteFile(file, contents, 0644)
}
var ignoreLines = regexp.MustCompile(`(?m)^\s+"(?:etag|revision)": ".+\n`)
// basicallyEqual reports whether a and b are equal except for boring
// differences like ETag updates.
func basicallyEqual(a, b []byte) bool {
return ignoreLines.Match(a) && ignoreLines.Match(b) &&
bytes.Equal(ignoreLines.ReplaceAll(a, nil), ignoreLines.ReplaceAll(b, nil))
}
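// slurpURL fetches urlStr and returns the response body. It exits the program on
// transport errors and returns nil when the response status is 300 or above.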
func slurpURL(urlStr string) []byte {
if *useCache {
log.Fatalf("Invalid use of slurpURL in cached mode for URL %s", urlStr)
}
req, err := http.NewRequest("GET", urlStr, nil)
if err != nil {
log.Fatal(err)
}
if *publicOnly {
req.Header.Add("X-User-IP", "0.0.0.0") // hack
}
res, err := http.DefaultClient.Do(req)
if err != nil {
log.Fatalf("Error fetching URL %s: %v", urlStr, err)
}
if res.StatusCode >= 300 {
log.Printf("WARNING: URL %s served status code %d", urlStr, res.StatusCode)
return nil
}
bs, err := ioutil.ReadAll(res.Body)
if err != nil {
log.Fatalf("Error reading body of URL %s: %v", urlStr, err)
}
return bs
}
func panicf(format string, args ...interface{}) {
panic(fmt.Sprintf(format, args...))
}
// namePool keeps track of used names and assigns free ones based on a
// preferred name
type namePool struct {
m map[string]bool // lazily initialized
}
// oddVersionRE matches unusual API names like directory_v1.
var oddVersionRE = regexp.MustCompile(`^(.+)_(v[\d\.]+)$`)
// renameVersion conditionally rewrites the provided version such
// that the final path component of the import path doesn't look
// like a Go identifier. This keeps the consistency that import paths
// for the generated Go packages look like:
// google.golang.org/api/NAME/v<version>
// and have package NAME.
// See https://github.com/google/google-api-go-client/issues/78
func renameVersion(version string) string {
if version == "alpha" || version == "beta" {
return "v0." + version
}
if m := oddVersionRE.FindStringSubmatch(version); m != nil {
return m[1] + "/" + m[2]
}
return version
}
func (p *namePool) Get(preferred string) string {
if p.m == nil {
p.m = make(map[string]bool)
}
name := preferred
tries := 0
for p.m[name] {
tries++
name = fmt.Sprintf("%s%d", preferred, tries)
}
p.m[name] = true
return name
}
func genDirRoot() string {
if *genDir != "" {
return *genDir
}
paths := filepath.SplitList(os.Getenv("GOPATH"))
if len(paths) == 0 {
log.Fatalf("No GOPATH set.")
}
return filepath.Join(paths[0], "src", "google.golang.org", "api")
}
func (a *API) SourceDir() string {
return filepath.Join(genDirRoot(), a.Package(), renameVersion(a.Version))
}
func (a *API) DiscoveryURL() string {
if a.DiscoveryLink == "" {
log.Fatalf("API %s has no DiscoveryLink", a.ID)
}
return a.DiscoveryLink
}
func (a *API) Package() string {
return strings.ToLower(a.Name)
}
func (a *API) Target() string {
return fmt.Sprintf("%s/%s/%s", *apiPackageBase, a.Package(), renameVersion(a.Version))
}
// ServiceType returns the name of the type to use for the root API struct
// (typically "Service").
func (a *API) ServiceType() string {
switch a.Name {
case "appengine", "content": // retained for historical compatibility.
return "APIService"
default:
for _, t := range serviceTypes {
if _, ok := a.schemas[t]; !ok {
return t
}
}
panic("all service types are used, please consider introducing a new type to serviceTypes.")
}
}
// GetName returns a free top-level function/type identifier in the package.
// It tries to return your preferred match if it's free.
func (a *API) GetName(preferred string) string {
return a.usedNames.Get(preferred)
}
func (a *API) apiBaseURL() string {
var base, rel string
switch {
case *baseURL != "":
base, rel = *baseURL, a.doc.BasePath
case a.doc.RootURL != "":
base, rel = a.doc.RootURL, a.doc.ServicePath
default:
base, rel = *apisURL, a.doc.BasePath
}
return resolveRelative(base, rel)
}
func (a *API) needsDataWrapper() bool {
for _, feature := range a.doc.Features {
if feature == "dataWrapper" {
return true
}
}
return false
}
func (a *API) jsonBytes() []byte {
if a.forceJSON == nil {
var slurp []byte
var err error
if *useCache {
slurp, err = ioutil.ReadFile(a.JSONFile())
if err != nil {
log.Fatal(err)
}
} else {
slurp = slurpURL(a.DiscoveryURL())
if slurp != nil {
// Make sure that keys are sorted by re-marshalling.
d := make(map[string]interface{})
				if err := json.Unmarshal(slurp, &d); err != nil {
					log.Fatal(err)
				}
slurp, err = json.MarshalIndent(d, "", " ")
if err != nil {
log.Fatal(err)
}
}
}
a.forceJSON = slurp
}
return a.forceJSON
}
func (a *API) JSONFile() string {
return filepath.Join(a.SourceDir(), a.Package()+"-api.json")
}
var errNoDoc = errors.New("could not read discovery doc")
// WriteGeneratedCode generates code for a.
// It returns errNoDoc if we couldn't read the discovery doc.
func (a *API) WriteGeneratedCode() error {
genfilename := *output
jsonBytes := a.jsonBytes()
// Skip generation if we don't have the discovery doc.
if jsonBytes == nil {
// No message here, because slurpURL printed one.
return errNoDoc
}
if genfilename == "" {
if err := writeFile(a.JSONFile(), jsonBytes); err != nil {
return err
}
outdir := a.SourceDir()
err := os.MkdirAll(outdir, 0755)
if err != nil {
return fmt.Errorf("failed to Mkdir %s: %v", outdir, err)
}
pkg := a.Package()
genfilename = filepath.Join(outdir, pkg+"-gen.go")
}
code, err := a.GenerateCode()
errw := writeFile(genfilename, code)
if err == nil {
err = errw
}
if err != nil {
return err
}
return nil
}
var docsLink string
func (a *API) GenerateCode() ([]byte, error) {
pkg := a.Package()
jsonBytes := a.jsonBytes()
var err error
if a.doc == nil {
a.doc, err = disco.NewDocument(jsonBytes)
if err != nil {
return nil, err
}
}
// Buffer the output in memory, for gofmt'ing later.
var buf bytes.Buffer
a.p = func(format string, args ...interface{}) {
_, err := fmt.Fprintf(&buf, format, args...)
if err != nil {
panic(err)
}
}
a.pn = func(format string, args ...interface{}) {
a.p(format+"\n", args...)
}
wf := func(path string) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
_, err = io.Copy(&buf, f)
return err
}
p, pn := a.p, a.pn
if *headerPath != "" {
if err := wf(*headerPath); err != nil {
return nil, err
}
}
pn("// Package %s provides access to the %s.", pkg, a.doc.Title)
if r := replacementPackage[pkg]; r != "" {
pn("//")
pn("// This package is DEPRECATED. Use package %s instead.", r)
}
docsLink = a.doc.DocumentationLink
if docsLink != "" {
pn("//")
pn("// See %s", docsLink)
}
pn("//\n// Usage example:")
pn("//")
pn("// import %q", a.Target())
pn("// ...")
pn("// %sService, err := %s.New(oauthHttpClient)", pkg, pkg)
pn("package %s // import %q", pkg, a.Target())
p("\n")
pn("import (")
for _, imp := range []string{
"bytes",
"context",
"encoding/json",
"errors",
"fmt",
"io",
"net/http",
"net/url",
"strconv",
"strings",
} {
pn(" %q", imp)
}
pn("")
for _, imp := range []struct {
pkg string
lname string
}{
{*gensupportPkg, "gensupport"},
{*googleapiPkg, "googleapi"},
} {
pn(" %s %q", imp.lname, imp.pkg)
}
pn(")")
pn("\n// Always reference these packages, just in case the auto-generated code")
pn("// below doesn't.")
pn("var _ = bytes.NewBuffer")
pn("var _ = strconv.Itoa")
pn("var _ = fmt.Sprintf")
pn("var _ = json.NewDecoder")
pn("var _ = io.Copy")
pn("var _ = url.Parse")
pn("var _ = gensupport.MarshalJSON")
pn("var _ = googleapi.Version")
pn("var _ = errors.New")
pn("var _ = strings.Replace")
pn("var _ = context.Canceled")
pn("")
pn("const apiId = %q", a.doc.ID)
pn("const apiName = %q", a.doc.Name)
pn("const apiVersion = %q", a.doc.Version)
pn("const basePath = %q", a.apiBaseURL())
a.generateScopeConstants()
a.PopulateSchemas()
service := a.ServiceType()
// Reserve names (ignore return value; we're the first caller).
a.GetName("New")
a.GetName(service)
pn("func New(client *http.Client) (*%s, error) {", service)
pn("if client == nil { return nil, errors.New(\"client is nil\") }")
pn("s := &%s{client: client, BasePath: basePath}", service)
for _, res := range a.doc.Resources { // add top level resources.
pn("s.%s = New%s(s)", resourceGoField(res, nil), resourceGoType(res))
}
pn("return s, nil")
pn("}")
pn("\ntype %s struct {", service)
pn(" client *http.Client")
pn(" BasePath string // API endpoint base URL")
pn(" UserAgent string // optional additional User-Agent fragment")
for _, res := range a.doc.Resources {
pn("\n\t%s\t*%s", resourceGoField(res, nil), resourceGoType(res))
}
pn("}")
pn("\nfunc (s *%s) userAgent() string {", service)
pn(` if s.UserAgent == "" { return googleapi.UserAgent }`)
pn(` return googleapi.UserAgent + " " + s.UserAgent`)
pn("}\n")
for _, res := range a.doc.Resources {
a.generateResource(res)
}
a.responseTypes = make(map[string]bool)
for _, meth := range a.APIMethods() {
meth.cacheResponseTypes(a)
}
for _, res := range a.doc.Resources {
a.cacheResourceResponseTypes(res)
}
for _, name := range a.sortedSchemaNames() {
a.schemas[name].writeSchemaCode(a)
}
for _, meth := range a.APIMethods() {
meth.generateCode()
}
for _, res := range a.doc.Resources {
a.generateResourceMethods(res)
}
clean, err := format.Source(buf.Bytes())
if err != nil {
return buf.Bytes(), err
}
return clean, nil
}
func (a *API) generateScopeConstants() {
scopes := a.doc.Auth.OAuth2Scopes
if len(scopes) == 0 {
return
}
a.pn("// OAuth2 scopes used by this API.")
a.pn("const (")
n := 0
for _, scope := range scopes {
if n > 0 {
a.p("\n")
}
n++
ident := scopeIdentifierFromURL(scope.URL)
if scope.Description != "" {
a.p("%s", asComment("\t", scope.Description))
}
a.pn("\t%s = %q", ident, scope.URL)
}
a.p(")\n\n")
}
func scopeIdentifierFromURL(urlStr string) string {
const prefix = "https://www.googleapis.com/auth/"
if !strings.HasPrefix(urlStr, prefix) {
const https = "https://"
if !strings.HasPrefix(urlStr, https) {
log.Fatalf("Unexpected oauth2 scope %q doesn't start with %q", urlStr, https)
}
ident := validGoIdentifer(depunct(urlStr[len(https):], true)) + "Scope"
return ident
}
ident := validGoIdentifer(initialCap(urlStr[len(prefix):])) + "Scope"
return ident
}
// Schema is a disco.Schema that has been bestowed an identifier, whether by
// having an "id" field at the top of the schema or with an
// automatically generated one in populateSubSchemas.
//
// TODO: While sub-types shouldn't need to be promoted to schemas,
// API.GenerateCode iterates over API.schemas to figure out what
// top-level Go types to write. These should be separate concerns.
type Schema struct {
api *API
typ *disco.Schema
apiName string // the native API-defined name of this type
goName string // lazily populated by GoName
goReturnType string // lazily populated by GoReturnType
props []*Property
}
type Property struct {
s *Schema // the containing Schema
p *disco.Property
assignedGoName string
}
func (p *Property) Type() *disco.Schema {
return p.p.Schema
}
func (p *Property) GoName() string {
return initialCap(p.p.Name)
}
func (p *Property) Default() string {
return p.p.Schema.Default
}
func (p *Property) Description() string {
return p.p.Schema.Description
}
func (p *Property) Enum() ([]string, bool) {
typ := p.p.Schema
if typ.Enums != nil {
return typ.Enums, true
}
// Check if this has an array of string enums.
if typ.ItemSchema != nil {
if enums := typ.ItemSchema.Enums; enums != nil && typ.ItemSchema.Type == "string" {
return enums, true
}
}
return nil, false
}
func (p *Property) EnumDescriptions() []string {
if desc := p.p.Schema.EnumDescriptions; desc != nil {
return desc
}
// Check if this has an array of string enum descriptions.
if items := p.p.Schema.ItemSchema; items != nil {
if desc := items.EnumDescriptions; desc != nil {
return desc
}
}
return nil
}
func (p *Property) Pattern() (string, bool) {
return p.p.Schema.Pattern, (p.p.Schema.Pattern != "")
}
func (p *Property) TypeAsGo() string {
return p.s.api.typeAsGo(p.Type(), false)
}
// A FieldName uniquely identifies a field within a Schema struct for an API.
type fieldName struct {
api string // The ID of an API.
schema string // The Go name of a Schema struct.
field string // The Go name of a field.
}
// pointerFields is a list of fields that should use a pointer type.
// This makes it possible to distinguish between a field being unset vs having
// an empty value.
var pointerFields = []fieldName{
{api: "androidpublisher:v1.1", schema: "InappPurchase", field: "PurchaseType"},
{api: "androidpublisher:v2", schema: "ProductPurchase", field: "PurchaseType"},
{api: "androidpublisher:v3", schema: "ProductPurchase", field: "PurchaseType"},
{api: "androidpublisher:v2", schema: "SubscriptionPurchase", field: "CancelReason"},
{api: "androidpublisher:v2", schema: "SubscriptionPurchase", field: "PaymentState"},
{api: "androidpublisher:v2", schema: "SubscriptionPurchase", field: "PurchaseType"},
{api: "androidpublisher:v3", schema: "SubscriptionPurchase", field: "PurchaseType"},
{api: "cloudmonitoring:v2beta2", schema: "Point", field: "BoolValue"},
{api: "cloudmonitoring:v2beta2", schema: "Point", field: "DoubleValue"},
{api: "cloudmonitoring:v2beta2", schema: "Point", field: "Int64Value"},
{api: "cloudmonitoring:v2beta2", schema: "Point", field: "StringValue"},
{api: "compute:alpha", schema: "Scheduling", field: "AutomaticRestart"},
{api: "compute:beta", schema: "MetadataItems", field: "Value"},
{api: "compute:beta", schema: "Scheduling", field: "AutomaticRestart"},
{api: "compute:v1", schema: "MetadataItems", field: "Value"},
{api: "compute:v1", schema: "Scheduling", field: "AutomaticRestart"},
{api: "content:v2", schema: "AccountUser", field: "Admin"},
{api: "datastore:v1beta2", schema: "Property", field: "BlobKeyValue"},
{api: "datastore:v1beta2", schema: "Property", field: "BlobValue"},
{api: "datastore:v1beta2", schema: "Property", field: "BooleanValue"},
{api: "datastore:v1beta2", schema: "Property", field: "DateTimeValue"},
{api: "datastore:v1beta2", schema: "Property", field: "DoubleValue"},
{api: "datastore:v1beta2", schema: "Property", field: "Indexed"},
{api: "datastore:v1beta2", schema: "Property", field: "IntegerValue"},
{api: "datastore:v1beta2", schema: "Property", field: "StringValue"},
{api: "datastore:v1beta3", schema: "Value", field: "BlobValue"},
{api: "datastore:v1beta3", schema: "Value", field: "BooleanValue"},
{api: "datastore:v1beta3", schema: "Value", field: "DoubleValue"},
{api: "datastore:v1beta3", schema: "Value", field: "IntegerValue"},
{api: "datastore:v1beta3", schema: "Value", field: "StringValue"},
{api: "datastore:v1beta3", schema: "Value", field: "TimestampValue"},
{api: "genomics:v1beta2", schema: "Dataset", field: "IsPublic"},
{api: "monitoring:v3", schema: "TypedValue", field: "BoolValue"},
{api: "monitoring:v3", schema: "TypedValue", field: "DoubleValue"},
{api: "monitoring:v3", schema: "TypedValue", field: "Int64Value"},
{api: "monitoring:v3", schema: "TypedValue", field: "StringValue"},
{api: "servicecontrol:v1", schema: "MetricValue", field: "BoolValue"},
{api: "servicecontrol:v1", schema: "MetricValue", field: "DoubleValue"},
{api: "servicecontrol:v1", schema: "MetricValue", field: "Int64Value"},
{api: "servicecontrol:v1", schema: "MetricValue", field: "StringValue"},
{api: "sqladmin:v1beta4", schema: "Settings", field: "StorageAutoResize"},
{api: "storage:v1", schema: "BucketLifecycleRuleCondition", field: "IsLive"},
{api: "storage:v1beta2", schema: "BucketLifecycleRuleCondition", field: "IsLive"},
{api: "tasks:v1", schema: "Task", field: "Completed"},
{api: "youtube:v3", schema: "ChannelSectionSnippet", field: "Position"},
}
// forcePointerType reports whether p should be represented as a pointer type in its parent schema struct.
func (p *Property) forcePointerType() bool {
if p.UnfortunateDefault() {
return true
}
name := fieldName{api: p.s.api.ID, schema: p.s.GoName(), field: p.GoName()}
for _, pf := range pointerFields {
if pf == name {
return true
}
}
return false
}
// UnfortunateDefault reports whether p may be set to a zero value, but has a non-zero default.
func (p *Property) UnfortunateDefault() bool {
switch p.TypeAsGo() {
default:
return false
case "bool":
return p.Default() == "true"
case "string":
if p.Default() == "" {
return false
}
// String fields are considered to "allow" a zero value if either:
// (a) they are an enum, and one of the permitted enum values is the empty string, or
// (b) they have a validation pattern which matches the empty string.
pattern, hasPat := p.Pattern()
enum, hasEnum := p.Enum()
if hasPat && hasEnum {
log.Printf("Encountered enum property which also has a pattern: %#v", p)
return false // don't know how to handle this, so ignore.
}
return (hasPat && emptyPattern(pattern)) ||
(hasEnum && emptyEnum(enum))
case "float64", "int64", "uint64", "int32", "uint32":
if p.Default() == "" {
return false
}
if f, err := strconv.ParseFloat(p.Default(), 64); err == nil {
return f != 0.0
}
// The default value has an unexpected form. Whatever it is, it's non-zero.
return true
}
}
// emptyPattern reports whether a pattern matches the empty string.
func emptyPattern(pattern string) bool {
if re, err := regexp.Compile(pattern); err == nil {
return re.MatchString("")
}
log.Printf("Encountered bad pattern: %s", pattern)
return false
}
// emptyEnum reports whether a property enum list contains the empty string.
func emptyEnum(enum []string) bool {
for _, val := range enum {
if val == "" {
return true
}
}
return false
}
func (a *API) typeAsGo(s *disco.Schema, elidePointers bool) string {
switch s.Kind {
case disco.SimpleKind:
return mustSimpleTypeConvert(s.Type, s.Format)
case disco.ArrayKind:
as := s.ElementSchema()
if as.Type == "string" {
switch as.Format {
case "int64":
return "googleapi.Int64s"
case "uint64":
return "googleapi.Uint64s"
case "int32":
return "googleapi.Int32s"
case "uint32":
return "googleapi.Uint32s"
case "float64":
return "googleapi.Float64s"
}
}
return "[]" + a.typeAsGo(as, elidePointers)
case disco.ReferenceKind:
rs := s.RefSchema
if rs.Kind == disco.SimpleKind {
// Simple top-level schemas get named types (see writeSchemaCode).
// Use the name instead of using the equivalent simple Go type.
return a.schemaNamed(rs.Name).GoName()
}
return a.typeAsGo(rs, elidePointers)
case disco.MapKind:
es := s.ElementSchema()
if es.Type == "string" {
// If the element schema has a type "string", it's going to be
// transmitted as a string, and the Go map type must reflect that.
// This is true even if the format is, say, "int64". When type =
// "string" and format = "int64" at top level, we can use the json
// "string" tag option to unmarshal the string to an int64, but
// inside a map we can't.
return "map[string]string"
}
// Due to historical baggage (maps used to be a separate code path),
// the element types of maps never have pointers in them. From this
// level down, elide pointers in types.
return "map[string]" + a.typeAsGo(es, true)
case disco.AnyStructKind:
return "googleapi.RawMessage"
case disco.StructKind:
tls := a.schemaNamed(s.Name)
if elidePointers || s.Variant != nil {
return tls.GoName()
}
return "*" + tls.GoName()
default:
panic(fmt.Sprintf("unhandled typeAsGo for %+v", s))
}
}
func (a *API) schemaNamed(name string) *Schema {
s := a.schemas[name]
if s == nil {
panicf("no top-level schema named %q", name)
}
return s
}
func (s *Schema) properties() []*Property {
if s.props != nil {
return s.props
}
if s.typ.Kind != disco.StructKind {
panic("called properties on non-object schema")
}
for _, p := range s.typ.Properties {
s.props = append(s.props, &Property{
s: s,
p: p,
})
}
return s.props
}
func (s *Schema) HasContentType() bool {
for _, p := range s.properties() {
if p.GoName() == "ContentType" && p.TypeAsGo() == "string" {
return true
}
}
return false
}
func (s *Schema) populateSubSchemas() (outerr error) {
defer func() {
r := recover()
if r == nil {
return
}
outerr = fmt.Errorf("%v", r)
}()
addSubStruct := func(subApiName string, t *disco.Schema) {
if s.api.schemas[subApiName] != nil {
panic("dup schema apiName: " + subApiName)
}
if t.Name != "" {
panic("subtype already has name: " + t.Name)
}
t.Name = subApiName
subs := &Schema{
api: s.api,
typ: t,
apiName: subApiName,
}
s.api.schemas[subApiName] = subs
err := subs.populateSubSchemas()
if err != nil {
panicf("in sub-struct %q: %v", subApiName, err)
}
}
switch s.typ.Kind {
case disco.StructKind:
for _, p := range s.properties() {
subApiName := fmt.Sprintf("%s.%s", s.apiName, p.p.Name)
switch p.Type().Kind {
case disco.SimpleKind, disco.ReferenceKind, disco.AnyStructKind:
// Do nothing.
case disco.MapKind:
mt := p.Type().ElementSchema()
if mt.Kind == disco.SimpleKind || mt.Kind == disco.ReferenceKind {
continue
}
addSubStruct(subApiName, mt)
case disco.ArrayKind:
at := p.Type().ElementSchema()
if at.Kind == disco.SimpleKind || at.Kind == disco.ReferenceKind {
continue
}
addSubStruct(subApiName, at)
case disco.StructKind:
addSubStruct(subApiName, p.Type())
default:
panicf("Unknown type for %q: %v", subApiName, p.Type())
}
}
case disco.ArrayKind:
subApiName := fmt.Sprintf("%s.Item", s.apiName)
switch at := s.typ.ElementSchema(); at.Kind {
case disco.SimpleKind, disco.ReferenceKind, disco.AnyStructKind:
// Do nothing.
case disco.MapKind:
mt := at.ElementSchema()
if k := mt.Kind; k != disco.SimpleKind && k != disco.ReferenceKind {
addSubStruct(subApiName, mt)
}
case disco.ArrayKind:
at := at.ElementSchema()
if k := at.Kind; k != disco.SimpleKind && k != disco.ReferenceKind {
addSubStruct(subApiName, at)
}
case disco.StructKind:
addSubStruct(subApiName, at)
default:
panicf("Unknown array type for %q: %v", subApiName, at)
}
case disco.AnyStructKind, disco.MapKind, disco.SimpleKind, disco.ReferenceKind:
// Do nothing.
default:
fmt.Fprintf(os.Stderr, "in populateSubSchemas, schema is: %v", s.typ)
panicf("populateSubSchemas: unsupported type for schema %q", s.apiName)
panic("unreachable")
}
return nil
}
// GoName returns (or creates and returns) the bare Go name
// of the apiName, making sure that it's a proper Go identifier
// and doesn't conflict with an existing name.
func (s *Schema) GoName() string {
if s.goName == "" {
if s.typ.Kind == disco.MapKind {
s.goName = s.api.typeAsGo(s.typ, false)
} else {
base := initialCap(s.apiName)
s.goName = s.api.GetName(base)
if base == "Service" && s.goName != "Service" {
// Detect the case where a resource is going to clash with the
// root service object.
panicf("Clash on name Service")
}
}
}
return s.goName
}
// GoReturnType returns the Go type to use as the return type.
// If a type is a struct, it will return *StructType,
// for a map it will return map[string]ValueType,
// for (not yet supported) slices it will return []ValueType.
func (s *Schema) GoReturnType() string {
if s.goReturnType == "" {
if s.typ.Kind == disco.MapKind {
s.goReturnType = s.GoName()
} else {
s.goReturnType = "*" + s.GoName()
}
}
return s.goReturnType
}
func (s *Schema) writeSchemaCode(api *API) {
switch s.typ.Kind {
case disco.SimpleKind:
apitype := s.typ.Type
typ := mustSimpleTypeConvert(apitype, s.typ.Format)
s.api.pn("\ntype %s %s", s.GoName(), typ)
case disco.StructKind:
s.writeSchemaStruct(api)
case disco.MapKind, disco.AnyStructKind:
// Do nothing.
case disco.ArrayKind:
log.Printf("TODO writeSchemaCode for arrays for %s", s.GoName())
default:
fmt.Fprintf(os.Stderr, "in writeSchemaCode, schema is: %+v", s.typ)
panicf("writeSchemaCode: unsupported type for schema %q", s.apiName)
}
}
func (s *Schema) writeVariant(api *API, v *disco.Variant) {
s.api.p("\ntype %s map[string]interface{}\n\n", s.GoName())
// Write out the "Type" method that identifies the variant type.
s.api.pn("func (t %s) Type() string {", s.GoName())
s.api.pn(" return googleapi.VariantType(t)")
s.api.p("}\n\n")
// Write out helper methods to convert each possible variant.
for _, m := range v.Map {
if m.TypeValue == "" && m.Ref == "" {
log.Printf("TODO variant %s ref %s not yet supported.", m.TypeValue, m.Ref)
continue
}
s.api.pn("func (t %s) %s() (r %s, ok bool) {", s.GoName(), initialCap(m.TypeValue), m.Ref)
s.api.pn(" if t.Type() != %q {", initialCap(m.TypeValue))
s.api.pn(" return r, false")
s.api.pn(" }")
s.api.pn(" ok = googleapi.ConvertVariant(map[string]interface{}(t), &r)")
s.api.pn(" return r, ok")
s.api.p("}\n\n")
}
}
func (s *Schema) Description() string {
return s.typ.Description
}
func (s *Schema) writeSchemaStruct(api *API) {
if v := s.typ.Variant; v != nil {
s.writeVariant(api, v)
return
}
s.api.p("\n")
des := s.Description()
if des != "" {
s.api.p("%s", asComment("", fmt.Sprintf("%s: %s", s.GoName(), des)))
}
s.api.pn("type %s struct {", s.GoName())
np := new(namePool)
forceSendName := np.Get("ForceSendFields")
nullFieldsName := np.Get("NullFields")
if s.isResponseType() {
np.Get("ServerResponse") // reserve the name
}
firstFieldName := "" // used to store a struct field name for use in documentation.
for i, p := range s.properties() {
if i > 0 {
s.api.p("\n")
}
pname := np.Get(p.GoName())
if pname[0] == '@' {
// HACK(cbro): ignore JSON-LD special fields until we can figure out
// the correct Go representation for them.
continue
}
p.assignedGoName = pname
des := p.Description()
if des != "" {
s.api.p("%s", asComment("\t", fmt.Sprintf("%s: %s", pname, des)))
}
addFieldValueComments(s.api.p, p, "\t", des != "")
var extraOpt string
if p.Type().IsIntAsString() {
extraOpt += ",string"
}
typ := p.TypeAsGo()
if p.forcePointerType() {
typ = "*" + typ
}
s.api.pn(" %s %s `json:\"%s,omitempty%s\"`", pname, typ, p.p.Name, extraOpt)
if firstFieldName == "" {
firstFieldName = pname
}
}
if s.isResponseType() {
if firstFieldName != "" {
s.api.p("\n")
}
s.api.p("%s", asComment("\t", "ServerResponse contains the HTTP response code and headers from the server."))
s.api.pn(" googleapi.ServerResponse `json:\"-\"`")
}
if firstFieldName == "" {
// There were no fields in the struct, so there is no point
// adding any custom JSON marshaling code.
s.api.pn("}")
return
}
commentFmtStr := "%s is a list of field names (e.g. %q) to " +
"unconditionally include in API requests. By default, fields " +
"with empty values are omitted from API requests. However, " +
"any non-pointer, non-interface field appearing in %s will " +
"be sent to the server regardless of whether the field is " +
"empty or not. This may be used to include empty fields in " +
"Patch requests."
comment := fmt.Sprintf(commentFmtStr, forceSendName, firstFieldName, forceSendName)
s.api.p("\n")
s.api.p("%s", asComment("\t", comment))
s.api.pn("\t%s []string `json:\"-\"`", forceSendName)
commentFmtStr = "%s is a list of field names (e.g. %q) to " +
"include in API requests with the JSON null value. " +
"By default, fields with empty values are omitted from API requests. However, " +
"any field with an empty value appearing in %s will be sent to the server as null. " +
"It is an error if a field in this list has a non-empty value. This may be used to " +
"include null fields in Patch requests."
comment = fmt.Sprintf(commentFmtStr, nullFieldsName, firstFieldName, nullFieldsName)
s.api.p("\n")
s.api.p("%s", asComment("\t", comment))
s.api.pn("\t%s []string `json:\"-\"`", nullFieldsName)
s.api.pn("}")
s.writeSchemaMarshal(forceSendName, nullFieldsName)
s.writeSchemaUnmarshal()
}
// writeSchemaMarshal writes a custom MarshalJSON function for s, which allows
// fields to be explicitly transmitted by listing them in the field identified
// by forceSendFieldName, and allows fields to be transmitted with the null value
// by listing them in the field identified by nullFieldsName.
func (s *Schema) writeSchemaMarshal(forceSendFieldName, nullFieldsName string) {
s.api.pn("func (s *%s) MarshalJSON() ([]byte, error) {", s.GoName())
s.api.pn("\ttype NoMethod %s", s.GoName())
// pass schema as methodless type to prevent subsequent calls to MarshalJSON from recursing indefinitely.
s.api.pn("\traw := NoMethod(*s)")
s.api.pn("\treturn gensupport.MarshalJSON(raw, s.%s, s.%s)", forceSendFieldName, nullFieldsName)
s.api.pn("}")
}
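// For a schema whose Go name is Foo (a placeholder), the method emitted above
// looks roughly like:
//
//	func (s *Foo) MarshalJSON() ([]byte, error) {
//		type NoMethod Foo
//		raw := NoMethod(*s)
//		return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
//	}
//
// ForceSendFields and NullFields are the names reserved in writeSchemaStruct,
// unless a property of the schema already claims one of them.
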
func (s *Schema) writeSchemaUnmarshal() {
var floatProps []*Property
for _, p := range s.properties() {
if p.p.Schema.Type == "number" {
floatProps = append(floatProps, p)
}
}
if len(floatProps) == 0 {
return
}
pn := s.api.pn
pn("\nfunc (s *%s) UnmarshalJSON(data []byte) error {", s.GoName())
pn(" type NoMethod %s", s.GoName()) // avoid infinite recursion
pn(" var s1 struct {")
// Hide the float64 fields of the schema with fields that correctly
// unmarshal special values.
for _, p := range floatProps {
typ := "gensupport.JSONFloat64"
if p.forcePointerType() {
typ = "*" + typ
}
pn("%s %s `json:\"%s\"`", p.assignedGoName, typ, p.p.Name)
}
pn(" *NoMethod") // embed the schema
pn(" }")
// Set the schema value into the wrapper so its other fields are unmarshaled.
pn(" s1.NoMethod = (*NoMethod)(s)")
pn(" if err := json.Unmarshal(data, &s1); err != nil {")
pn(" return err")
pn(" }")
// Copy each shadowing field into the field it shadows.
for _, p := range floatProps {
n := p.assignedGoName
if p.forcePointerType() {
pn("if s1.%s != nil { s.%s = (*float64)(s1.%s) }", n, n, n)
} else {
pn("s.%s = float64(s1.%s)", n, n)
}
}
pn(" return nil")
pn("}")
}
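// Sketch of the generated output: for a schema Foo with one "number" property
// whose Go field is Confidence (both names hypothetical), the emitted method is
// approximately:
//
//	func (s *Foo) UnmarshalJSON(data []byte) error {
//		type NoMethod Foo
//		var s1 struct {
//			Confidence gensupport.JSONFloat64 `json:"confidence"`
//			*NoMethod
//		}
//		s1.NoMethod = (*NoMethod)(s)
//		if err := json.Unmarshal(data, &s1); err != nil {
//			return err
//		}
//		s.Confidence = float64(s1.Confidence)
//		return nil
//	}
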
// isResponseType returns true for all types that are used as a response.
func (s *Schema) isResponseType() bool {
return s.api.responseTypes["*"+s.goName]
}
// PopulateSchemas reads all the API types ("schemas") from the JSON file
// and converts them to *Schema instances, returning an identically
// keyed map, additionally containing subresources. For instance,
//
// A resource "Foo" of type "object" with a property "bar", also of type
// "object" (an anonymous sub-resource), will get a synthetic API name
// of "Foo.bar".
//
// A resource "Foo" of type "array" with an "items" of type "object"
// will get a synthetic API name of "Foo.Item".
func (a *API) PopulateSchemas() {
if a.schemas != nil {
panic("")
}
a.schemas = make(map[string]*Schema)
for name, ds := range a.doc.Schemas {
s := &Schema{
api: a,
apiName: name,
typ: ds,
}
a.schemas[name] = s
err := s.populateSubSchemas()
if err != nil {
panicf("Error populating schema with API name %q: %v", name, err)
}
}
}
func (a *API) generateResource(r *disco.Resource) {
pn := a.pn
t := resourceGoType(r)
pn(fmt.Sprintf("func New%s(s *%s) *%s {", t, a.ServiceType(), t))
pn("rs := &%s{s : s}", t)
for _, res := range r.Resources {
pn("rs.%s = New%s(s)", resourceGoField(res, r), resourceGoType(res))
}
pn("return rs")
pn("}")
pn("\ntype %s struct {", t)
pn(" s *%s", a.ServiceType())
for _, res := range r.Resources {
pn("\n\t%s\t*%s", resourceGoField(res, r), resourceGoType(res))
}
pn("}")
for _, res := range r.Resources {
a.generateResource(res)
}
}
func (a *API) cacheResourceResponseTypes(r *disco.Resource) {
for _, meth := range a.resourceMethods(r) {
meth.cacheResponseTypes(a)
}
for _, res := range r.Resources {
a.cacheResourceResponseTypes(res)
}
}
func (a *API) generateResourceMethods(r *disco.Resource) {
for _, meth := range a.resourceMethods(r) {
meth.generateCode()
}
for _, res := range r.Resources {
a.generateResourceMethods(res)
}
}
func resourceGoField(r, parent *disco.Resource) string {
// Avoid conflicts with method names.
und := ""
if parent != nil {
for _, m := range parent.Methods {
if m.Name == r.Name {
und = "_"
break
}
}
}
// Note: initialCap(r.Name + "_") doesn't work because initialCap calls depunct.
return initialCap(r.Name) + und
}
func resourceGoType(r *disco.Resource) string {
return initialCap(r.FullName + "Service")
}
func (a *API) resourceMethods(r *disco.Resource) []*Method {
ms := []*Method{}
for _, m := range r.Methods {
ms = append(ms, &Method{
api: a,
r: r,
m: m,
})
}
return ms
}
type Method struct {
api *API
	r *disco.Resource // or nil if an API-level (top-level) method
m *disco.Method
params []*Param // all Params, of each type, lazily set by first call of Params method.
}
func (m *Method) Id() string {
return m.m.ID
}
func (m *Method) responseType() *Schema {
return m.api.schemas[m.m.Response.RefSchema.Name]
}
func (m *Method) supportsMediaUpload() bool {
return m.m.MediaUpload != nil
}
func (m *Method) mediaUploadPath() string {
return m.m.MediaUpload.Protocols["simple"].Path
}
func (m *Method) supportsMediaDownload() bool {
if m.supportsMediaUpload() {
// storage.objects.insert claims support for download in
// addition to upload but attempting to do so fails.
// This situation doesn't apply to any other methods.
return false
}
return m.m.SupportsMediaDownload
}
func (m *Method) supportsPaging() (*pageTokenGenerator, string, bool) {
ptg := m.pageTokenGenerator()
if ptg == nil {
return nil, "", false
}
// Check that the response type has the next page token.
s := m.responseType()
if s == nil || s.typ.Kind != disco.StructKind {
return nil, "", false
}
for _, prop := range s.properties() {
if isPageTokenName(prop.p.Name) && prop.Type().Type == "string" {
return ptg, prop.GoName(), true
}
}
return nil, "", false
}
type pageTokenGenerator struct {
isParam bool // is the page token a URL parameter?
name string // param or request field name
requestName string // empty for URL param
}
func (p *pageTokenGenerator) genGet() string {
if p.isParam {
return fmt.Sprintf("c.urlParams_.Get(%q)", p.name)
}
return fmt.Sprintf("c.%s.%s", p.requestName, p.name)
}
func (p *pageTokenGenerator) genSet(valueExpr string) string {
if p.isParam {
return fmt.Sprintf("c.%s(%s)", initialCap(p.name), valueExpr)
}
return fmt.Sprintf("c.%s.%s = %s", p.requestName, p.name, valueExpr)
}
func (p *pageTokenGenerator) genDeferBody() string {
if p.isParam {
return p.genSet(p.genGet())
}
return fmt.Sprintf("func (pt string) { %s }(%s)", p.genSet("pt"), p.genGet())
}
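// Illustrative sketch (not called anywhere): for a page token passed as a URL
// parameter, the deferred statement emitted into the generated Pages method is
// `c.PageToken(c.urlParams_.Get("pageToken"))`, which restores the caller's
// original token once iteration finishes.
func examplePageTokenDefer() string {
	ptg := &pageTokenGenerator{isParam: true, name: "pageToken"}
	return ptg.genDeferBody() // `c.PageToken(c.urlParams_.Get("pageToken"))`
}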
// pageTokenGenerator returns a pageTokenGenerator that will generate code to
// get/set the page token for a subsequent page in the context of the generated
// Pages method. It returns nil if there is no page token.
func (m *Method) pageTokenGenerator() *pageTokenGenerator {
matches := m.grepParams(func(p *Param) bool { return isPageTokenName(p.p.Name) })
switch len(matches) {
case 1:
if matches[0].p.Required {
// The page token is a required parameter (e.g. because there is
// a separate API call to start an iteration), and so the relevant
// call factory method takes the page token instead.
return nil
}
n := matches[0].p.Name
return &pageTokenGenerator{true, n, ""}
case 0: // No URL parameter, but maybe a request field.
if m.m.Request == nil {
return nil
}
rs := m.m.Request
if rs.RefSchema != nil {
rs = rs.RefSchema
}
for _, p := range rs.Properties {
if isPageTokenName(p.Name) {
return &pageTokenGenerator{false, initialCap(p.Name), validGoIdentifer(strings.ToLower(rs.Name))}
}
}
return nil
default:
panicf("too many page token parameters for method %s", m.m.Name)
return nil
}
}
func isPageTokenName(s string) bool {
return s == "pageToken" || s == "nextPageToken"
}
func (m *Method) Params() []*Param {
if m.params == nil {
for _, p := range m.m.Parameters {
m.params = append(m.params, &Param{
method: m,
p: p,
})
}
}
return m.params
}
func (m *Method) grepParams(f func(*Param) bool) []*Param {
matches := make([]*Param, 0)
for _, param := range m.Params() {
if f(param) {
matches = append(matches, param)
}
}
return matches
}
func (m *Method) NamedParam(name string) *Param {
matches := m.grepParams(func(p *Param) bool {
return p.p.Name == name
})
if len(matches) < 1 {
log.Panicf("failed to find named parameter %q", name)
}
if len(matches) > 1 {
log.Panicf("found multiple parameters for parameter name %q", name)
}
return matches[0]
}
func (m *Method) OptParams() []*Param {
return m.grepParams(func(p *Param) bool {
return !p.p.Required
})
}
func (meth *Method) cacheResponseTypes(api *API) {
if retType := responseType(api, meth.m); retType != "" && strings.HasPrefix(retType, "*") {
api.responseTypes[retType] = true
}
}
// convertMultiParams builds a []string temp variable from a slice
// of non-strings and returns the name of the temp variable.
func convertMultiParams(a *API, param string) string {
a.pn(" var %v_ []string", param)
a.pn(" for _, v := range %v {", param)
a.pn(" %v_ = append(%v_, fmt.Sprint(v))", param, param)
a.pn(" }")
return param + "_"
}
func (meth *Method) generateCode() {
	res := meth.r // may be nil for a top-level method
a := meth.api
p, pn := a.p, a.pn
pn("\n// method id %q:", meth.Id())
retType := responseType(a, meth.m)
retTypeComma := retType
if retTypeComma != "" {
retTypeComma += ", "
}
args := meth.NewArguments()
methodName := initialCap(meth.m.Name)
prefix := ""
if res != nil {
prefix = initialCap(res.FullName)
}
callName := a.GetName(prefix + methodName + "Call")
pn("\ntype %s struct {", callName)
pn(" s *%s", a.ServiceType())
for _, arg := range args.l {
if arg.location != "query" {
pn(" %s %s", arg.goname, arg.gotype)
}
}
pn(" urlParams_ gensupport.URLParams")
httpMethod := meth.m.HTTPMethod
if httpMethod == "GET" {
pn(" ifNoneMatch_ string")
}
if meth.supportsMediaUpload() {
pn(" mediaInfo_ *gensupport.MediaInfo")
}
pn(" ctx_ context.Context")
pn(" header_ http.Header")
pn("}")
p("\n%s", asComment("", methodName+": "+meth.m.Description))
if res != nil {
if url := canonicalDocsURL[fmt.Sprintf("%v%v/%v", docsLink, res.Name, meth.m.Name)]; url != "" {
pn("// For details, see %v", url)
}
}
var servicePtr string
if res == nil {
pn("func (s *Service) %s(%s) *%s {", methodName, args, callName)
servicePtr = "s"
} else {
pn("func (r *%s) %s(%s) *%s {", resourceGoType(res), methodName, args, callName)
servicePtr = "r.s"
}
pn(" c := &%s{s: %s, urlParams_: make(gensupport.URLParams)}", callName, servicePtr)
for _, arg := range args.l {
// TODO(gmlewis): clean up and consolidate this section.
// See: https://code-review.googlesource.com/#/c/3520/18/google-api-go-generator/gen.go
if arg.location == "query" {
switch arg.gotype {
case "[]string":
pn(" c.urlParams_.SetMulti(%q, append([]string{}, %v...))", arg.apiname, arg.goname)
case "string":
pn(" c.urlParams_.Set(%q, %v)", arg.apiname, arg.goname)
default:
if strings.HasPrefix(arg.gotype, "[]") {
tmpVar := convertMultiParams(a, arg.goname)
pn(" c.urlParams_.SetMulti(%q, %v)", arg.apiname, tmpVar)
} else {
pn(" c.urlParams_.Set(%q, fmt.Sprint(%v))", arg.apiname, arg.goname)
}
}
continue
}
if arg.gotype == "[]string" {
pn(" c.%s = append([]string{}, %s...)", arg.goname, arg.goname) // Make a copy of the []string.
continue
}
pn(" c.%s = %s", arg.goname, arg.goname)
}
pn(" return c")
pn("}")
for _, opt := range meth.OptParams() {
if opt.p.Location != "query" {
panicf("optional parameter has unsupported location %q", opt.p.Location)
}
setter := initialCap(opt.p.Name)
des := opt.p.Description
des = strings.Replace(des, "Optional.", "", 1)
des = strings.TrimSpace(des)
p("\n%s", asComment("", fmt.Sprintf("%s sets the optional parameter %q: %s", setter, opt.p.Name, des)))
addFieldValueComments(p, opt, "", true)
np := new(namePool)
np.Get("c") // take the receiver's name
paramName := np.Get(validGoIdentifer(opt.p.Name))
typePrefix := ""
if opt.p.Repeated {
typePrefix = "..."
}
pn("func (c *%s) %s(%s %s%s) *%s {", callName, setter, paramName, typePrefix, opt.GoType(), callName)
if opt.p.Repeated {
if opt.GoType() == "string" {
pn("c.urlParams_.SetMulti(%q, append([]string{}, %v...))", opt.p.Name, paramName)
} else {
tmpVar := convertMultiParams(a, paramName)
pn(" c.urlParams_.SetMulti(%q, %v)", opt.p.Name, tmpVar)
}
} else {
if opt.GoType() == "string" {
pn("c.urlParams_.Set(%q, %v)", opt.p.Name, paramName)
} else {
pn("c.urlParams_.Set(%q, fmt.Sprint(%v))", opt.p.Name, paramName)
}
}
pn("return c")
pn("}")
}
if meth.supportsMediaUpload() {
comment := "Media specifies the media to upload in one or more chunks. " +
"The chunk size may be controlled by supplying a MediaOption generated by googleapi.ChunkSize. " +
"The chunk size defaults to googleapi.DefaultUploadChunkSize." +
"The Content-Type header used in the upload request will be determined by sniffing the contents of r, " +
"unless a MediaOption generated by googleapi.ContentType is supplied." +
"\nAt most one of Media and ResumableMedia may be set."
// TODO(mcgreevy): Ensure that r is always closed before Do returns, and document this.
// See comments on https://code-review.googlesource.com/#/c/3970/
p("\n%s", asComment("", comment))
pn("func (c *%s) Media(r io.Reader, options ...googleapi.MediaOption) *%s {", callName, callName)
// We check if the body arg, if any, has a content type and apply it here.
// In practice, this only happens for the storage API today.
// TODO(djd): check if we can cope with the developer setting the body's Content-Type field
// after they've made this call.
if ba := args.bodyArg(); ba != nil {
if ba.schema.HasContentType() {
pn(" if ct := c.%s.ContentType; ct != \"\" {", ba.goname)
pn(" options = append([]googleapi.MediaOption{googleapi.ContentType(ct)}, options...)")
pn(" }")
}
}
pn(" c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options)")
pn(" return c")
pn("}")
comment = "ResumableMedia specifies the media to upload in chunks and can be canceled with ctx. " +
"\n\nDeprecated: use Media instead." +
"\n\nAt most one of Media and ResumableMedia may be set. " +
`mediaType identifies the MIME media type of the upload, such as "image/png". ` +
`If mediaType is "", it will be auto-detected. ` +
`The provided ctx will supersede any context previously provided to ` +
`the Context method.`
p("\n%s", asComment("", comment))
pn("func (c *%s) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *%s {", callName, callName)
pn(" c.ctx_ = ctx")
pn(" c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType)")
pn(" return c")
pn("}")
comment = "ProgressUpdater provides a callback function that will be called after every chunk. " +
"It should be a low-latency function in order to not slow down the upload operation. " +
"This should only be called when using ResumableMedia (as opposed to Media)."
p("\n%s", asComment("", comment))
pn("func (c *%s) ProgressUpdater(pu googleapi.ProgressUpdater) *%s {", callName, callName)
pn(`c.mediaInfo_.SetProgressUpdater(pu)`)
pn("return c")
pn("}")
}
comment := "Fields allows partial responses to be retrieved. " +
"See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse " +
"for more information."
p("\n%s", asComment("", comment))
pn("func (c *%s) Fields(s ...googleapi.Field) *%s {", callName, callName)
pn(`c.urlParams_.Set("fields", googleapi.CombineFields(s))`)
pn("return c")
pn("}")
if httpMethod == "GET" {
// Note that non-GET responses are excluded from supporting If-None-Match.
// See https://github.com/google/google-api-go-client/issues/107 for more info.
comment := "IfNoneMatch sets the optional parameter which makes the operation fail if " +
"the object's ETag matches the given value. This is useful for getting updates " +
"only after the object has changed since the last request. " +
"Use googleapi.IsNotModified to check whether the response error from Do " +
"is the result of In-None-Match."
p("\n%s", asComment("", comment))
pn("func (c *%s) IfNoneMatch(entityTag string) *%s {", callName, callName)
pn(" c.ifNoneMatch_ = entityTag")
pn(" return c")
pn("}")
}
doMethod := "Do method"
if meth.supportsMediaDownload() {
doMethod = "Do and Download methods"
}
commentFmtStr := "Context sets the context to be used in this call's %s. " +
"Any pending HTTP request will be aborted if the provided context is canceled."
comment = fmt.Sprintf(commentFmtStr, doMethod)
p("\n%s", asComment("", comment))
if meth.supportsMediaUpload() {
comment = "This context will supersede any context previously provided to " +
"the ResumableMedia method."
p("%s", asComment("", comment))
}
pn("func (c *%s) Context(ctx context.Context) *%s {", callName, callName)
pn(`c.ctx_ = ctx`)
pn("return c")
pn("}")
comment = "Header returns an http.Header that can be modified by the caller to add " +
"HTTP headers to the request."
p("\n%s", asComment("", comment))
pn("func (c *%s) Header() http.Header {", callName)
pn(" if c.header_ == nil {")
pn(" c.header_ = make(http.Header)")
pn(" }")
pn(" return c.header_")
pn("}")
pn("\nfunc (c *%s) doRequest(alt string) (*http.Response, error) {", callName)
pn(`reqHeaders := make(http.Header)`)
pn("for k, v := range c.header_ {")
pn(" reqHeaders[k] = v")
pn("}")
pn(`reqHeaders.Set("User-Agent",c.s.userAgent())`)
if httpMethod == "GET" {
pn(`if c.ifNoneMatch_ != "" {`)
pn(` reqHeaders.Set("If-None-Match", c.ifNoneMatch_)`)
pn("}")
}
pn("var body io.Reader = nil")
if ba := args.bodyArg(); ba != nil && httpMethod != "GET" {
if meth.m.ID == "ml.projects.predict" {
// Skip JSONReader for APIs that require clients to pass in JSON already.
pn("body = strings.NewReader(c.%s.HttpBody.Data)", ba.goname)
} else {
style := "WithoutDataWrapper"
if a.needsDataWrapper() {
style = "WithDataWrapper"
}
pn("body, err := googleapi.%s.JSONReader(c.%s)", style, ba.goname)
pn("if err != nil { return nil, err }")
}
pn(`reqHeaders.Set("Content-Type", "application/json")`)
}
pn(`c.urlParams_.Set("alt", alt)`)
pn(`c.urlParams_.Set("prettyPrint", "false")`)
pn("urls := googleapi.ResolveRelative(c.s.BasePath, %q)", meth.m.Path)
if meth.supportsMediaUpload() {
pn("if c.mediaInfo_ != nil {")
// Hack guess, since we get a 404 otherwise:
//pn("urls = googleapi.ResolveRelative(%q, %q)", a.apiBaseURL(), meth.mediaUploadPath())
// Further hack. Discovery doc is wrong?
pn(" urls = strings.Replace(urls, %q, %q, 1)", "https://www.googleapis.com/", "https://www.googleapis.com/upload/")
pn(` c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType())`)
pn("}")
pn("if body == nil {")
pn(" body = new(bytes.Buffer)")
pn(` reqHeaders.Set("Content-Type", "application/json")`)
pn("}")
pn("body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body)")
pn("defer cleanup()")
}
pn("urls += \"?\" + c.urlParams_.Encode()")
pn("req, err := http.NewRequest(%q, urls, body)", httpMethod)
pn("if err != nil { return nil, err }")
pn("req.Header = reqHeaders")
if meth.supportsMediaUpload() {
pn("gensupport.SetGetBody(req, getBody)")
}
// Replace param values after NewRequest to avoid reencoding them.
// E.g. Cloud Storage API requires '%2F' in entity param to be kept, but url.Parse replaces it with '/'.
argsForLocation := args.forLocation("path")
if len(argsForLocation) > 0 {
pn(`googleapi.Expand(req.URL, map[string]string{`)
for _, arg := range argsForLocation {
pn(`"%s": %s,`, arg.apiname, arg.exprAsString("c."))
}
pn(`})`)
}
pn("return gensupport.SendRequest(c.ctx_, c.s.client, req)")
pn("}")
if meth.supportsMediaDownload() {
pn("\n// Download fetches the API endpoint's \"media\" value, instead of the normal")
pn("// API response value. If the returned error is nil, the Response is guaranteed to")
pn("// have a 2xx status code. Callers must close the Response.Body as usual.")
pn("func (c *%s) Download(opts ...googleapi.CallOption) (*http.Response, error) {", callName)
pn(`gensupport.SetOptions(c.urlParams_, opts...)`)
pn(`res, err := c.doRequest("media")`)
pn("if err != nil { return nil, err }")
pn("if err := googleapi.CheckMediaResponse(res); err != nil {")
pn("res.Body.Close()")
pn("return nil, err")
pn("}")
pn("return res, nil")
pn("}")
}
mapRetType := strings.HasPrefix(retTypeComma, "map[")
pn("\n// Do executes the %q call.", meth.m.ID)
if retTypeComma != "" && !mapRetType {
commentFmtStr := "Exactly one of %v or error will be non-nil. " +
"Any non-2xx status code is an error. " +
"Response headers are in either %v.ServerResponse.Header " +
"or (if a response was returned at all) in error.(*googleapi.Error).Header. " +
"Use googleapi.IsNotModified to check whether the returned error was because " +
"http.StatusNotModified was returned."
comment := fmt.Sprintf(commentFmtStr, retType, retType)
p("%s", asComment("", comment))
}
pn("func (c *%s) Do(opts ...googleapi.CallOption) (%serror) {", callName, retTypeComma)
nilRet := ""
if retTypeComma != "" {
nilRet = "nil, "
}
pn(`gensupport.SetOptions(c.urlParams_, opts...)`)
pn(`res, err := c.doRequest("json")`)
if retTypeComma != "" && !mapRetType {
pn("if res != nil && res.StatusCode == http.StatusNotModified {")
pn(" if res.Body != nil { res.Body.Close() }")
pn(" return nil, &googleapi.Error{")
pn(" Code: res.StatusCode,")
pn(" Header: res.Header,")
pn(" }")
pn("}")
}
pn("if err != nil { return %serr }", nilRet)
pn("defer googleapi.CloseBody(res)")
pn("if err := googleapi.CheckResponse(res); err != nil { return %serr }", nilRet)
if meth.supportsMediaUpload() {
pn(`rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location"))`)
pn("if rx != nil {")
pn(" rx.Client = c.s.client")
pn(" rx.UserAgent = c.s.userAgent()")
pn(" ctx := c.ctx_")
pn(" if ctx == nil {")
// TODO(mcgreevy): Require context when calling Media, or Do.
pn(" ctx = context.TODO()")
pn(" }")
pn(" res, err = rx.Upload(ctx)")
pn(" if err != nil { return %serr }", nilRet)
pn(" defer res.Body.Close()")
pn(" if err := googleapi.CheckResponse(res); err != nil { return %serr }", nilRet)
pn("}")
}
if retTypeComma == "" {
pn("return nil")
} else {
if mapRetType {
pn("var ret %s", responseType(a, meth.m))
} else {
pn("ret := &%s{", responseTypeLiteral(a, meth.m))
pn(" ServerResponse: googleapi.ServerResponse{")
pn(" Header: res.Header,")
pn(" HTTPStatusCode: res.StatusCode,")
pn(" },")
pn("}")
}
if a.needsDataWrapper() {
pn("target := &struct {")
pn(" Data %s `json:\"data\"`", responseType(a, meth.m))
pn("}{ret}")
} else {
pn("target := &ret")
}
if meth.m.ID == "ml.projects.predict" {
pn("var b bytes.Buffer")
pn("if _, err := io.Copy(&b, res.Body); err != nil { return nil, err }")
pn("if err := res.Body.Close(); err != nil { return nil, err }")
pn("if err := json.NewDecoder(bytes.NewReader(b.Bytes())).Decode(target); err != nil { return nil, err }")
pn("ret.Data = b.String()")
} else {
pn("if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err }")
}
pn("return ret, nil")
}
bs, err := json.MarshalIndent(meth.m.JSONMap, "\t// ", " ")
if err != nil {
panic(err)
}
pn("// %s\n", string(bs))
pn("}")
if ptg, rname, ok := meth.supportsPaging(); ok {
// We can assume retType is non-empty.
pn("")
pn("// Pages invokes f for each page of results.")
pn("// A non-nil error returned from f will halt the iteration.")
pn("// The provided context supersedes any context provided to the Context method.")
pn("func (c *%s) Pages(ctx context.Context, f func(%s) error) error {", callName, retType)
pn(" c.ctx_ = ctx")
pn(` defer %s // reset paging to original point`, ptg.genDeferBody())
pn(" for {")
pn(" x, err := c.Do()")
pn(" if err != nil { return err }")
pn(" if err := f(x); err != nil { return err }")
pn(` if x.%s == "" { return nil }`, rname)
pn(ptg.genSet("x." + rname))
pn(" }")
pn("}")
}
}
// A Field provides methods that describe the characteristics of a Param or Property.
type Field interface {
Default() string
Enum() ([]string, bool)
EnumDescriptions() []string
UnfortunateDefault() bool
}
type Param struct {
method *Method
p *disco.Parameter
callFieldName string // empty means to use the default
}
func (p *Param) Default() string {
return p.p.Default
}
func (p *Param) Enum() ([]string, bool) {
if e := p.p.Enums; e != nil {
return e, true
}
return nil, false
}
func (p *Param) EnumDescriptions() []string {
return p.p.EnumDescriptions
}
func (p *Param) UnfortunateDefault() bool {
// We do not do anything special for Params with unfortunate defaults.
return false
}
func (p *Param) GoType() string {
typ, format := p.p.Type, p.p.Format
if typ == "string" && strings.Contains(format, "int") && p.p.Location != "query" {
panic("unexpected int parameter encoded as string, not in query: " + p.p.Name)
}
t, ok := simpleTypeConvert(typ, format)
if !ok {
panic("failed to convert parameter type " + fmt.Sprintf("type=%q, format=%q", typ, format))
}
return t
}
// goCallFieldName returns the name of this parameter's field in a
// method's "Call" struct.
func (p *Param) goCallFieldName() string {
if p.callFieldName != "" {
return p.callFieldName
}
return validGoIdentifer(p.p.Name)
}
// APIMethods returns top-level ("API-level") methods. They don't have an associated resource.
func (a *API) APIMethods() []*Method {
meths := []*Method{}
for _, m := range a.doc.Methods {
meths = append(meths, &Method{
api: a,
r: nil, // to be explicit
m: m,
})
}
return meths
}
func resolveRelative(basestr, relstr string) string {
u, err := url.Parse(basestr)
if err != nil {
panicf("Error parsing base URL %q: %v", basestr, err)
}
rel, err := url.Parse(relstr)
if err != nil {
panicf("Error parsing relative URL %q: %v", relstr, err)
}
u = u.ResolveReference(rel)
return u.String()
}
func (meth *Method) NewArguments() *arguments {
args := &arguments{
method: meth,
m: make(map[string]*argument),
}
pnames := meth.m.ParameterOrder
if len(pnames) == 0 {
// No parameterOrder; collect required parameters and sort by name.
for _, reqParam := range meth.grepParams(func(p *Param) bool { return p.p.Required }) {
pnames = append(pnames, reqParam.p.Name)
}
sort.Strings(pnames)
}
for _, pname := range pnames {
arg := meth.NewArg(pname, meth.NamedParam(pname))
args.AddArg(arg)
}
if rs := meth.m.Request; rs != nil {
args.AddArg(meth.NewBodyArg(rs))
}
return args
}
func (meth *Method) NewBodyArg(ds *disco.Schema) *argument {
s := meth.api.schemaNamed(ds.RefSchema.Name)
return &argument{
goname: validGoIdentifer(strings.ToLower(ds.Ref)),
apiname: "REQUEST",
gotype: "*" + s.GoName(),
apitype: ds.Ref,
location: "body",
schema: s,
}
}
func (meth *Method) NewArg(apiname string, p *Param) *argument {
apitype := p.p.Type
des := p.p.Description
	goname := validGoIdentifer(apiname) // but might be changed later if it conflicts
if strings.Contains(des, "identifier") && !strings.HasSuffix(strings.ToLower(goname), "id") {
goname += "id" // yay
p.callFieldName = goname
}
gotype := mustSimpleTypeConvert(apitype, p.p.Format)
if p.p.Repeated {
gotype = "[]" + gotype
}
return &argument{
apiname: apiname,
apitype: apitype,
goname: goname,
gotype: gotype,
location: p.p.Location,
}
}
type argument struct {
method *Method
schema *Schema // Set if location == "body".
apiname, apitype string
goname, gotype string
location string // "path", "query", "body"
}
func (a *argument) String() string {
return a.goname + " " + a.gotype
}
func (a *argument) exprAsString(prefix string) string {
switch a.gotype {
case "[]string":
log.Printf("TODO(bradfitz): only including the first parameter in path query.")
return prefix + a.goname + `[0]`
case "string":
return prefix + a.goname
case "integer", "int64":
return "strconv.FormatInt(" + prefix + a.goname + ", 10)"
case "uint64":
return "strconv.FormatUint(" + prefix + a.goname + ", 10)"
case "bool":
return "strconv.FormatBool(" + prefix + a.goname + ")"
}
log.Panicf("unknown type: apitype=%q, gotype=%q", a.apitype, a.gotype)
return ""
}
// arguments are the arguments that a method takes
type arguments struct {
l []*argument
m map[string]*argument
method *Method
}
func (args *arguments) forLocation(loc string) []*argument {
matches := make([]*argument, 0)
for _, arg := range args.l {
if arg.location == loc {
matches = append(matches, arg)
}
}
return matches
}
func (args *arguments) bodyArg() *argument {
for _, arg := range args.l {
if arg.location == "body" {
return arg
}
}
return nil
}
func (args *arguments) AddArg(arg *argument) {
n := 1
oname := arg.goname
for {
_, present := args.m[arg.goname]
if !present {
args.m[arg.goname] = arg
args.l = append(args.l, arg)
return
}
n++
arg.goname = fmt.Sprintf("%s%d", oname, n)
}
}
func (a *arguments) String() string {
var buf bytes.Buffer
for i, arg := range a.l {
if i != 0 {
buf.Write([]byte(", "))
}
buf.Write([]byte(arg.String()))
}
return buf.String()
}
var urlRE = regexp.MustCompile(`^http\S+$`)
func asComment(pfx, c string) string {
var buf bytes.Buffer
const maxLen = 70
r := strings.NewReplacer(
"\n", "\n"+pfx+"// ",
"`\"", `"`,
"\"`", `"`,
)
for len(c) > 0 {
line := c
if len(line) < maxLen {
fmt.Fprintf(&buf, "%s// %s\n", pfx, r.Replace(line))
break
}
// Don't break URLs.
if !urlRE.MatchString(line[:maxLen]) {
line = line[:maxLen]
}
si := strings.LastIndex(line, " ")
if nl := strings.Index(line, "\n"); nl != -1 && nl < si {
si = nl
}
if si != -1 {
line = line[:si]
}
fmt.Fprintf(&buf, "%s// %s\n", pfx, r.Replace(line))
c = c[len(line):]
if si != -1 {
c = c[1:]
}
}
return buf.String()
}
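// Illustrative sketch (not used by the generator) of asComment's behaviour:
// every emitted line starts with pfx + "// ", text is wrapped near 70 columns,
// and the urlRE check above avoids splitting a long URL mid-token.
func exampleAsComment() string {
	doc := "Fields allows partial responses to be retrieved. " +
		"See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more information."
	return asComment("\t", doc)
}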
func simpleTypeConvert(apiType, format string) (gotype string, ok bool) {
// From http://tools.ietf.org/html/draft-zyp-json-schema-03#section-5.1
switch apiType {
case "boolean":
gotype = "bool"
case "string":
gotype = "string"
switch format {
case "int64", "uint64", "int32", "uint32":
gotype = format
}
case "number":
gotype = "float64"
case "integer":
gotype = "int64"
case "any":
gotype = "interface{}"
}
return gotype, gotype != ""
}
func mustSimpleTypeConvert(apiType, format string) string {
if gotype, ok := simpleTypeConvert(apiType, format); ok {
return gotype
}
panic(fmt.Sprintf("failed to simpleTypeConvert(%q, %q)", apiType, format))
}
func responseType(api *API, m *disco.Method) string {
if m.Response == nil {
return ""
}
ref := m.Response.Ref
if ref != "" {
if s := api.schemas[ref]; s != nil {
return s.GoReturnType()
}
return "*" + ref
}
return ""
}
// Strips the leading '*' from a type name so that it can be used to create a literal.
func responseTypeLiteral(api *API, m *disco.Method) string {
v := responseType(api, m)
if strings.HasPrefix(v, "*") {
return v[1:]
}
return v
}
// initialCap returns the identifier with a leading capital letter.
// It also maps "foo-bar" to "FooBar".
func initialCap(ident string) string {
if ident == "" {
panic("blank identifier")
}
return depunct(ident, true)
}
func validGoIdentifer(ident string) string {
id := depunct(ident, false)
switch id {
case "break", "default", "func", "interface", "select",
"case", "defer", "go", "map", "struct",
"chan", "else", "goto", "package", "switch",
"const", "fallthrough", "if", "range", "type",
"continue", "for", "import", "return", "var":
return id + "_"
}
return id
}
// depunct removes '-', '.', '$', '/', '_' from identifiers, making the
// following character uppercase. Multiple '_' are preserved.
func depunct(ident string, needCap bool) string {
var buf bytes.Buffer
preserve_ := false
for i, c := range ident {
if c == '_' {
if preserve_ || strings.HasPrefix(ident[i:], "__") {
preserve_ = true
} else {
needCap = true
continue
}
} else {
preserve_ = false
}
if c == '-' || c == '.' || c == '$' || c == '/' {
needCap = true
continue
}
if needCap {
c = unicode.ToUpper(c)
needCap = false
}
buf.WriteByte(byte(c))
}
return buf.String()
}
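// Illustrative sketch of the identifier mapping (not part of code generation):
func exampleIdentifierMapping() []string {
	return []string{
		initialCap("foo-bar"),         // "FooBar"
		initialCap("max_results"),     // "MaxResults"
		validGoIdentifer("type"),      // "type_" (Go keywords get a trailing underscore)
		validGoIdentifer("pageToken"), // "pageToken" (already a valid identifier)
	}
}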
func addFieldValueComments(p func(format string, args ...interface{}), field Field, indent string, blankLine bool) {
var lines []string
if enum, ok := field.Enum(); ok {
desc := field.EnumDescriptions()
lines = append(lines, asComment(indent, "Possible values:"))
defval := field.Default()
for i, v := range enum {
more := ""
if v == defval {
more = " (default)"
}
if len(desc) > i && desc[i] != "" {
more = more + " - " + desc[i]
}
lines = append(lines, asComment(indent, ` "`+v+`"`+more))
}
} else if field.UnfortunateDefault() {
lines = append(lines, asComment("\t", fmt.Sprintf("Default: %s", field.Default())))
}
if blankLine && len(lines) > 0 {
p(indent + "//\n")
}
for _, l := range lines {
p("%s", l)
}
}
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 | |
scripts/src/update_helm_chart.py | #!/usr/bin/python3
import os
from common.bash import execute_bash_script
from common.yaml import update_yaml_file
chart_name = os.environ["CHART_NAME"]
chart_version = os.environ["CHART_VERSION"]
feature_branch_name = os.environ["FEATURE_BRANCH_NAME"]
execute_bash_script(
[
"git config user.name github-actions",
"git config user.email [email protected]",
f"git checkout {feature_branch_name}",
]
)
update_yaml_file(
f"./overlay/development/kon/{chart_name}/{chart_name}.yaml",
"spec.chart.spec.version",
chart_version,
)
execute_bash_script(
[
"git add .",
"git commit -m "
f"'Update {chart_name} Helm chart to version {chart_version}'",
"git push",
]
)
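
# Illustrative sketch only: `common.yaml.update_yaml_file` is not included in
# this snippet, so its real implementation is an assumption. A comment-preserving
# version could be built on ruamel.yaml roughly like this:
#
#   from ruamel.yaml import YAML
#
#   def update_yaml_file(path, dotted_key, value):
#       yaml = YAML()  # round-trip mode keeps comments and key order
#       with open(path) as f:
#           data = yaml.load(f)
#       *parents, leaf = dotted_key.split(".")
#       node = data
#       for key in parents:
#           node = node[key]
#       node[leaf] = value
#       with open(path, "w") as f:
#           yaml.dump(data, f)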
| [] | [] | ["CHART_NAME", "CHART_VERSION", "FEATURE_BRANCH_NAME"] | [] | ["CHART_NAME", "CHART_VERSION", "FEATURE_BRANCH_NAME"] | python | 3 | 0 | |
labs/lab-4/ex4-3.metrics.py | #!/usr/bin/env python
#
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import syslog
import time
import petl
import pymysql
import tspapi
from tspapi import Measurement
import filelock
import os
class ETL(object):
def __init__(self, lock_file_path=None, last_record_path=None):
"""
1) Open the syslog for writing
2) Allocate an instance of API class
3) Set defaults on member variables
:return:
"""
syslog.openlog(logoption=(syslog.LOG_PID | syslog.LOG_INFO), facility=syslog.LOG_USER)
logging.basicConfig(level=logging.DEBUG)
self.api = tspapi.API()
# Set our application id from the environment variable
self.app_id = os.environ['TSI_APP_ID']
if lock_file_path is not None:
self.lock_file_path = lock_file_path
else:
raise ValueError("Lock file path not specified")
if last_record_path is not None:
self.last_record_path = last_record_path
else:
raise ValueError("Lock file path not specified")
self.host = None
self.user = None
self.password = None
self.database = None
self.connection = None
self.table = None
self.get_db_config()
def log(self, message):
"""
Wrapper method for writing to logging and syslog
:param message:
:return:
"""
logging.debug(message)
syslog.syslog(str(message))
print(message)
def get_db_config(self):
"""
Extract database configuration from environment variables
:return:
"""
self.host = os.environ['DB_HOST']
self.user = os.environ['DB_USER']
self.password = os.environ['DB_PASSWORD']
self.database = os.environ['DB_DATABASE']
def db_connect(self):
"""
Open connection to the database
:return:
"""
self.connection = pymysql.connect(host=self.host,
user=self.user,
password=self.password,
db=self.database)
def db_close(self):
"""
Close connection to the database
:return:
"""
self.connection.close()
def acquire_lock(self):
"""
Create our file lock and return
:return:
"""
self.log("Acquiring lock file from {0}".format(self.lock_file_path))
return filelock.FileLock(self.lock_file_path)
def get(self):
"""
        Reads data from the last record file; returns None if it cannot be read
:return:
"""
data = None
try:
            with open(self.last_record_path, 'rt') as f:
data = f.read()
self.log("data: {0}".format(data))
except IOError as e:
            self.log(str(e))
return data
def put(self, data):
"""
Writes data to last record file
:param data:
:return:
"""
with open(self.last_record_path, 'wt') as f:
self.log("data: {0}".format(data))
f.write(str(data))
def get_last_fetched_record(self):
"""
Single call to return information on our last record that we fetched
:return:
"""
data = self.get()
if data is None or len(data) == 0:
last = None
else:
last = data
return last
def set_last_fetched_record(self, last):
"""
Puts the last record fetched for later retrieval
:param last:
:return:
"""
self.put(last)
def get_max_dt(self):
"""
Gets the current maximum date in the table
:return:
"""
sql = 'select max(dt) as max_dt from ol_transactions'
self.log("SQL: {0}".format(sql))
table = petl.fromdb(self.connection, sql)
max_dt = petl.values(table, 'max_dt')[0]
return max_dt
def get_min_dt(self, last):
"""
Gets the minimum date considering previous extractions from the table.
:param last:
:return:
"""
if last is None or len(last) == 0:
sql = "select min(dt) as min_dt from ol_transactions"
else:
sql = "select min(dt) as min_dt from ol_transactions where dt >= '{0}'".format(last)
self.log("SQL: {0}".format(sql))
table = petl.fromdb(self.connection, sql)
extract_dt = petl.values(table, 'min_dt')[0]
return extract_dt
def get_data(self, min_dt, max_dt):
"""
Generates the SQL and extracts our data
:param min_dt:
:param max_dt:
:return:
"""
sql = "select dt, total, duration from ol_transactions where dt > '{0}' and dt <= '{1}'".format(min_dt, max_dt)
self.log("SQL: {0}".format(sql))
self.table = petl.fromdb(self.connection, sql)
def send_measurements(self, measurements):
"""
Helper function that takes an array of Measurements and sends via the Measurement API
:param measurements:
:return:
"""
measurement_count = len(measurements)
logging.debug("Sending {0} measurements".format(measurement_count))
self.api.measurement_create_batch(measurements)
def process_records(self):
"""
Handles querying and extraction
:return:
"""
rows = petl.values(self.table, 'dt', 'total', 'duration')
row_count = 0
measurements = []
properties = {'app_id': self.app_id}
source = "littledog.com"
for row in rows:
timestamp = int(row[0].strftime('%s'))
total = int(row[1])
duration = int(row[2])
logging.debug("Add Measurements => dt: {0}, total: {1}, duration: {2} ".format(timestamp, total, duration))
row_count += 1
measurements.append(Measurement(metric='ONLINE_TRANSACTION_COUNT',
source=source,
value=total,
timestamp=timestamp,
properties=properties))
measurements.append(Measurement(metric='ONLINE_TRANSACTION_TIME',
source=source,
value=duration,
timestamp=timestamp,
properties=properties))
# Send when we have batch of 10 measurements
if row_count == 10:
# send measurements
self.send_measurements(measurements)
measurements = []
row_count = 0
# If there are any remaining measurements send them on
if len(measurements) > 0:
self.api.measurement_create_batch(measurements)
def process_data(self):
"""
Higher level function that handles processing of data extraction
:return:
"""
last_record = self.get_last_fetched_record()
max_dt = self.get_max_dt()
min_dt = self.get_min_dt(last_record)
self.get_data(min_dt, max_dt)
self.process_records()
self.set_last_fetched_record(max_dt)
def run(self):
"""
1) Acquire lock
2) Connect to the database
3) Look for data to process
4) If data available then process
:return:
"""
lock = self.acquire_lock()
try:
with lock.acquire(timeout=0):
logging.debug('acquired lock')
self.db_connect()
self.process_data()
except filelock.Timeout:
            self.log('Extraction process already running; skipping this run')
if __name__ == '__main__':
etl = ETL(lock_file_path='etl.lock',
last_record_path='etl.last')
etl.run()
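
# The SQL above assumes a MySQL table shaped roughly like this (a sketch; the
# lab's actual DDL may differ):
#
#   CREATE TABLE ol_transactions (
#       dt       DATETIME NOT NULL,  -- measurement timestamp
#       total    INT      NOT NULL,  -- reported as ONLINE_TRANSACTION_COUNT
#       duration INT      NOT NULL   -- reported as ONLINE_TRANSACTION_TIME
#   );
#
# Before running, the script expects TSI_APP_ID, DB_HOST, DB_USER, DB_PASSWORD
# and DB_DATABASE in the environment (see get_db_config above).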
| [] | [] | ["DB_PASSWORD", "DB_HOST", "DB_DATABASE", "TSI_APP_ID", "DB_USER"] | [] | ["DB_PASSWORD", "DB_HOST", "DB_DATABASE", "TSI_APP_ID", "DB_USER"] | python | 5 | 0 | |
test_notifier.py | #!/usr/bin/env python3
import os
import glob
import sys
import requests
import json
from google.cloud import storage
def upload_html(html_path, gcs_bucket, destination_blob_name, project_id):
storage_client = storage.Client()
bucket = storage_client.bucket(gcs_bucket)
try:
if os.path.isdir(html_path):
for local_file in glob.glob(html_path + '/**'):
remote_path = '%s%s' %(destination_blob_name, local_file[0 + len(html_path):])
blob = bucket.blob(remote_path)
blob.upload_from_filename(local_file)
print(
'File {} uploaded to {}.'.format(
local_file, destination_blob_name
)
)
if os.path.isfile(html_path + '/index.html'):
bucket_url = 'https://storage.googleapis.com/%s/%s/index.html' %(gcs_bucket, destination_blob_name)
elif os.path.isfile(html_path + '/index.htm'):
bucket_url = 'https://storage.googleapis.com/%s/%s/index.htm' %(gcs_bucket, destination_blob_name)
else:
bucket_url = 'https://storage.googleapis.com/%s/%s' %(gcs_bucket, destination_blob_name)
return bucket_url
else:
blob = bucket.blob(destination_blob_name)
blob.upload_from_filename(html_path)
print(
'File {} uploaded to {}.'.format(
html_path, destination_blob_name
)
)
return blob.public_url
except Exception as e:
print('Unexpected error:', e)
def post_to_slack(slack_channel, slack_text, build_id, project_id, webhook_url, repo_name, branch_name, commit):
slack_user_name = 'ReportBot'
cloudbuild_url = 'https://console.cloud.google.com/cloud-build/builds/%s?project=%s' %(build_id, project_id)
slack_data = {
'channel': slack_channel,
'text': '%s test suite has completed' %(repo_name),
'username': slack_user_name,
'blocks': [
{
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': ':tada: %s test suite has completed.' %(repo_name)
}
},
{
'type': 'section',
'fields': [
{
'type': 'mrkdwn',
'text': '*Repo Name:*\n %s' %(repo_name)
},
{
'type': 'mrkdwn',
'text': '*Branch Name:*\n %s \n' %(branch_name)
}
]
},
{
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*Commit:*\n %s' %(commit)
}
},
{
'type': 'actions',
'block_id': 'view_test_result',
'elements': [
{
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'View Test Result'
},
'style': 'primary',
'url': slack_text
},
{
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'View Cloudbuild Run'
},
'url': cloudbuild_url
}
]
}
]
}
response = requests.post(webhook_url, data=json.dumps(slack_data), headers={'Content-Type': 'application/json'})
if response.status_code != 200:
raise ValueError(
'Request to slack returned an error %s, the response is:\n%s'
% (response.status_code, response.text)
)
def main():
# Get environment variables
html_path = os.environ.get('HTML_PATH')
gcs_bucket = os.environ.get('GCS_BUCKET')
destination_blob_name = os.environ.get('BLOB_NAME')
slack_channel = os.environ.get('SLACK_CHANNEL')
build_id = os.environ.get('BUILD_ID')
project_id = os.environ.get('PROJECT_ID')
webhook_url = os.environ.get('WEBHOOK_URL')
repo_name = os.environ.get('REPO_NAME')
branch_name = os.environ.get('BRANCH_NAME')
commit = os.environ.get('COMMIT_SHA')
# upload test file/folders to GCS
test_url = upload_html(html_path, gcs_bucket, destination_blob_name, project_id)
# post notification to slack
post_to_slack(slack_channel, test_url, build_id, project_id, webhook_url, repo_name, branch_name, commit)
if __name__ == '__main__':
main()
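
# Example local invocation (all values are placeholders); storage.Client() also
# needs Application Default Credentials, e.g. via GOOGLE_APPLICATION_CREDENTIALS:
#
#   HTML_PATH=./report GCS_BUCKET=my-bucket BLOB_NAME=reports/123 \
#   SLACK_CHANNEL='#builds' BUILD_ID=abc123 PROJECT_ID=my-project \
#   WEBHOOK_URL=https://hooks.slack.com/services/XXX REPO_NAME=my-repo \
#   BRANCH_NAME=main COMMIT_SHA=deadbeef python3 test_notifier.py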
| [] | [] | ["HTML_PATH", "GCS_BUCKET", "WEBHOOK_URL", "BLOB_NAME", "PROJECT_ID", "SLACK_CHANNEL", "BRANCH_NAME", "REPO_NAME", "BUILD_ID", "COMMIT_SHA"] | [] | ["HTML_PATH", "GCS_BUCKET", "WEBHOOK_URL", "BLOB_NAME", "PROJECT_ID", "SLACK_CHANNEL", "BRANCH_NAME", "REPO_NAME", "BUILD_ID", "COMMIT_SHA"] | python | 10 | 0 | |
manage.py | #!/usr/bin/env python3
"""
Scripts to drive a donkey 2 car
Usage:
manage.py (drive) [--model=<model>] [--js] [--type=(linear|categorical|rnn|imu|behavior|3d|localizer|latent)] [--useadversarial] [--advimage=<file>] [--camera=(single|stereo)] [--meta=<key:value> ...] [--myconfig=<filename>] [--corruption=<corruption>] [--severity=<severity>] [--delay=<delay>]
manage.py (train) [--tub=<tub1,tub2,..tubn>] [--file=<file> ...] (--model=<model>) [--transfer=<model>] [--type=(linear|categorical|rnn|imu|behavior|3d|localizer)] [--continuous] [--aug] [--myconfig=<filename>]
Options:
-h --help Show this screen.
--js Use physical joystick.
-f --file=<file> A text file containing paths to tub320x240_train files, one per line. Option may be used more than once.
    --meta=<key:value>  Key/Value strings describing a piece of meta data about this drive. Option may be used more than once.
--myconfig=filename Specify myconfig file to use.
[default: myconfig.py]
"""
import pickle
import warnings
from visualodometry.xte_predictor.testing.get_corrupted_images import corrupt
warnings.filterwarnings('ignore')
import shutil
from docopt import docopt
import donkeycar as dk
# import parts
from donkeycar.parts.transform import TriggeredCallback, DelayedTrigger
from donkeycar.parts.datastore import TubHandler
from donkeycar.parts.controller import LocalWebController, \
JoystickController, WebFpv
from donkeycar.parts.throttle_filter import ThrottleFilter
from donkeycar.parts.behavior import BehaviorPart
from donkeycar.parts.file_watcher import FileWatcher
from donkeycar.parts.launch import AiLaunch
from visualodometry.utils import *
# TODO: Fix
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def drive(cfg, model_path=None, use_joystick=False, model_type=None, camera_type='single', meta=[]):
'''
Construct a working robotic vehicle from many parts.
Each part runs as a job in the Vehicle loop, calling either
it's run or run_threaded method depending on the constructor flag `threaded`.
All parts are updated one after another at the framerate given in
cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
Parts may have named outputs and inputs. The framework handles passing named outputs
to parts requesting the same named input.
'''
if cfg.DONKEY_GYM:
# the simulator will use cuda and then we usually run out of resources
# if we also try to use cuda. so disable for donkey_gym.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
if model_type is None:
if cfg.TRAIN_LOCALIZER:
model_type = "localizer"
elif cfg.TRAIN_BEHAVIORS:
model_type = "behavior"
else:
model_type = cfg.DEFAULT_MODEL_TYPE
# Initialize car
V = dk.vehicle.Vehicle()
print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
if camera_type == "stereo":
if cfg.CAMERA_TYPE == "WEBCAM":
from donkeycar.parts.camera import Webcam
camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam=0)
camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam=1)
elif cfg.CAMERA_TYPE == "CVCAM":
from donkeycar.parts.cv import CvCam
camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam=0)
camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam=1)
else:
raise (Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))
V.add(camA, outputs=['cam/image_array_a'], threaded=True)
V.add(camB, outputs=['cam/image_array_b'], threaded=True)
from donkeycar.parts.image import StereoPair
V.add(StereoPair(), inputs=['cam/image_array_a', 'cam/image_array_b'],
outputs=['cam/image_array'])
elif cfg.CAMERA_TYPE == "D435":
from donkeycar.parts.realsense435i import RealSense435i
cam = RealSense435i(
enable_rgb=cfg.REALSENSE_D435_RGB,
enable_depth=cfg.REALSENSE_D435_DEPTH,
enable_imu=cfg.REALSENSE_D435_IMU,
device_id=cfg.REALSENSE_D435_ID)
V.add(cam, inputs=[],
outputs=['cam/image_array', 'cam/depth_array',
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'],
threaded=True)
else:
if cfg.DONKEY_GYM:
from donkeycar.parts.dgym import DonkeyGymEnv
inputs = []
threaded = True
if cfg.DONKEY_GYM:
from donkeycar.parts.dgym import DonkeyGymEnv
cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, host=cfg.SIM_HOST, env_name=cfg.DONKEY_GYM_ENV_NAME,
conf=cfg.GYM_CONF, delay=cfg.SIM_ARTIFICIAL_LATENCY)
threaded = True
inputs = ['angle', 'throttle']
elif cfg.CAMERA_TYPE == "PICAM":
from donkeycar.parts.camera import PiCamera
cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH,
framerate=cfg.CAMERA_FRAMERATE, vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP)
elif cfg.CAMERA_TYPE == "WEBCAM":
from donkeycar.parts.camera import Webcam
cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CVCAM":
from donkeycar.parts.cv import CvCam
cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CSIC":
from donkeycar.parts.camera import CSICamera
cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH,
framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
elif cfg.CAMERA_TYPE == "V4L":
from donkeycar.parts.camera import V4LCamera
cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH,
framerate=cfg.CAMERA_FRAMERATE)
elif cfg.CAMERA_TYPE == "MOCK":
from donkeycar.parts.camera import MockCamera
cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "IMAGE_LIST":
from donkeycar.parts.camera import ImageListCamera
cam = ImageListCamera(path_mask=cfg.PATH_MASK)
else:
raise (Exception("Unknown camera type: %s" % cfg.CAMERA_TYPE))
V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=threaded)
if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
# modify max_throttle closer to 1.0 to have more power
# modify steering_scale lower than 1.0 to have less responsive steering
if cfg.CONTROLLER_TYPE == "MM1":
from donkeycar.parts.robohat import RoboHATController
ctr = RoboHATController(cfg)
elif "custom" == cfg.CONTROLLER_TYPE:
#
# custom controller created with `donkey createjs` command
#
from my_joystick import MyJoystickController
ctr = MyJoystickController(
throttle_dir=cfg.JOYSTICK_THROTTLE_DIR,
throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
steering_scale=cfg.JOYSTICK_STEERING_SCALE,
auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
ctr.set_deadzone(cfg.JOYSTICK_DEADZONE)
else:
from donkeycar.parts.controller import get_js_controller
ctr = get_js_controller(cfg)
if cfg.USE_NETWORKED_JS:
from donkeycar.parts.controller import JoyStickSub
netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
V.add(netwkJs, threaded=True)
ctr.js = netwkJs
V.add(ctr,
inputs=['cam/image_array'],
outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
threaded=True)
else:
# This web controller will create a web server that is capable
# of managing steering, throttle, and modes, and more.
ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT, mode=cfg.WEB_INIT_MODE)
V.add(ctr,
inputs=['cam/image_array', 'tub320x240_train/num_records'],
outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
threaded=True)
# this throttle filter will allow one tap back for esc reverse
th_filter = ThrottleFilter()
V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])
# See if we should even run the pilot module.
# This is only needed because the part run_condition only accepts boolean
class PilotCondition:
def run(self, mode):
if mode == 'user':
return False
else:
return True
V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])
class LedConditionLogic:
def __init__(self, cfg):
self.cfg = cfg
def run(self, mode, recording, recording_alert, behavior_state, model_file_changed, track_loc):
# returns a blink rate. 0 for off. -1 for on. positive for rate.
if track_loc is not None:
led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
return -1
if model_file_changed:
led.set_rgb(self.cfg.MODEL_RELOADED_LED_R, self.cfg.MODEL_RELOADED_LED_G, self.cfg.MODEL_RELOADED_LED_B)
return 0.1
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if recording_alert:
led.set_rgb(*recording_alert)
return self.cfg.REC_COUNT_ALERT_BLINK_RATE
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if behavior_state is not None and model_type == 'behavior':
r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
led.set_rgb(r, g, b)
return -1 # solid on
if recording:
return -1 # solid on
elif mode == 'user':
return 1
elif mode == 'local_angle':
return 0.5
elif mode == 'local':
return 0.1
return 0
if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
from donkeycar.parts.led_status import RGB_LED
led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)
V.add(LedConditionLogic(cfg),
inputs=['user/mode', 'recording', "records/alert", 'behavior/state', 'modelfile/modified', "pilot/loc"],
outputs=['led/blink_rate'])
V.add(led, inputs=['led/blink_rate'])
def get_record_alert_color(num_records):
col = (0, 0, 0)
for count, color in cfg.RECORD_ALERT_COLOR_ARR:
if num_records >= count:
col = color
return col
class RecordTracker:
def __init__(self):
self.last_num_rec_print = 0
self.dur_alert = 0
self.force_alert = 0
def run(self, num_records):
if num_records is None:
return 0
if self.last_num_rec_print != num_records or self.force_alert:
self.last_num_rec_print = num_records
if num_records % 10 == 0:
print("recorded", num_records, "records")
if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
self.force_alert = 0
if self.dur_alert > 0:
self.dur_alert -= 1
if self.dur_alert != 0:
return get_record_alert_color(num_records)
return 0
rec_tracker_part = RecordTracker()
V.add(rec_tracker_part, inputs=["tub320x240_train/num_records"], outputs=['records/alert'])
if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
# then we are not using the circle button. hijack that to force a record count indication
def show_record_count_status():
rec_tracker_part.last_num_rec_print = 0
rec_tracker_part.force_alert = 1
ctr.set_button_down_trigger('circle', show_record_count_status)
# Sombrero
if cfg.HAVE_SOMBRERO:
from donkeycar.parts.sombrero import Sombrero
s = Sombrero()
# IMU
if cfg.HAVE_IMU:
from donkeycar.parts.imu import IMU
imu = IMU(sensor=cfg.IMU_SENSOR, dlp_setting=cfg.IMU_DLP_CONFIG)
V.add(imu, outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True)
class ImgPreProcess():
'''
preprocess camera image for inference.
normalize and crop if needed.
'''
def clean_temp_dir(self):
shutil.rmtree('data/temp', ignore_errors=True)
os.makedirs('data/temp', exist_ok=True)
def __init__(self, cfg):
self.cfg = cfg
self.start_time = time.perf_counter()
self.counter = 1
self.clean_temp_dir()
if not self.cfg.USE_ADVERSARIAL_DRIVING:
self.adv_img = None
else:
self.adv_img = np.array(pickle.load(open(self.cfg.ADV_IMAGE, "rb"))[0])
def run(self, img_arr):
if self.cfg.USE_CORRUPTED_INPUTS:
if time.perf_counter() - self.start_time > self.cfg.CORRUPTION_DELAY:
img = Image.fromarray(np.uint8(img_arr)).convert('RGB')
img = img.resize((224, 224))
img = corrupt(img, corruption=self.cfg.CORRUPTION, severity=self.cfg.SEVERITY)
img = img.resize((self.cfg.IMAGE_W, self.cfg.IMAGE_H))
img_arr = np.array(img)
# im = Image.fromarray(img_arr)
# im.save("data/temp/" + str(self.counter) + "_cam-image_array_.jpg")
self.counter += 1
img_arr = normalize_and_crop(img_arr, self.cfg)
if self.cfg.USE_ADVERSARIAL_DRIVING:
if time.perf_counter() - self.start_time > self.cfg.CORRUPTION_DELAY:
img_arr = img_arr + self.adv_img * self.cfg.SEVERITY
im = Image.fromarray((img_arr * 255).astype(np.uint8))
im.save("data/temp/" + str(self.counter) + "_cam-image_array_.jpg")
self.counter += 1
# print('adv attack is active')
return img_arr
if "coral" in model_type:
inf_input = 'cam/image_array'
else:
inf_input = 'cam/normalized/cropped'
V.add(ImgPreProcess(cfg),
inputs=['cam/image_array'],
outputs=[inf_input],
run_condition='run_pilot')
# Use the FPV preview, which will show the cropped image output, or the full frame.
if cfg.USE_FPV:
V.add(WebFpv(), inputs=['cam/image_array'], threaded=True)
# Behavioral state
if cfg.TRAIN_BEHAVIORS:
bh = BehaviorPart(cfg.BEHAVIOR_LIST)
V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"])
try:
ctr.set_button_down_trigger('L1', bh.increment_state)
except:
pass
inputs = [inf_input, "behavior/one_hot_state_array"]
# IMU
elif model_type == "imu":
assert (cfg.HAVE_IMU)
# Run the pilot if the mode is not user.
inputs = [inf_input,
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
else:
inputs = [inf_input]
def load_model(kl, model_path):
start = time.time()
print('loading model', model_path)
kl.load(model_path)
print('finished loading in %s sec.' % (str(time.time() - start)))
def load_weights(kl, weights_path):
start = time.time()
try:
print('loading model weights', weights_path)
kl.get_vgg.load_weights(weights_path)
print('finished loading in %s sec.' % (str(time.time() - start)))
except Exception as e:
print(e)
print('ERR>> problems loading weights', weights_path)
def load_model_json(kl, json_fnm):
start = time.time()
print('loading model json', json_fnm)
from tensorflow.python import keras
try:
with open(json_fnm, 'r') as handle:
contents = handle.read()
kl.get_vgg = keras.models.model_from_json(contents)
print('finished loading json in %s sec.' % (str(time.time() - start)))
except Exception as e:
print(e)
print("ERR>> problems loading model json", json_fnm)
if model_path:
# When we have a model, first create an appropriate Keras part
# kl = dk.utils.get_model_by_type(model_type, cfg)
kl = get_model_by_type(model_type, cfg)
model_reload_cb = None
if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path:
# when we have a .h5 extension
# load everything from the model file
load_model(kl, model_path)
def reload_model(filename):
load_model(kl, filename)
model_reload_cb = reload_model
elif '.json' in model_path:
# when we have a .json extension
# load the model from there and look for a matching
# .wts file with just weights
load_model_json(kl, model_path)
weights_path = model_path.replace('.json', '.weights')
load_weights(kl, weights_path)
def reload_weights(filename):
weights_path = filename.replace('.json', '.weights')
load_weights(kl, weights_path)
model_reload_cb = reload_weights
else:
print("ERR>> Unknown extension type on model file!!")
return
# this part will signal visual LED, if connected
V.add(FileWatcher(model_path, verbose=True), outputs=['modelfile/modified'])
# these parts will reload the model file, but only when ai is running so we don't interrupt user driving
V.add(FileWatcher(model_path), outputs=['modelfile/dirty'], run_condition="ai_running")
V.add(DelayedTrigger(100), inputs=['modelfile/dirty'], outputs=['modelfile/reload'], run_condition="ai_running")
V.add(TriggeredCallback(model_path, model_reload_cb), inputs=["modelfile/reload"], run_condition="ai_running")
outputs = ['pilot/angle', 'pilot/throttle']
if cfg.TRAIN_LOCALIZER:
outputs.append("pilot/loc")
V.add(kl, inputs=inputs,
outputs=outputs,
run_condition='run_pilot')
if cfg.STOP_SIGN_DETECTOR:
from donkeycar.parts.object_detector.stop_sign_detector import StopSignDetector
V.add(StopSignDetector(cfg.STOP_SIGN_MIN_SCORE, cfg.STOP_SIGN_SHOW_BOUNDING_BOX),
inputs=['cam/image_array', 'pilot/throttle'], outputs=['pilot/throttle', 'cam/image_array'])
# Choose what inputs should change the car.
class DriveMode:
def run(self, mode,
user_angle, user_throttle,
pilot_angle, pilot_throttle):
if mode == 'user':
return user_angle, user_throttle
elif mode == 'local_angle':
return pilot_angle if pilot_angle else 0.0, user_throttle
else:
return pilot_angle if pilot_angle else 0.0, pilot_throttle * cfg.AI_THROTTLE_MULT if pilot_throttle else 0.0
V.add(DriveMode(),
inputs=['user/mode', 'user/angle', 'user/throttle',
'pilot/angle', 'pilot/throttle'],
outputs=['angle', 'throttle'])
# to give the car a boost when starting ai mode in a race.
aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED)
V.add(aiLauncher,
inputs=['user/mode', 'throttle'],
outputs=['throttle'])
if isinstance(ctr, JoystickController):
ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch)
class AiRunCondition:
'''
A bool part to let us know when ai is running.
'''
def run(self, mode):
if mode == "user":
return False
return True
V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])
# Ai Recording
class AiRecordingCondition:
'''
return True when ai mode, otherwise respect user mode recording flag
'''
def run(self, mode, recording):
if mode == 'user':
return recording
return True
if cfg.RECORD_DURING_AI:
V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording'])
# Drive train setup
if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK":
pass
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)
throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'])
V.add(throttle, inputs=['throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM
left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)
right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)
two_wheel_control = TwoWheelSteeringThrottle()
V.add(two_wheel_control,
inputs=['throttle', 'angle'],
outputs=['left_motor_speed', 'right_motor_speed'])
V.add(left_motor, inputs=['left_motor_speed'])
V.add(right_motor, inputs=['right_motor_speed'])
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
from donkeycar.parts.actuator import ServoBlaster, PWMSteering
steering_controller = ServoBlaster(cfg.STEERING_CHANNEL) # really pin
# PWM pulse values should be in the range of 100 to 200
assert (cfg.STEERING_LEFT_PWM <= 200)
assert (cfg.STEERING_RIGHT_PWM <= 200)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'], threaded=True)
V.add(motor, inputs=["throttle"])
elif cfg.DRIVE_TRAIN_TYPE == "MM1":
from donkeycar.parts.robohat import RoboHATDriver
V.add(RoboHATDriver(cfg), inputs=['angle', 'throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "PIGPIO_PWM":
from donkeycar.parts.actuator import PWMSteering, PWMThrottle, PiGPIO_PWM
steering_controller = PiGPIO_PWM(cfg.STEERING_PWM_PIN, freq=cfg.STEERING_PWM_FREQ,
inverted=cfg.STEERING_PWM_INVERTED)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PiGPIO_PWM(cfg.THROTTLE_PWM_PIN, freq=cfg.THROTTLE_PWM_FREQ,
inverted=cfg.THROTTLE_PWM_INVERTED)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
# OLED setup
if cfg.USE_SSD1306_128_32:
from donkeycar.parts.oled import OLEDPart
auto_record_on_throttle = cfg.USE_JOYSTICK_AS_DEFAULT and cfg.AUTO_RECORD_ON_THROTTLE
oled_part = OLEDPart(cfg.SSD1306_128_32_I2C_BUSNUM, auto_record_on_throttle=auto_record_on_throttle)
V.add(oled_part, inputs=['recording', 'tub320x240_train/num_records', 'user/mode'], outputs=[], threaded=True)
# add tub320x240_train to save data
inputs = ['cam/image_array',
'user/angle', 'user/throttle',
'user/mode']
types = ['image_array',
'float', 'float',
'str']
if cfg.TRAIN_BEHAVIORS:
inputs += ['behavior/state', 'behavior/label', "behavior/one_hot_state_array"]
types += ['int', 'str', 'vector']
if cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_DEPTH:
inputs += ['cam/depth_array']
types += ['gray16_array']
if cfg.HAVE_IMU or (cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_IMU):
inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
types += ['float', 'float', 'float',
'float', 'float', 'float']
if cfg.RECORD_DURING_AI:
inputs += ['pilot/angle', 'pilot/throttle']
types += ['float', 'float']
th = TubHandler(path=cfg.DATA_PATH)
tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
V.add(tub, inputs=inputs, outputs=["tub320x240_train/num_records"], run_condition='recording')
if cfg.PUB_CAMERA_IMAGES:
from donkeycar.parts.network import TCPServeValue
from donkeycar.parts.image import ImgArrToJpg
pub = TCPServeValue("camera")
V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
V.add(pub, inputs=['jpg/bin'])
if type(ctr) is LocalWebController:
if cfg.DONKEY_GYM:
print("You can now go to http://localhost:%d to drive your car." % cfg.WEB_CONTROL_PORT)
else:
print("You can now go to <your hostname.local>:%d to drive your car." % cfg.WEB_CONTROL_PORT)
elif isinstance(ctr, JoystickController):
print("You can now move your joystick to drive your car.")
# tell the controller about the tub320x240_train
ctr.set_tub(tub)
if cfg.BUTTON_PRESS_NEW_TUB:
def new_tub_dir():
V.parts.pop()
tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
V.add(tub, inputs=inputs, outputs=["tub320x240_train/num_records"], run_condition='recording')
ctr.set_tub(tub)
ctr.set_button_down_trigger('cross', new_tub_dir)
ctr.print_controls()
# start the vehicle loop; runs until MAX_LOOPS (if set) or until interrupted
V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
max_loop_count=cfg.MAX_LOOPS)
if __name__ == '__main__':
args = docopt(__doc__)
cfg = dk.load_config(myconfig=args['--myconfig'])
if args['drive']:
model_type = args['--type']
camera_type = args['--camera']
# experimental
if args['--corruption']:
cfg.USE_CORRUPTED_INPUTS = True
cfg.CORRUPTION = args['--corruption']
cfg.SEVERITY = int(args['--severity'])
if args['--useadversarial']:
cfg.USE_ADVERSARIAL_DRIVING = True
cfg.ADV_IMAGE = args['--advimage']
cfg.SEVERITY = int(args['--severity'])
drive(cfg, model_path=args['--model'], use_joystick=args['--js'],
model_type=model_type, camera_type=camera_type,
meta=args['--meta'])
if args['train']:
from train import multi_train, preprocessFileList
tub = args['--tub']
model = args['--model']
transfer = args['--transfer']
model_type = args['--type']
continuous = args['--continuous']
aug = args['--aug']
dirs = preprocessFileList(args['--file'])
if tub is not None:
tub_paths = [os.path.expanduser(n) for n in tub.split(',')]
dirs.extend(tub_paths)
if model_type is None:
model_type = cfg.DEFAULT_MODEL_TYPE
print("using default model type of", model_type)
multi_train(cfg, dirs, model, transfer, model_type, continuous, aug)
| []
| []
| [
"CUDA_VISIBLE_DEVICES",
"KMP_DUPLICATE_LIB_OK",
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["CUDA_VISIBLE_DEVICES", "KMP_DUPLICATE_LIB_OK", "TF_CPP_MIN_LOG_LEVEL"] | python | 3 | 0 | |
pkg/pmem-csi-driver/pmem-csi-driver.go | /*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 Intel Corporation.
SPDX-License-Identifier: Apache-2.0
*/
package pmemcsidriver
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"net/http"
"os"
"os/signal"
"path/filepath"
"syscall"
"time"
"k8s.io/klog/v2"
api "github.com/intel/pmem-csi/pkg/apis/pmemcsi/v1beta1"
grpcserver "github.com/intel/pmem-csi/pkg/grpc-server"
"github.com/intel/pmem-csi/pkg/k8sutil"
pmdmanager "github.com/intel/pmem-csi/pkg/pmem-device-manager"
pmemgrpc "github.com/intel/pmem-csi/pkg/pmem-grpc"
pmemstate "github.com/intel/pmem-csi/pkg/pmem-state"
"github.com/intel/pmem-csi/pkg/scheduler"
"github.com/intel/pmem-csi/pkg/types"
"github.com/kubernetes-csi/csi-lib-utils/metrics"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"k8s.io/client-go/informers"
)
const (
// Resyncing should never be needed for correct operation,
// so this is so high that it shouldn't matter in practice.
resyncPeriod = 10000 * time.Hour
)
type DriverMode string
func (mode *DriverMode) Set(value string) error {
switch value {
case string(Node), string(Webhooks), string(ForceConvertRawNamespaces):
*mode = DriverMode(value)
default:
// The flag package will add the value to the final output, no need to do it here.
return errors.New("invalid driver mode")
}
return nil
}
func (mode *DriverMode) String() string {
return string(*mode)
}
// The mode strings are part of the metrics API (-> csi_controller,
// csi_node as subsystem), do not change them!
const (
// Node driver with support for provisioning.
Node DriverMode = "node"
// Just the webhooks, using metrics instead of gRPC over TCP.
Webhooks DriverMode = "webhooks"
// Convert each raw namespace into fsdax.
ForceConvertRawNamespaces DriverMode = "force-convert-raw-namespaces"
)
var (
//PmemDriverTopologyKey key to use for topology constraint
DriverTopologyKey = ""
// Mirrored after https://github.com/kubernetes/component-base/blob/dae26a37dccb958eac96bc9dedcecf0eb0690f0f/metrics/version.go#L21-L37
// just with less information.
buildInfo = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "build_info",
Help: "A metric with a constant '1' value labeled by version.",
},
[]string{"version"},
)
simpleMetrics = prometheus.NewPedanticRegistry()
)
func init() {
prometheus.MustRegister(buildInfo)
simpleMetrics.MustRegister(buildInfo)
}
//Config type for driver configuration
type Config struct {
//DriverName name of the csi driver
DriverName string
//NodeID node id on which this csi driver is running
NodeID string
//Endpoint exported csi driver endpoint
Endpoint string
//Mode mode fo the driver
Mode DriverMode
//CAFile Root certificate authority certificate file
CAFile string
//CertFile certificate for server authentication
CertFile string
//KeyFile server private key file
KeyFile string
//DeviceManager device manager to use
DeviceManager api.DeviceMode
//Directory where to persist the node driver state
StateBasePath string
//Version driver release version
Version string
// PmemPercentage percentage of space to be used by the driver in each PMEM region
PmemPercentage uint
// KubeAPIQPS is the average rate of requests to the Kubernetes API server,
// enforced locally in client-go.
KubeAPIQPS float64
// KubeAPIBurst is the number of requests that a client is
// allowed to send above the average rate of requests.
KubeAPIBurst int
// parameters for Kubernetes scheduler extender
schedulerListen string
insecureSchedulerListen string
// parameters for rescheduler and raw namespace conversion
nodeSelector types.NodeSelector
// parameters for Prometheus metrics
metricsListen string
metricsPath string
}
type csiDriver struct {
cfg Config
gatherers prometheus.Gatherers
}
func GetCSIDriver(cfg Config) (*csiDriver, error) {
if cfg.DriverName == "" {
return nil, errors.New("driver name configuration option missing")
}
if cfg.Endpoint == "" {
return nil, errors.New("CSI endpoint configuration option missing")
}
if cfg.Mode == Node && cfg.NodeID == "" {
return nil, errors.New("node ID configuration option missing")
}
if cfg.Mode == Node && cfg.StateBasePath == "" {
cfg.StateBasePath = "/var/lib/" + cfg.DriverName
}
DriverTopologyKey = cfg.DriverName + "/node"
// Should GetCSIDriver get called more than once per process,
// all of them will record their version.
buildInfo.With(prometheus.Labels{"version": cfg.Version}).Set(1)
return &csiDriver{
cfg: cfg,
// We use the default Prometheus registry here in addition to
// any custom CSIMetricsManager. Therefore we also return all
// data that is registered globally, including (but not
// limited to!) our own metrics data. For example, some Go
// runtime information
// (https://povilasv.me/prometheus-go-metrics/) are included,
// which may be useful.
gatherers: prometheus.Gatherers{prometheus.DefaultGatherer},
}, nil
}
func (csid *csiDriver) Run(ctx context.Context) error {
s := grpcserver.NewNonBlockingGRPCServer()
// Ensure that the server is stopped before we return.
defer func() {
s.ForceStop()
s.Wait()
}()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
logger := klog.FromContext(ctx)
switch csid.cfg.Mode {
case Webhooks:
client, err := k8sutil.NewClient(config.KubeAPIQPS, config.KubeAPIBurst)
if err != nil {
return fmt.Errorf("connect to apiserver: %v", err)
}
// A factory for all namespaces. Some of these are only needed by
// scheduler webhooks or deprovisioner, but because the normal
// setup is to have both enabled, the logic here is simplified so that
// everything gets initialized.
//
// The PV informer is not really needed, but there is no good way to
// tell the lib that it should watch PVs. An informer for a fake client
// did not work:
// Failed to watch *v1.PersistentVolume: unhandled watch: testing.WatchActionImpl
globalFactory := informers.NewSharedInformerFactory(client, resyncPeriod)
pvcInformer := globalFactory.Core().V1().PersistentVolumeClaims().Informer()
pvcLister := globalFactory.Core().V1().PersistentVolumeClaims().Lister()
scLister := globalFactory.Storage().V1().StorageClasses().Lister()
scInformer := globalFactory.Storage().V1().StorageClasses().Informer()
pvInformer := globalFactory.Core().V1().PersistentVolumes().Informer()
csiNodeLister := globalFactory.Storage().V1().CSINodes().Lister()
var pcp *pmemCSIProvisioner
if csid.cfg.nodeSelector != nil {
serverVersion, err := client.Discovery().ServerVersion()
if err != nil {
return fmt.Errorf("discover server version: %v", err)
}
// Create rescheduler. This has to be done before starting the factory
// because it will indirectly add a new index.
//
// We don't use leader election. The shared factories are running
// anyway, so we don't avoid traffic when hot spares are idle. Quite
// the opposite, the leader election itself causes additional traffic.
//
// There's also no downside to running the deschedule check multiple
// times. In the worst case, multiple instances will determine at exactly
// the same time that it's time to reschedule and try to unset the annotation.
// One of them will succeed, the others will get a conflict error and then
// notice that nothing is left to do on their retry.
pcp = newRescheduler(ctx,
csid.cfg.DriverName,
client, pvcInformer, scInformer, pvInformer, csiNodeLister,
csid.cfg.nodeSelector,
serverVersion.GitVersion)
}
// Now that all informers and indices are created we can run the factory.
globalFactory.Start(ctx.Done())
cacheSyncResult := globalFactory.WaitForCacheSync(ctx.Done())
logger.V(5).Info("Synchronized caches", "cache-sync-result", cacheSyncResult)
for t, v := range cacheSyncResult {
if !v {
return fmt.Errorf("failed to sync informer for type %v", t)
}
}
if csid.cfg.schedulerListen != "" || csid.cfg.insecureSchedulerListen != "" {
// Factory for the driver's namespace.
namespace := os.Getenv("POD_NAMESPACE")
if namespace == "" {
return errors.New("POD_NAMESPACE env variable is not set")
}
localFactory := informers.NewSharedInformerFactoryWithOptions(client, resyncPeriod,
informers.WithNamespace(namespace),
)
podLister := localFactory.Core().V1().Pods().Lister()
c := scheduler.CapacityViaMetrics(namespace, csid.cfg.DriverName, podLister)
localFactory.Start(ctx.Done())
sched, err := scheduler.NewScheduler(
csid.cfg.DriverName,
c,
client,
pvcLister,
scLister,
)
if err != nil {
return fmt.Errorf("create scheduler: %v", err)
}
if csid.cfg.schedulerListen != "" {
if _, err := csid.startHTTPSServer(ctx, cancel, csid.cfg.schedulerListen, sched, true /* TLS */); err != nil {
return err
}
}
if csid.cfg.insecureSchedulerListen != "" {
if _, err := csid.startHTTPSServer(ctx, cancel, csid.cfg.insecureSchedulerListen, sched, false /* not TLS */); err != nil {
return err
}
}
}
if pcp != nil {
pcp.startRescheduler(ctx, cancel)
}
case Node:
dm, err := pmdmanager.New(ctx, csid.cfg.DeviceManager, csid.cfg.PmemPercentage)
if err != nil {
return err
}
sm, err := pmemstate.NewFileState(csid.cfg.StateBasePath)
if err != nil {
return err
}
// On the csi.sock endpoint we gather statistics for incoming
// CSI method calls like any other CSI driver.
cmm := metrics.NewCSIMetricsManagerWithOptions(csid.cfg.DriverName,
metrics.WithProcessStartTime(false),
metrics.WithSubsystem(metrics.SubsystemPlugin),
)
csid.gatherers = append(csid.gatherers, cmm.GetRegistry())
// Create GRPC servers
ids := NewIdentityServer(csid.cfg.DriverName, csid.cfg.Version)
cs := NewNodeControllerServer(ctx, csid.cfg.NodeID, dm, sm)
ns := NewNodeServer(cs, filepath.Clean(csid.cfg.StateBasePath)+"/mount")
services := []grpcserver.Service{ids, ns, cs}
if err := s.Start(ctx, csid.cfg.Endpoint, csid.cfg.NodeID, nil, cmm, services...); err != nil {
return err
}
// Also collect metrics data via the device manager.
pmdmanager.CapacityCollector{PmemDeviceCapacity: dm}.MustRegister(prometheus.DefaultRegisterer, csid.cfg.NodeID, csid.cfg.DriverName)
capacity, err := dm.GetCapacity(ctx)
if err != nil {
return fmt.Errorf("get initial capacity: %v", err)
}
logger.Info("PMEM-CSI ready.", "capacity", capacity)
case ForceConvertRawNamespaces:
client, err := k8sutil.NewClient(config.KubeAPIQPS, config.KubeAPIBurst)
if err != nil {
return fmt.Errorf("connect to apiserver: %v", err)
}
if err := pmdmanager.ForceConvertRawNamespaces(ctx, client, csid.cfg.DriverName, csid.cfg.nodeSelector, csid.cfg.NodeID); err != nil {
return err
}
// By proceeding to waiting for the termination signal below
// we keep the pod around after it has its work done until
// Kubernetes notices that the pod is no longer needed.
// Terminating the pod (even with a zero exit code) would
// cause a race between detecting the label change and
// restarting the container.
//
// "RestartPolicy: OnFailure" would solve that, but
// isn't supported for DaemonSets
// (https://github.com/kubernetes/kubernetes/issues/24725).
logger.Info("Raw namespace conversion is done, waiting for termination signal.")
default:
return fmt.Errorf("Unsupported device mode '%v", csid.cfg.Mode)
}
// Start the Prometheus metrics endpoint, if one is configured.
if csid.cfg.metricsListen != "" {
addr, err := csid.startMetrics(ctx, cancel)
if err != nil {
return err
}
logger.Info("Prometheus endpoint started.", "endpoint", fmt.Sprintf("http://%s%s", addr, csid.cfg.metricsPath))
}
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
select {
case sig := <-c:
logger.Info("Caught signal, terminating.", "signal", sig)
// We sleep briefly to give sidecars a chance to shut down cleanly
// before we close the CSI socket and force them to shut down
// abnormally, because the latter causes lots of debug output
// due to usage of klog.Fatal (https://github.com/intel/pmem-csi/issues/856).
time.Sleep(time.Second)
case <-ctx.Done():
// The scheduler HTTP server must have failed (to start).
// We quit directly in that case.
}
// Here (in contrast to the s.ForceStop() above) we let the gRPC server finish
// its work on any pending call.
s.Stop()
s.Wait()
return nil
}
// startMetrics starts the HTTP server for the Prometheus endpoint, if one is configured.
// Error handling is the same as for startHTTPSServer.
func (csid *csiDriver) startMetrics(ctx context.Context, cancel func()) (string, error) {
mux := http.NewServeMux()
mux.Handle(csid.cfg.metricsPath,
promhttp.InstrumentMetricHandler(
prometheus.DefaultRegisterer,
promhttp.HandlerFor(csid.gatherers, promhttp.HandlerOpts{}),
),
)
mux.Handle(csid.cfg.metricsPath+"/simple", promhttp.HandlerFor(simpleMetrics, promhttp.HandlerOpts{}))
return csid.startHTTPSServer(ctx, cancel, csid.cfg.metricsListen, mux, false /* no TLS */)
}
// startHTTPSServer contains the common logic for starting and
// stopping an HTTP or HTTPS server. Returns an error or the address that can
// be used in Dial("tcp") to reach the server (useful for testing when
// "listen" does not include a port).
func (csid *csiDriver) startHTTPSServer(ctx context.Context, cancel func(), listen string, handler http.Handler, useTLS bool) (string, error) {
name := "HTTP server"
if useTLS {
name = "HTTPS server"
}
logger := klog.FromContext(ctx).WithName(name).WithValues("listen", listen)
var config *tls.Config
if useTLS {
c, err := pmemgrpc.LoadServerTLS(ctx, csid.cfg.CAFile, csid.cfg.CertFile, csid.cfg.KeyFile, "" /* any peer can connect */)
if err != nil {
return "", fmt.Errorf("initialize HTTPS config: %v", err)
}
config = c
}
server := http.Server{
Addr: listen,
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
logger.V(5).Info("Handling request", "method", r.Method, "path", r.URL.Path, "peer", r.RemoteAddr, "agent", r.UserAgent())
handler.ServeHTTP(w, r)
}),
TLSConfig: config,
}
listener, err := net.Listen("tcp", listen)
if err != nil {
return "", fmt.Errorf("listen on TCP address %q: %v", listen, err)
}
tcpListener := listener.(*net.TCPListener)
go func() {
defer tcpListener.Close()
var err error
if useTLS {
err = server.ServeTLS(listener, csid.cfg.CertFile, csid.cfg.KeyFile)
} else {
err = server.Serve(listener)
}
if err != http.ErrServerClosed {
logger.Error(err, "Failed")
}
// Also stop main thread.
cancel()
}()
go func() {
// Block until the context is done, then immediately
// close the server.
<-ctx.Done()
server.Close()
}()
logger.V(3).Info("Started", "addr", tcpListener.Addr())
return tcpListener.Addr().String(), nil
}
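// Illustrative sketch (not part of the driver): callers that pass a "listen" value
// without a fixed port, e.g. ":0", can use the returned address directly, because it
// already contains the port chosen by the kernel. The handler name below is a
// hypothetical placeholder.
//
//	addr, err := csid.startHTTPSServer(ctx, cancel, ":0", someHandler, false /* no TLS */)
//	if err != nil {
//		return err
//	}
//	conn, err := net.Dial("tcp", addr) // addr is host:port with the actual port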
| [
"\"POD_NAMESPACE\""
]
| []
| [
"POD_NAMESPACE"
]
| [] | ["POD_NAMESPACE"] | go | 1 | 0 | |
pipenv/resolver.py | import os
import sys
import json
import logging
os.environ['PIP_PYTHON_PATH'] = sys.executable
for _dir in ('vendor', 'patched', '..'):
dirpath = os.path.sep.join([os.path.dirname(__file__), _dir])
sys.path.insert(0, dirpath)
def which(*args, **kwargs):
return sys.executable
def main():
is_verbose = '--verbose' in ' '.join(sys.argv)
do_pre = '--pre' in ' '.join(sys.argv)
do_clear = '--clear' in ' '.join(sys.argv)
is_debug = '--debug' in ' '.join(sys.argv)
new_sys_argv = []
for v in sys.argv:
if v.startswith('--'):
continue
else:
new_sys_argv.append(v)
sys.argv = new_sys_argv
import pipenv.core
if is_verbose:
logging.getLogger('pip9').setLevel(logging.INFO)
logging.getLogger('notpip').setLevel(logging.INFO)
if is_debug:
# Shit's getting real at this point.
logging.getLogger('pip9').setLevel(logging.DEBUG)
logging.getLogger('notpip').setLevel(logging.DEBUG)
if 'PIPENV_PACKAGES' in os.environ:
packages = os.environ['PIPENV_PACKAGES'].strip().split('\n')
else:
packages = sys.argv[1:]
packages = [p for p in packages if not p.startswith('--')]
project = pipenv.core.project
def resolve(packages, pre, sources, verbose, clear):
import pipenv.utils
return pipenv.utils.resolve_deps(
packages,
which,
project=project,
pre=pre,
sources=sources,
clear=clear,
verbose=verbose,
)
results = resolve(
packages,
pre=do_pre,
sources=project.sources,
verbose=is_verbose,
clear=do_clear,
)
print('RESULTS:')
if results:
print(json.dumps(results))
else:
print(json.dumps([]))
if __name__ == '__main__':
main()
| []
| []
| [
"PIPENV_PACKAGES",
"PIP_PYTHON_PATH"
]
| [] | ["PIPENV_PACKAGES", "PIP_PYTHON_PATH"] | python | 2 | 0 | |
soracom/generated/cmd/lagoon_updated_plan.go | // Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"os"
"strings"
"github.com/spf13/cobra"
)
// LagoonUpdatedPlanCmdPlan holds value of 'plan' option
var LagoonUpdatedPlanCmdPlan string
// LagoonUpdatedPlanCmdBody holds contents of request body to be sent
var LagoonUpdatedPlanCmdBody string
func init() {
LagoonUpdatedPlanCmd.Flags().StringVar(&LagoonUpdatedPlanCmdPlan, "plan", "", TRAPI(""))
LagoonUpdatedPlanCmd.Flags().StringVar(&LagoonUpdatedPlanCmdBody, "body", "", TRCLI("cli.common_params.body.short_help"))
LagoonCmd.AddCommand(LagoonUpdatedPlanCmd)
}
// LagoonUpdatedPlanCmd defines 'updated-plan' subcommand
var LagoonUpdatedPlanCmd = &cobra.Command{
Use: "updated-plan",
Short: TRAPI("/lagoon/plan:put:summary"),
Long: TRAPI(`/lagoon/plan:put:description`),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
return fmt.Errorf("unexpected arguments passed => %v", args)
}
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
param, err := collectLagoonUpdatedPlanCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
return prettyPrintStringAsJSON(body)
}
return err
},
}
func collectLagoonUpdatedPlanCmdParams(ac *apiClient) (*apiParams, error) {
var body string
var parsedBody interface{}
var err error
body, err = buildBodyForLagoonUpdatedPlanCmd()
if err != nil {
return nil, err
}
contentType := "application/json"
if contentType == "application/json" {
err = json.Unmarshal([]byte(body), &parsedBody)
if err != nil {
return nil, fmt.Errorf("invalid json format specified for `--body` parameter: %s", err)
}
}
return &apiParams{
method: "PUT",
path: buildPathForLagoonUpdatedPlanCmd("/lagoon/plan"),
query: buildQueryForLagoonUpdatedPlanCmd(),
contentType: contentType,
body: body,
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForLagoonUpdatedPlanCmd(path string) string {
return path
}
func buildQueryForLagoonUpdatedPlanCmd() url.Values {
result := url.Values{}
return result
}
func buildBodyForLagoonUpdatedPlanCmd() (string, error) {
var result map[string]interface{}
if LagoonUpdatedPlanCmdBody != "" {
var b []byte
var err error
if strings.HasPrefix(LagoonUpdatedPlanCmdBody, "@") {
fname := strings.TrimPrefix(LagoonUpdatedPlanCmdBody, "@")
// #nosec
b, err = ioutil.ReadFile(fname)
} else if LagoonUpdatedPlanCmdBody == "-" {
b, err = ioutil.ReadAll(os.Stdin)
} else {
b = []byte(LagoonUpdatedPlanCmdBody)
}
if err != nil {
return "", err
}
err = json.Unmarshal(b, &result)
if err != nil {
return "", err
}
}
if result == nil {
result = make(map[string]interface{})
}
if LagoonUpdatedPlanCmdPlan != "" {
result["plan"] = LagoonUpdatedPlanCmdPlan
}
resultBytes, err := json.Marshal(result)
if err != nil {
return "", err
}
return string(resultBytes), nil
}
| [
"\"SORACOM_VERBOSE\""
]
| []
| [
"SORACOM_VERBOSE"
]
| [] | ["SORACOM_VERBOSE"] | go | 1 | 0 | |
test/functional/feature_taproot.py | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test Taproot softfork (BIPs 340-342)
from test_framework.blocktools import (
create_coinbase,
create_block,
add_witness_commitment,
MAX_BLOCK_SIGOPS_WEIGHT,
NORMAL_GBT_REQUEST_PARAMS,
WITNESS_SCALE_FACTOR,
)
from test_framework.messages import (
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxOutValue, CTxOutWitness,
ToHex, uint256_from_str,
)
from test_framework.script import (
ANNEX_TAG,
CScript,
CScriptNum,
CScriptOp,
LEAF_VERSION_TAPSCRIPT,
LegacySignatureHash,
LOCKTIME_THRESHOLD,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_16,
OP_2DROP,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGADD,
OP_CHECKSIGVERIFY,
OP_CODESEPARATOR,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_NOP,
OP_NOT,
OP_NOTIF,
OP_PUSHDATA1,
OP_RETURN,
OP_SWAP,
OP_VERIFY,
SIGHASH_DEFAULT,
SIGHASH_ALL,
SIGHASH_NONE,
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY,
SegwitV0SignatureHash,
TaprootSignatureHash,
is_op_success,
taproot_construct,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_raises_rpc_error, assert_equal
from test_framework import util
from test_framework.key import generate_privkey, compute_xonly_pubkey, sign_schnorr, tweak_add_privkey, ECKey
from test_framework.address import (
hash160,
sha256,
)
from collections import OrderedDict, namedtuple
from io import BytesIO
import json
import hashlib
import os
import random
# === Framework for building spending transactions. ===
#
# The computation is represented as a "context" dict, whose entries store potentially-unevaluated expressions that
# refer to lower-level ones. By overwriting these expression, many aspects - both high and low level - of the signing
# process can be overridden.
#
# Specifically, a context object is a dict that maps names to compositions of:
# - values
# - lists of values
# - callables which, when fed the context object as argument, produce any of these
#
# The DEFAULT_CONTEXT object specifies a standard signing process, with many overridable knobs.
#
# The get(ctx, name) function can evaluate a name, and cache its result in the context.
# getter(name) can be used to construct a callable that evaluates name. For example:
#
# ctx1 = {**DEFAULT_CONTEXT, inputs=[getter("sign"), b'\x01']}
#
# creates a context where the script inputs are a signature plus the bytes 0x01.
#
# override(expr, name1=expr1, name2=expr2, ...) can be used to cause an expression to be evaluated in a selectively
# modified context. For example:
#
# ctx2 = {**DEFAULT_CONTEXT, sighash=override(default_sighash, hashtype=SIGHASH_DEFAULT)}
#
# creates a context ctx2 where the sighash is modified to use hashtype=SIGHASH_DEFAULT. This differs from
#
# ctx3 = {**DEFAULT_CONTEXT, hashtype=SIGHASH_DEFAULT}
#
# in that ctx3 will globally use hashtype=SIGHASH_DEFAULT (including in the hashtype byte appended to the signature)
# while ctx2 only uses the modified hashtype inside the sighash calculation.
#
# ELEMENTS:
# Elements taphash calculation also depends on genesis_block_hash which is stored as
# `genesis_hash` in the test config
g_genesis_hash = None
def deep_eval(ctx, expr):
"""Recursively replace any callables c in expr (including inside lists) with c(ctx)."""
while callable(expr):
expr = expr(ctx)
if isinstance(expr, list):
expr = [deep_eval(ctx, x) for x in expr]
return expr
# Data type to represent fully-evaluated expressions in a context dict (so we can avoid reevaluating them).
Final = namedtuple("Final", "value")
def get(ctx, name):
"""Evaluate name in context ctx."""
assert name in ctx, "Missing '%s' in context" % name
expr = ctx[name]
if not isinstance(expr, Final):
# Evaluate and cache the result.
expr = Final(deep_eval(ctx, expr))
ctx[name] = expr
return expr.value
def getter(name):
"""Return a callable that evaluates name in its passed context."""
return lambda ctx: get(ctx, name)
def override(expr, **kwargs):
"""Return a callable that evaluates expr in a modified context."""
return lambda ctx: deep_eval({**ctx, **kwargs}, expr)
# === Implementations for the various default expressions in DEFAULT_CONTEXT ===
def default_hashtype(ctx):
"""Default expression for "hashtype": SIGHASH_DEFAULT for taproot, SIGHASH_ALL otherwise."""
mode = get(ctx, "mode")
if mode == "taproot":
return SIGHASH_DEFAULT
else:
return SIGHASH_ALL
def default_tapleaf(ctx):
"""Default expression for "tapleaf": looking up leaf in tap[2]."""
return get(ctx, "tap").leaves[get(ctx, "leaf")]
def default_script_taproot(ctx):
"""Default expression for "script_taproot": tapleaf.script."""
return get(ctx, "tapleaf").script
def default_leafversion(ctx):
"""Default expression for "leafversion": tapleaf.version"""
return get(ctx, "tapleaf").version
def default_negflag(ctx):
"""Default expression for "negflag": tap.negflag."""
return get(ctx, "tap").negflag
def default_pubkey_inner(ctx):
"""Default expression for "pubkey_inner": tap.inner_pubkey."""
return get(ctx, "tap").inner_pubkey
def default_merklebranch(ctx):
"""Default expression for "merklebranch": tapleaf.merklebranch."""
return get(ctx, "tapleaf").merklebranch
def default_controlblock(ctx):
"""Default expression for "controlblock": combine leafversion, negflag, pubkey_inner, merklebranch."""
return bytes([get(ctx, "leafversion") + get(ctx, "negflag")]) + get(ctx, "pubkey_inner") + get(ctx, "merklebranch")
#ELEMENTS: taphash depends on genesis hash
def default_sighash(ctx):
"""Default expression for "sighash": depending on mode, compute BIP341, BIP143, or legacy sighash."""
tx = get(ctx, "tx")
idx = get(ctx, "idx")
hashtype = get(ctx, "hashtype_actual")
genesis_hash = get(ctx, "genesis_hash")
mode = get(ctx, "mode")
if mode == "taproot":
# BIP341 signature hash
utxos = get(ctx, "utxos")
annex = get(ctx, "annex")
if get(ctx, "leaf") is not None:
codeseppos = get(ctx, "codeseppos")
leaf_ver = get(ctx, "leafversion")
script = get(ctx, "script_taproot")
return TaprootSignatureHash(tx, utxos, hashtype, genesis_hash, idx, scriptpath=True, script=script, leaf_ver=leaf_ver, codeseparator_pos=codeseppos, annex=annex)
else:
return TaprootSignatureHash(tx, utxos, hashtype, genesis_hash, idx, scriptpath=False, annex=annex)
elif mode == "witv0":
# BIP143 signature hash
scriptcode = get(ctx, "scriptcode")
utxos = get(ctx, "utxos")
return SegwitV0SignatureHash(scriptcode, tx, idx, hashtype, utxos[idx].nValue, enable_sighash_rangeproof=False)
else:
# Pre-segwit signature hash
scriptcode = get(ctx, "scriptcode")
return LegacySignatureHash(scriptcode, tx, idx, hashtype, enable_sighash_rangeproof=False)[0]
def default_tweak(ctx):
"""Default expression for "tweak": None if a leaf is specified, tap[0] otherwise."""
if get(ctx, "leaf") is None:
return get(ctx, "tap").tweak
return None
def default_key_tweaked(ctx):
"""Default expression for "key_tweaked": key if tweak is None, tweaked with it otherwise."""
key = get(ctx, "key")
tweak = get(ctx, "tweak")
if tweak is None:
return key
else:
return tweak_add_privkey(key, tweak)
def default_signature(ctx):
"""Default expression for "signature": BIP340 signature or ECDSA signature depending on mode."""
sighash = get(ctx, "sighash")
if get(ctx, "mode") == "taproot":
key = get(ctx, "key_tweaked")
flip_r = get(ctx, "flag_flip_r")
flip_p = get(ctx, "flag_flip_p")
return sign_schnorr(key, sighash, flip_r=flip_r, flip_p=flip_p)
else:
key = get(ctx, "key")
return key.sign_ecdsa(sighash)
def default_hashtype_actual(ctx):
"""Default expression for "hashtype_actual": hashtype, unless mismatching SIGHASH_SINGLE in taproot."""
hashtype = get(ctx, "hashtype")
mode = get(ctx, "mode")
if mode != "taproot":
return hashtype
idx = get(ctx, "idx")
tx = get(ctx, "tx")
if hashtype & 3 == SIGHASH_SINGLE and idx >= len(tx.vout):
return (hashtype & ~3) | SIGHASH_NONE
return hashtype
def default_bytes_hashtype(ctx):
"""Default expression for "bytes_hashtype": bytes([hashtype_actual]) if not 0, b"" otherwise."""
return bytes([x for x in [get(ctx, "hashtype_actual")] if x != 0])
def default_sign(ctx):
"""Default expression for "sign": concatenation of signature and bytes_hashtype."""
return get(ctx, "signature") + get(ctx, "bytes_hashtype")
def default_inputs_keypath(ctx):
"""Default expression for "inputs_keypath": a signature."""
return [get(ctx, "sign")]
def default_witness_taproot(ctx):
"""Default expression for "witness_taproot", consisting of inputs, script, control block, and annex as needed."""
annex = get(ctx, "annex")
suffix_annex = []
if annex is not None:
suffix_annex = [annex]
if get(ctx, "leaf") is None:
return get(ctx, "inputs_keypath") + suffix_annex
else:
return get(ctx, "inputs") + [bytes(get(ctx, "script_taproot")), get(ctx, "controlblock")] + suffix_annex
def default_witness_witv0(ctx):
"""Default expression for "witness_witv0", consisting of inputs and witness script, as needed."""
script = get(ctx, "script_witv0")
inputs = get(ctx, "inputs")
if script is None:
return inputs
else:
return inputs + [script]
def default_witness(ctx):
"""Default expression for "witness", delegating to "witness_taproot" or "witness_witv0" as needed."""
mode = get(ctx, "mode")
if mode == "taproot":
return get(ctx, "witness_taproot")
elif mode == "witv0":
return get(ctx, "witness_witv0")
else:
return []
def default_scriptsig(ctx):
"""Default expression for "scriptsig", consisting of inputs and redeemscript, as needed."""
scriptsig = []
mode = get(ctx, "mode")
if mode == "legacy":
scriptsig = get(ctx, "inputs")
redeemscript = get(ctx, "script_p2sh")
if redeemscript is not None:
scriptsig += [bytes(redeemscript)]
return scriptsig
# The default context object.
DEFAULT_CONTEXT = {
# == The main expressions to evaluate. Only override these for unusual or invalid spends. ==
# The overall witness stack, as a list of bytes objects.
"witness": default_witness,
# The overall scriptsig, as a list of CScript objects (to be concatenated) and bytes objects (to be pushed)
"scriptsig": default_scriptsig,
# == Expressions you'll generally only override for intentionally invalid spends. ==
# The witness stack for spending a taproot output.
"witness_taproot": default_witness_taproot,
# The witness stack for spending a P2WPKH/P2WSH output.
"witness_witv0": default_witness_witv0,
# The script inputs for a taproot key path spend.
"inputs_keypath": default_inputs_keypath,
# The actual hashtype to use (usually equal to hashtype, but in taproot SIGHASH_SINGLE is not always allowed).
"hashtype_actual": default_hashtype_actual,
# The bytes object for a full signature (including hashtype byte, if needed).
"bytes_hashtype": default_bytes_hashtype,
# A full script signature (bytes including hashtype, if needed)
"sign": default_sign,
# An ECDSA or Schnorr signature (excluding hashtype byte).
"signature": default_signature,
# The 32-byte tweaked key (equal to key for script path spends, or key+tweak for key path spends).
"key_tweaked": default_key_tweaked,
# The tweak to use (None for script path spends, the actual tweak for key path spends).
"tweak": default_tweak,
# The sighash value (32 bytes)
"sighash": default_sighash,
# The information about the chosen script path spend (TaprootLeafInfo object).
"tapleaf": default_tapleaf,
# The script to push, and include in the sighash, for a taproot script path spend.
"script_taproot": default_script_taproot,
# The inner pubkey for a taproot script path spend (32 bytes).
"pubkey_inner": default_pubkey_inner,
# The negation flag of the inner pubkey for a taproot script path spend.
"negflag": default_negflag,
# The leaf version to include in the sighash (this does not affect the one in the control block).
"leafversion": default_leafversion,
# The Merkle path to include in the control block for a script path spend.
"merklebranch": default_merklebranch,
# The control block to push for a taproot script path spend.
"controlblock": default_controlblock,
# Whether to produce signatures with invalid P sign (Schnorr signatures only).
"flag_flip_p": False,
# Whether to produce signatures with invalid R sign (Schnorr signatures only).
"flag_flip_r": False,
# == Parameters that can be changed without invalidating, but do have a default: ==
# The hashtype (as an integer).
"hashtype": default_hashtype,
# The annex (only when mode=="taproot").
"annex": None,
# The codeseparator position (only when mode=="taproot").
"codeseppos": -1,
# The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH).
"script_p2sh": None,
# The script to add to the witness in (if P2WSH; None implies P2WPKH)
"script_witv0": None,
# The leaf to use in taproot spends (if script path spend; None implies key path spend).
"leaf": None,
# The input arguments to provide to the executed script
"inputs": [],
# Genesis block hash (required for taproot spends)
"genesis_hash": None,
# == Parameters to be set before evaluation: ==
# - mode: what spending style to use ("taproot", "witv0", or "legacy").
# - key: the (untweaked) private key to sign with (ECKey object for ECDSA, 32 bytes for Schnorr).
# - tap: the TaprootInfo object (see taproot_construct; needed in mode=="taproot").
# - tx: the transaction to sign.
# - utxos: the UTXOs being spent (needed in mode=="witv0" and mode=="taproot").
# - idx: the input position being signed.
# - scriptcode: the scriptcode to include in legacy and witv0 sighashes.
# - genesis_hash: the genesis block hash (required in mode=="taproot").
}
def flatten(lst):
ret = []
for elem in lst:
if isinstance(elem, list):
ret += flatten(elem)
else:
ret.append(elem)
return ret
def spend(tx, idx, utxos, **kwargs):
"""Sign transaction input idx of tx, provided utxos is the list of outputs being spent.
Additional arguments may be provided that override any aspect of the signing process.
See DEFAULT_CONTEXT above for what can be overridden, and what must be provided.
"""
ctx = {**DEFAULT_CONTEXT, "tx":tx, "idx":idx, "utxos":utxos, **kwargs}
def to_script(elem):
"""If fed a CScript, return it; if fed bytes, return a CScript that pushes it."""
if isinstance(elem, CScript):
return elem
else:
return CScript([elem])
scriptsig_list = flatten(get(ctx, "scriptsig"))
scriptsig = CScript(b"".join(bytes(to_script(elem)) for elem in scriptsig_list))
witness_stack = flatten(get(ctx, "witness"))
return (scriptsig, witness_stack)
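
# Illustrative sketch (added for clarity; not invoked by any test below): a direct
# call into the context framework for a taproot key-path spend. The arguments
# `spend_tx`, `spent_utxos`, `tap` and `seckey` are hypothetical caller-supplied
# values; g_genesis_hash must already have been filled in by the test, since the
# ELEMENTS taproot sighash commits to the genesis block hash.
def _example_keypath_spend(spend_tx, spent_utxos, tap, seckey):
    scriptsig, witness_stack = spend(
        spend_tx, 0, spent_utxos,
        mode="taproot", tap=tap, key=seckey,
        genesis_hash=g_genesis_hash,
        hashtype=SIGHASH_ALL | SIGHASH_ANYONECANPAY,  # any valid taproot hashtype
    )
    # scriptsig is empty for a native segwit v1 output; the witness stack holds one
    # 65-byte element (64-byte BIP340 signature plus the non-default hashtype byte).
    return scriptsig, witness_stack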
# === Spender objects ===
#
# Each spender is a tuple of:
# - A scriptPubKey which is to be spent from (CScript)
# - A comment describing the test (string)
# - Whether the spending (on itself) is expected to be standard (bool)
# - A tx-signing lambda returning (scriptsig, witness_stack), taking as inputs:
# - A transaction to sign (CTransaction)
# - An input position (int)
# - The spent UTXOs by this transaction (list of CTxOut)
# - Whether to produce a valid spend (bool)
# - A string with an expected error message for failure case if known
# - The (pre-taproot) sigops weight consumed by a successful spend
# - Whether this spend cannot fail
# - Whether this test demands being placed in a txin with no corresponding txout (for testing SIGHASH_SINGLE behavior)
Spender = namedtuple("Spender", "script,comment,is_standard,sat_function,err_msg,sigops_weight,no_fail,need_vin_vout_mismatch")
def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=False, genesis_hash=None, spk_mutate_pre_p2sh=None, failure=None, standard=True, err_msg=None, sigops_weight=0, need_vin_vout_mismatch=False, **kwargs):
"""Helper for constructing Spender objects using the context signing framework.
* tap: a TaprootInfo object (see taproot_construct), for Taproot spends (cannot be combined with pkh, witv0, or script)
* witv0: boolean indicating the use of witness v0 spending (needs one of script or pkh)
* script: the actual script executed (for bare/P2WSH/P2SH spending)
* pkh: the public key for P2PKH or P2WPKH spending
* p2sh: whether the output is P2SH wrapper (this is supported even for Taproot, where it makes the output unencumbered)
* spk_mutate_pre_p2sh: a callable to be applied to the scriptPubKey (before potentially P2SH-wrapping it)
* failure: a dict of entries to override in the context when intentionally failing to spend (if None, no_fail will be set)
* standard: whether the (valid version of) spending is expected to be standard
* err_msg: a string with an expected error message for failure (or None, if not cared about)
* sigops_weight: the pre-taproot sigops weight consumed by a successful spend
"""
conf = dict()
global g_genesis_hash
if genesis_hash is None:
genesis_hash = g_genesis_hash
conf["genesis_hash"] = genesis_hash
# Compute scriptPubKey and set useful defaults based on the inputs.
if witv0:
assert tap is None
conf["mode"] = "witv0"
if pkh is not None:
# P2WPKH
assert script is None
pubkeyhash = hash160(pkh)
spk = CScript([OP_0, pubkeyhash])
conf["scriptcode"] = CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])
conf["script_witv0"] = None
conf["inputs"] = [getter("sign"), pkh]
elif script is not None:
# P2WSH
spk = CScript([OP_0, sha256(script)])
conf["scriptcode"] = script
conf["script_witv0"] = script
else:
assert False
elif tap is None:
conf["mode"] = "legacy"
if pkh is not None:
# P2PKH
assert script is None
pubkeyhash = hash160(pkh)
spk = CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])
conf["scriptcode"] = spk
conf["inputs"] = [getter("sign"), pkh]
elif script is not None:
# bare
spk = script
conf["scriptcode"] = script
else:
assert False
else:
assert script is None
conf["mode"] = "taproot"
conf["tap"] = tap
spk = tap.scriptPubKey
if spk_mutate_pre_p2sh is not None:
spk = spk_mutate_pre_p2sh(spk)
if p2sh:
# P2SH wrapper can be combined with anything else
conf["script_p2sh"] = spk
spk = CScript([OP_HASH160, hash160(spk), OP_EQUAL])
conf = {**conf, **kwargs}
def sat_fn(tx, idx, utxos, valid):
if valid:
return spend(tx, idx, utxos, **conf)
else:
assert failure is not None
return spend(tx, idx, utxos, **{**conf, **failure})
return Spender(script=spk, comment=comment, is_standard=standard, sat_function=sat_fn, err_msg=err_msg, sigops_weight=sigops_weight, no_fail=failure is None, need_vin_vout_mismatch=need_vin_vout_mismatch)
def add_spender(spenders, *args, **kwargs):
"""Make a spender using make_spender, and add it to spenders."""
spenders.append(make_spender(*args, **kwargs))
# === Helpers for the test ===
def random_checksig_style(pubkey):
"""Creates a random CHECKSIG* tapscript that would succeed with only the valid signature on witness stack."""
return bytes(CScript([pubkey, OP_CHECKSIG]))
opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD])
if (opcode == OP_CHECKSIGVERIFY):
ret = CScript([pubkey, opcode, OP_1])
elif (opcode == OP_CHECKSIGADD):
num = random.choice([0, 0x7fffffff, -0x7fffffff])
ret = CScript([num, pubkey, opcode, num + 1, OP_EQUAL])
else:
ret = CScript([pubkey, opcode])
return bytes(ret)
def random_bytes(n):
"""Return a random bytes object of length n."""
return bytes(random.getrandbits(8) for i in range(n))
def bitflipper(expr):
"""Return a callable that evaluates expr and returns it with a random bitflip."""
def fn(ctx):
sub = deep_eval(ctx, expr)
assert isinstance(sub, bytes)
return (int.from_bytes(sub, 'little') ^ (1 << random.randrange(len(sub) * 8))).to_bytes(len(sub), 'little')
return fn
def zero_appender(expr):
"""Return a callable that evaluates expr and returns it with a zero added."""
return lambda ctx: deep_eval(ctx, expr) + b"\x00"
def byte_popper(expr):
"""Return a callable that evaluates expr and returns it with its last byte removed."""
return lambda ctx: deep_eval(ctx, expr)[:-1]
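# Illustration (hypothetical value): if an expression evaluates to sig = b'\xaa' * 64, then bitflipper(expr)
# yields sig with exactly one randomly chosen bit inverted, zero_appender(expr) yields sig + b'\x00'
# (65 bytes), and byte_popper(expr) yields sig[:-1] (63 bytes).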
# Expected error strings
ERR_SIG_SIZE = {"err_msg": "Invalid Schnorr signature size"}
ERR_SIG_HASHTYPE = {"err_msg": "Invalid Schnorr signature hash type"}
ERR_SIG_SCHNORR = {"err_msg": "Invalid Schnorr signature"}
ERR_OP_RETURN = {"err_msg": "OP_RETURN was encountered"}
ERR_CONTROLBLOCK_SIZE = {"err_msg": "Invalid Taproot control block size"}
ERR_WITNESS_PROGRAM_MISMATCH = {"err_msg": "Witness program hash mismatch"}
ERR_PUSH_LIMIT = {"err_msg": "Push value size limit exceeded"}
ERR_DISABLED_OPCODE = {"err_msg": "Attempted to use a disabled opcode"}
ERR_TAPSCRIPT_CHECKMULTISIG = {"err_msg": "OP_CHECKMULTISIG(VERIFY) is not available in tapscript"}
ERR_MINIMALIF = {"err_msg": "OP_IF/NOTIF argument must be minimal in tapscript"}
ERR_UNKNOWN_PUBKEY = {"err_msg": "Public key is neither compressed or uncompressed"}
ERR_STACK_SIZE = {"err_msg": "Stack size limit exceeded"}
ERR_CLEANSTACK = {"err_msg": "Stack size must be exactly one after execution"}
ERR_STACK_EMPTY = {"err_msg": "Operation not valid with the current stack size"}
ERR_SIGOPS_RATIO = {"err_msg": "Too much signature validation relative to witness weight"}
ERR_UNDECODABLE = {"err_msg": "Opcode missing or not understood"}
ERR_NO_SUCCESS = {"err_msg": "Script evaluated without error but finished with a false/empty top stack element"}
ERR_EMPTY_WITNESS = {"err_msg": "Witness program was passed an empty witness"}
ERR_CHECKSIGVERIFY = {"err_msg": "Script failed an OP_CHECKSIGVERIFY operation"}
VALID_SIGHASHES_ECDSA = [
SIGHASH_ALL,
SIGHASH_NONE,
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY + SIGHASH_ALL,
SIGHASH_ANYONECANPAY + SIGHASH_NONE,
SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
VALID_SIGHASHES_TAPROOT = [SIGHASH_DEFAULT] + VALID_SIGHASHES_ECDSA
VALID_SIGHASHES_TAPROOT_SINGLE = [
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
VALID_SIGHASHES_TAPROOT_NO_SINGLE = [h for h in VALID_SIGHASHES_TAPROOT if h not in VALID_SIGHASHES_TAPROOT_SINGLE]
SIGHASH_BITFLIP = {"failure": {"sighash": bitflipper(default_sighash)}}
SIG_POP_BYTE = {"failure": {"sign": byte_popper(default_sign)}}
SINGLE_SIG = {"inputs": [getter("sign")]}
SIG_ADD_ZERO = {"failure": {"sign": zero_appender(default_sign)}}
DUST_LIMIT = 1200
MIN_FEE = 50000
# === Actual test cases ===
def spenders_taproot_active():
"""Return a list of Spenders for testing post-Taproot activation behavior."""
secs = [generate_privkey() for _ in range(8)]
pubs = [compute_xonly_pubkey(sec)[0] for sec in secs]
spenders = []
# == Tests for BIP340 signature validation. ==
# These are primarily tested through the test vectors implemented in libsecp256k1, and in src/tests/key_tests.cpp.
# Some things are tested programmatically as well here.
tap = taproot_construct(pubs[0])
# Test with key with bit flipped.
add_spender(spenders, "sig/key", tap=tap, key=secs[0], failure={"key_tweaked": bitflipper(default_key_tweaked)}, **ERR_SIG_SCHNORR)
# Test with sighash with bit flipped.
add_spender(spenders, "sig/sighash", tap=tap, key=secs[0], failure={"sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# Test with invalid R sign.
add_spender(spenders, "sig/flip_r", tap=tap, key=secs[0], failure={"flag_flip_r": True}, **ERR_SIG_SCHNORR)
# Test with invalid P sign.
add_spender(spenders, "sig/flip_p", tap=tap, key=secs[0], failure={"flag_flip_p": True}, **ERR_SIG_SCHNORR)
# Test with signature with bit flipped.
add_spender(spenders, "sig/bitflip", tap=tap, key=secs[0], failure={"signature": bitflipper(default_signature)}, **ERR_SIG_SCHNORR)
# == Tests for signature hashing ==
# Run all tests once with no annex, and once with a valid random annex.
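    # (Per BIP341, an annex is the final witness stack element and must start with the byte 0x50 (ANNEX_TAG),
    # which is why the random annex below is prefixed with bytes([ANNEX_TAG]).)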
for annex in [None, lambda _: bytes([ANNEX_TAG]) + random_bytes(random.randrange(0, 250))]:
# Non-empty annex is non-standard
no_annex = annex is None
# Sighash mutation tests (test all sighash combinations)
for hashtype in VALID_SIGHASHES_TAPROOT:
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
# Pure pubkey
tap = taproot_construct(pubs[0])
add_spender(spenders, "sighash/purepk", tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Pubkey/P2PK script combination
scripts = [("s0", CScript(random_checksig_style(pubs[1])))]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/keypath_hashtype_%x" % hashtype, tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath_hashtype_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Test SIGHASH_SINGLE behavior in combination with mismatching outputs
if hashtype in VALID_SIGHASHES_TAPROOT_SINGLE:
add_spender(spenders, "sighash/keypath_hashtype_mis_%x" % hashtype, tap=tap, key=secs[0], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
add_spender(spenders, "sighash/scriptpath_hashtype_mis_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), **SINGLE_SIG, failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
# Test OP_CODESEPARATOR impact on sighashing.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
scripts = [
("pk_codesep", CScript(random_checksig_style(pubs[1]) + bytes([OP_CODESEPARATOR]))), # codesep after checksig
("codesep_pk", CScript(bytes([OP_CODESEPARATOR]) + random_checksig_style(pubs[1]))), # codesep before checksig
("branched_codesep", CScript([random_bytes(random.randrange(511)), OP_DROP, OP_IF, OP_CODESEPARATOR, pubs[0], OP_ELSE, OP_CODESEPARATOR, pubs[1], OP_ENDIF, OP_CHECKSIG])), # branch dependent codesep
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/pk_codesep", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/codesep_pk", tap=tap, leaf="codesep_pk", key=secs[1], codeseppos=0, **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/left", tap=tap, leaf="branched_codesep", key=secs[0], codeseppos=3, **common, inputs=[getter("sign"), b'\x01'], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/right", tap=tap, leaf="branched_codesep", key=secs[1], codeseppos=6, **common, inputs=[getter("sign"), b''], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Reusing the scripts above, test that various features affect the sighash.
add_spender(spenders, "sighash/annex", tap=tap, leaf="pk_codesep", key=secs[1], hashtype=hashtype, standard=False, **SINGLE_SIG, annex=bytes([ANNEX_TAG]), failure={"sighash": override(default_sighash, annex=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/script", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, script_taproot=tap.leaves["codesep_pk"].script)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/leafver", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leafversion=random.choice([x & 0xFE for x in range(0x100) if x & 0xFE != 0xC0]))}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leaf=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/keypath", tap=tap, key=secs[0], **common, failure={"sighash": override(default_sighash, leaf="pk_codesep")}, **ERR_SIG_SCHNORR)
# Test that invalid hashtypes don't work, both in key path and script path spends
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for invalid_hashtype in [x for x in range(0x100) if x not in VALID_SIGHASHES_TAPROOT]:
add_spender(spenders, "sighash/keypath_unk_hashtype_%x" % invalid_hashtype, tap=tap, key=secs[0], hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/scriptpath_unk_hashtype_%x" % invalid_hashtype, tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
# Test that hashtype 0 cannot have a hashtype byte, and 1 must have one.
add_spender(spenders, "sighash/hashtype0_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype0_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype1_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
# Test that hashtype 0 and hashtype 1 cannot be transmuted into each other.
add_spender(spenders, "sighash/hashtype0to1_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype0to1_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
# Test aspects of signatures with unusual lengths
for hashtype in [SIGHASH_DEFAULT, random.choice(VALID_SIGHASHES_TAPROOT)]:
scripts = [
("csv", CScript([pubs[2], OP_CHECKSIGVERIFY, OP_1])),
("cs_pos", CScript([pubs[2], OP_CHECKSIG])),
("csa_pos", CScript([OP_0, pubs[2], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
("cs_neg", CScript([pubs[2], OP_CHECKSIG, OP_NOT])),
("csa_neg", CScript([OP_2, pubs[2], OP_CHECKSIGADD, OP_2, OP_EQUAL]))
]
random.shuffle(scripts)
tap = taproot_construct(pubs[3], scripts)
# Empty signatures
add_spender(spenders, "siglen/empty_keypath", tap=tap, key=secs[3], hashtype=hashtype, failure={"sign": b""}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_CHECKSIGVERIFY)
add_spender(spenders, "siglen/empty_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(1, 63))}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(66, 100))}, **ERR_SIG_SIZE)
# Appending a zero byte to signatures invalidates them
add_spender(spenders, "siglen/padzero_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
# Removing the last byte from signatures invalidates them
add_spender(spenders, "siglen/popbyte_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
# Verify that an invalid signature is not allowed, not even when the CHECKSIG* is expected to fail.
add_spender(spenders, "siglen/invalid_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "siglen/invalid_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# == Test that BIP341 spending only applies to witness version 1, program length 32, no P2SH ==
for p2sh in [False, True]:
for witver in range(1, 17):
for witlen in [20, 31, 32, 33]:
def mutate(spk):
prog = spk[2:]
assert len(prog) == 32
if witlen < 32:
prog = prog[0:witlen]
elif witlen > 32:
prog += bytes([0 for _ in range(witlen - 32)])
return CScript([CScriptOp.encode_op_n(witver), prog])
scripts = [("s0", CScript([pubs[0], OP_CHECKSIG])), ("dummy", CScript([OP_RETURN]))]
tap = taproot_construct(pubs[1], scripts)
if not p2sh and witver == 1 and witlen == 32:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, failure={"leaf": "dummy"}, **ERR_OP_RETURN)
else:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], standard=False)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, standard=False)
# == Test various aspects of BIP341 spending paths ==
# A set of functions that compute the hashing partner in a Merkle tree, designed to exercise
# edge cases. This relies on the taproot_construct feature that a lambda can be passed in
# instead of a subtree, to compute the partner to be hashed with.
PARTNER_MERKLE_FN = [
# Combine with itself
lambda h: h,
# Combine with hash 0
lambda h: bytes([0 for _ in range(32)]),
# Combine with hash 2^256-1
lambda h: bytes([0xff for _ in range(32)]),
# Combine with itself-1 (BE)
lambda h: (int.from_bytes(h, 'big') - 1).to_bytes(32, 'big'),
# Combine with itself+1 (BE)
lambda h: (int.from_bytes(h, 'big') + 1).to_bytes(32, 'big'),
# Combine with itself-1 (LE)
lambda h: (int.from_bytes(h, 'little') - 1).to_bytes(32, 'big'),
# Combine with itself+1 (LE)
lambda h: (int.from_bytes(h, 'little') + 1).to_bytes(32, 'little'),
# Combine with random bitflipped version of self.
lambda h: (int.from_bytes(h, 'little') ^ (1 << random.randrange(256))).to_bytes(32, 'little')
]
    # Start with a tree that has depth 1 for "128deep" and depth 2 for "129deep".
scripts = [("128deep", CScript([pubs[0], OP_CHECKSIG])), [("129deep", CScript([pubs[0], OP_CHECKSIG])), random.choice(PARTNER_MERKLE_FN)]]
# Add 127 nodes on top of that tree, so that "128deep" and "129deep" end up at their designated depths.
for _ in range(127):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
# Test that spends with a depth of 128 work, but 129 doesn't (even with a tree with weird Merkle branches in it).
add_spender(spenders, "spendpath/merklelimit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"leaf": "129deep"}, **ERR_CONTROLBLOCK_SIZE)
# Test that flipping the negation bit invalidates spends.
add_spender(spenders, "spendpath/negflag", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"negflag": lambda ctx: 1 - default_negflag(ctx)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the Merkle branch invalidate it.
add_spender(spenders, "spendpath/bitflipmerkle", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"merklebranch": bitflipper(default_merklebranch)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the inner pubkey invalidate it.
add_spender(spenders, "spendpath/bitflippubkey", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"pubkey_inner": bitflipper(default_pubkey_inner)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that empty witnesses are invalid.
add_spender(spenders, "spendpath/emptywit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"witness": []}, **ERR_EMPTY_WITNESS)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padlongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/trunclongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
scripts = [("s", CScript([pubs[0], OP_CHECKSIG]))]
tap = taproot_construct(pubs[1], scripts)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/truncshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block to 1 byte ("-1 Merkle length") invalidates it
add_spender(spenders, "spendpath/trunc1shortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:1]}, **ERR_CONTROLBLOCK_SIZE)
# == Test BIP342 edge cases ==
csa_low_val = random.randrange(0, 17) # Within range for OP_n
csa_low_result = csa_low_val + 1
csa_high_val = random.randrange(17, 100) if random.getrandbits(1) else random.randrange(-100, -1) # Outside OP_n range
csa_high_result = csa_high_val + 1
OVERSIZE_NUMBER = 2**31
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER))), 6)
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER-1))), 5)
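    # (CScriptNum uses a minimal little-endian sign-magnitude encoding and .encode() prepends a one-byte
    # push length: 2**31 needs an extra padding byte to keep its sign bit clear (5 + 1 bytes), while
    # 2**31 - 1 fits in 4 + 1 bytes.)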
big_choices = []
big_scriptops = []
for i in range(1000):
r = random.randrange(len(pubs))
big_choices.append(r)
big_scriptops += [pubs[r], OP_CHECKSIGVERIFY]
def big_spend_inputs(ctx):
"""Helper function to construct the script input for t33/t34 below."""
# Instead of signing 999 times, precompute signatures for every (key, hashtype) combination
sigs = {}
for ht in VALID_SIGHASHES_TAPROOT:
for k in range(len(pubs)):
sigs[(k, ht)] = override(default_sign, hashtype=ht, key=secs[k])(ctx)
num = get(ctx, "num")
return [sigs[(big_choices[i], random.choice(VALID_SIGHASHES_TAPROOT))] for i in range(num - 1, -1, -1)]
# Various BIP342 features
scripts = [
# 0) drop stack element and OP_CHECKSIG
("t0", CScript([OP_DROP, pubs[1], OP_CHECKSIG])),
# 1) normal OP_CHECKSIG
("t1", CScript([pubs[1], OP_CHECKSIG])),
# 2) normal OP_CHECKSIGVERIFY
("t2", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 3) Hypothetical OP_CHECKMULTISIG script that takes a single sig as input
("t3", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIG])),
# 4) Hypothetical OP_CHECKMULTISIGVERIFY script that takes a single sig as input
("t4", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIGVERIFY, OP_1])),
# 5) OP_IF script that needs a true input
("t5", CScript([OP_IF, pubs[1], OP_CHECKSIG, OP_ELSE, OP_RETURN, OP_ENDIF])),
# 6) OP_NOTIF script that needs a true input
("t6", CScript([OP_NOTIF, OP_RETURN, OP_ELSE, pubs[1], OP_CHECKSIG, OP_ENDIF])),
# 7) OP_CHECKSIG with an empty key
("t7", CScript([OP_0, OP_CHECKSIG])),
# 8) OP_CHECKSIGVERIFY with an empty key
("t8", CScript([OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 9) normal OP_CHECKSIGADD that also ensures return value is correct
("t9", CScript([csa_low_val, pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 10) OP_CHECKSIGADD with empty key
("t10", CScript([csa_low_val, OP_0, OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 11) OP_CHECKSIGADD with missing counter stack element
("t11", CScript([pubs[1], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
# 12) OP_CHECKSIG that needs invalid signature
("t12", CScript([pubs[1], OP_CHECKSIGVERIFY, pubs[0], OP_CHECKSIG, OP_NOT])),
# 13) OP_CHECKSIG with empty key that needs invalid signature
("t13", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_CHECKSIG, OP_NOT])),
# 14) OP_CHECKSIGADD that needs invalid signature
("t14", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, pubs[0], OP_CHECKSIGADD, OP_NOT])),
# 15) OP_CHECKSIGADD with empty key that needs invalid signature
("t15", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGADD, OP_NOT])),
# 16) OP_CHECKSIG with unknown pubkey type
("t16", CScript([OP_1, OP_CHECKSIG])),
# 17) OP_CHECKSIGADD with unknown pubkey type
("t17", CScript([OP_0, OP_1, OP_CHECKSIGADD])),
# 18) OP_CHECKSIGVERIFY with unknown pubkey type
("t18", CScript([OP_1, OP_CHECKSIGVERIFY, OP_1])),
# 19) script longer than 10000 bytes and over 201 non-push opcodes
("t19", CScript([OP_0, OP_0, OP_2DROP] * 10001 + [pubs[1], OP_CHECKSIG])),
# 20) OP_CHECKSIGVERIFY with empty key
("t20", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 21) Script that grows the stack to 1000 elements
("t21", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 999 + [OP_DROP] * 999)),
# 22) Script that grows the stack to 1001 elements
("t22", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 1000 + [OP_DROP] * 1000)),
# 23) Script that expects an input stack of 1000 elements
("t23", CScript([OP_DROP] * 999 + [pubs[1], OP_CHECKSIG])),
# 24) Script that expects an input stack of 1001 elements
("t24", CScript([OP_DROP] * 1000 + [pubs[1], OP_CHECKSIG])),
# 25) Script that pushes a MAX_SCRIPT_ELEMENT_SIZE-bytes element
("t25", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE), OP_DROP, pubs[1], OP_CHECKSIG])),
# 26) Script that pushes a (MAX_SCRIPT_ELEMENT_SIZE+1)-bytes element
("t26", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, pubs[1], OP_CHECKSIG])),
# 27) CHECKSIGADD that must fail because numeric argument number is >4 bytes
("t27", CScript([CScriptNum(OVERSIZE_NUMBER), pubs[1], OP_CHECKSIGADD])),
# 28) Pushes random CScriptNum value, checks OP_CHECKSIGADD result
("t28", CScript([csa_high_val, pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 29) CHECKSIGADD that succeeds with proper sig because numeric argument number is <=4 bytes
("t29", CScript([CScriptNum(OVERSIZE_NUMBER-1), pubs[1], OP_CHECKSIGADD])),
# 30) Variant of t1 with "normal" 33-byte pubkey
("t30", CScript([b'\x03' + pubs[1], OP_CHECKSIG])),
# 31) Variant of t2 with "normal" 33-byte pubkey
("t31", CScript([b'\x02' + pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 32) Variant of t28 with "normal" 33-byte pubkey
("t32", CScript([csa_high_val, b'\x03' + pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 33) 999-of-999 multisig
("t33", CScript(big_scriptops[:1998] + [OP_1])),
# 34) 1000-of-1000 multisig
("t34", CScript(big_scriptops[:2000] + [OP_1])),
# 35) Variant of t9 that uses a non-minimally encoded input arg
("t35", CScript([bytes([csa_low_val]), pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 36) Empty script
("t36", CScript([])),
]
# Add many dummies to test huge trees
for j in range(100000):
scripts.append((None, CScript([OP_RETURN, random.randrange(100000)])))
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
common = {
"hashtype": hashtype,
"key": secs[1],
"tap": tap,
}
# Test that MAX_SCRIPT_ELEMENT_SIZE byte stack element inputs are valid, but not one more (and 80 bytes is standard but 81 is not).
add_spender(spenders, "tapscript/inputmaxlimit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE)], failure={"inputs": [getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1)]}, **ERR_PUSH_LIMIT)
add_spender(spenders, "tapscript/input80limit", leaf="t0", **common, inputs=[getter("sign"), random_bytes(80)])
add_spender(spenders, "tapscript/input81limit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(81)])
# Test that OP_CHECKMULTISIG and OP_CHECKMULTISIGVERIFY cause failure, but OP_CHECKSIG and OP_CHECKSIGVERIFY work.
add_spender(spenders, "tapscript/disabled_checkmultisig", leaf="t1", **common, **SINGLE_SIG, failure={"leaf": "t3"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
add_spender(spenders, "tapscript/disabled_checkmultisigverify", leaf="t2", **common, **SINGLE_SIG, failure={"leaf": "t4"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
# Test that OP_IF and OP_NOTIF do not accept non-0x01 as truth value (the MINIMALIF rule is consensus in Tapscript)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x02']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x03']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0001']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0100']}, **ERR_MINIMALIF)
# Test that 1-byte public keys (which are unknown) are acceptable but nonstandard with unrelated signatures, but 0-byte public keys are not valid.
add_spender(spenders, "tapscript/unkpk/checksig", leaf="t16", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigadd", leaf="t17", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigverify", leaf="t18", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
# Test that 33-byte public keys (which are unknown) are acceptable but nonstandard with valid signatures, but normal pubkeys are not valid in that case.
add_spender(spenders, "tapscript/oldpk/checksig", leaf="t30", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t1"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigadd", leaf="t31", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t2"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigverify", leaf="t32", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t28"}, **ERR_SIG_SCHNORR)
# Test that 0-byte public keys are not acceptable.
add_spender(spenders, "tapscript/emptypk/checksig", leaf="t1", **SINGLE_SIG, **common, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigverify", leaf="t2", **SINGLE_SIG, **common, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t35", standard=False, **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
# Test that OP_CHECKSIGADD results are as expected
add_spender(spenders, "tapscript/checksigaddresults", leaf="t28", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
add_spender(spenders, "tapscript/checksigaddoversize", leaf="t29", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
# Test that OP_CHECKSIGADD requires 3 stack elements.
add_spender(spenders, "tapscript/checksigadd3args", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t11"}, **ERR_STACK_EMPTY)
# Test that empty signatures do not cause script failure in OP_CHECKSIG and OP_CHECKSIGADD (but do fail with empty pubkey, and do fail OP_CHECKSIGVERIFY)
add_spender(spenders, "tapscript/emptysigs/checksig", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t13"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/nochecksigverify", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t20"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/checksigadd", leaf="t14", **common, inputs=[b'', getter("sign")], failure={"leaf": "t15"}, **ERR_UNKNOWN_PUBKEY)
# Test that scripts over 10000 bytes (and over 201 non-push ops) are acceptable.
add_spender(spenders, "tapscript/no10000limit", leaf="t19", **SINGLE_SIG, **common)
# Test that a stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000stack", leaf="t21", **SINGLE_SIG, **common, failure={"leaf": "t22"}, **ERR_STACK_SIZE)
# Test that an input stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000inputs", leaf="t23", **common, inputs=[getter("sign")] + [b'' for _ in range(999)], failure={"leaf": "t24", "inputs": [getter("sign")] + [b'' for _ in range(1000)]}, **ERR_STACK_SIZE)
# Test that pushing a MAX_SCRIPT_ELEMENT_SIZE byte stack element is valid, but one longer is not.
add_spender(spenders, "tapscript/pushmaxlimit", leaf="t25", **common, **SINGLE_SIG, failure={"leaf": "t26"}, **ERR_PUSH_LIMIT)
# Test that 999-of-999 multisig works (but 1000-of-1000 triggers stack size limits)
add_spender(spenders, "tapscript/bigmulti", leaf="t33", **common, inputs=big_spend_inputs, num=999, failure={"leaf": "t34", "num": 1000}, **ERR_STACK_SIZE)
# Test that the CLEANSTACK rule is consensus critical in tapscript
add_spender(spenders, "tapscript/cleanstack", leaf="t36", tap=tap, inputs=[b'\x01'], failure={"inputs": [b'\x01', b'\x01']}, **ERR_CLEANSTACK)
# == Test for sigops ratio limit ==
# Given a number n, and a public key pk, functions that produce a (CScript, sigops). Each script takes as
# input a valid signature with the passed pk followed by a dummy push of bytes that are to be dropped, and
# will execute sigops signature checks.
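    # For example (illustrative expansion), the first entry below called as fn(2, pk) would yield
    # (CScript([OP_DROP, pk, OP_2DUP, OP_CHECKSIGVERIFY, OP_2DUP, OP_CHECKSIGVERIFY, OP_CHECKSIG]), 3):
    # the dummy push is dropped, the signature is checked twice via OP_2DUP/OP_CHECKSIGVERIFY and once more
    # by the final OP_CHECKSIG, for n + 1 = 3 executed sigops.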
SIGOPS_RATIO_SCRIPTS = [
        # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIG.
lambda n, pk: (CScript([OP_DROP, pk] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGVERIFY.
lambda n, pk: (CScript([OP_DROP, pk, OP_0, OP_IF, OP_2DUP, OP_CHECKSIGVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_2, OP_SWAP, OP_CHECKSIGADD, OP_3, OP_EQUAL]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIG.
lambda n, pk: (CScript([random_bytes(220), OP_2DROP, pk, OP_1, OP_NOTIF, OP_2DUP, OP_CHECKSIG, OP_VERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_4, OP_SWAP, OP_CHECKSIGADD, OP_5, OP_EQUAL]), n + 1),
        # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGADD.
lambda n, pk: (CScript([OP_DROP, pk, OP_1, OP_IF, OP_ELSE, OP_2DUP, OP_6, OP_SWAP, OP_CHECKSIGADD, OP_7, OP_EQUALVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_8, OP_SWAP, OP_CHECKSIGADD, OP_9, OP_EQUAL]), n + 1),
# n+1 OP_CHECKSIGs, but also one OP_CHECKSIG with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, pk, OP_CHECKSIG, OP_NOT, OP_VERIFY, pk] + [OP_2DUP, OP_CHECKSIG, OP_VERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGADDs and 1 OP_CHECKSIG, but also an OP_CHECKSIGADD with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, OP_10, pk, OP_CHECKSIGADD, OP_10, OP_EQUALVERIFY, pk] + [OP_2DUP, OP_16, OP_SWAP, OP_CHECKSIGADD, b'\x11', OP_EQUALVERIFY] * n + [OP_CHECKSIG]), n + 1),
]
for annex in [None, bytes([ANNEX_TAG]) + random_bytes(random.randrange(1000))]:
for hashtype in [SIGHASH_DEFAULT, SIGHASH_ALL]:
for pubkey in [pubs[1], random_bytes(random.choice([x for x in range(2, 81) if x != 32]))]:
for fn_num, fn in enumerate(SIGOPS_RATIO_SCRIPTS):
merkledepth = random.randrange(129)
def predict_sigops_ratio(n, dummy_size):
"""Predict whether spending fn(n, pubkey) with dummy_size will pass the ratio test."""
script, sigops = fn(n, pubkey)
# Predict the size of the witness for a given choice of n
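                        # (The length prefixes below use CompactSize encoding: 1 byte for sizes under 253 and
                        # 3 bytes for sizes up to 0xffff, hence the `1 + 2 * (x >= 253)` terms.)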
stacklen_size = 1
sig_size = 64 + (hashtype != SIGHASH_DEFAULT)
siglen_size = 1
dummylen_size = 1 + 2 * (dummy_size >= 253)
script_size = len(script)
scriptlen_size = 1 + 2 * (script_size >= 253)
control_size = 33 + 32 * merkledepth
controllen_size = 1 + 2 * (control_size >= 253)
annex_size = 0 if annex is None else len(annex)
annexlen_size = 0 if annex is None else 1 + 2 * (annex_size >= 253)
witsize = stacklen_size + sig_size + siglen_size + dummy_size + dummylen_size + script_size + scriptlen_size + control_size + controllen_size + annex_size + annexlen_size
# sigops ratio test
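                        # Equivalently, the witness must be at least 50 * sigops - 50 bytes; e.g. (illustrative)
                        # a spend executing 5 sigops needs a witness of at least 200 bytes to pass.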
return witsize + 50 >= 50 * sigops
# Make sure n is high enough that with empty dummy, the script is not valid
n = 0
while predict_sigops_ratio(n, 0):
n += 1
# But allow picking a bit higher still
n += random.randrange(5)
# Now pick dummy size *just* large enough that the overall construction passes
dummylen = 0
while not predict_sigops_ratio(n, dummylen):
dummylen += 1
scripts = [("s", fn(n, pubkey)[0])]
for _ in range(merkledepth):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
standard = annex is None and dummylen <= 80 and len(pubkey) == 32
add_spender(spenders, "tapscript/sigopsratio_%i" % fn_num, tap=tap, leaf="s", annex=annex, hashtype=hashtype, key=secs[1], inputs=[getter("sign"), random_bytes(dummylen)], standard=standard, failure={"inputs": [getter("sign"), random_bytes(dummylen - 1)]}, **ERR_SIGOPS_RATIO)
# Future leaf versions
for leafver in range(0, 0x100, 2):
if leafver == LEAF_VERSION_TAPSCRIPT or leafver == ANNEX_TAG:
# Skip the defined LEAF_VERSION_TAPSCRIPT, and the ANNEX_TAG which is not usable as leaf version
continue
scripts = [
("bare_c0", CScript([OP_NOP])),
("bare_unkver", CScript([OP_NOP]), leafver),
("return_c0", CScript([OP_RETURN])),
("return_unkver", CScript([OP_RETURN]), leafver),
("undecodable_c0", CScript([OP_PUSHDATA1])),
("undecodable_unkver", CScript([OP_PUSHDATA1]), leafver),
("bigpush_c0", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP])),
("bigpush_unkver", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP]), leafver),
("1001push_c0", CScript([OP_0] * 1001)),
("1001push_unkver", CScript([OP_0] * 1001), leafver),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "unkver/bare", standard=False, tap=tap, leaf="bare_unkver", failure={"leaf": "bare_c0"}, **ERR_CLEANSTACK)
add_spender(spenders, "unkver/return", standard=False, tap=tap, leaf="return_unkver", failure={"leaf": "return_c0"}, **ERR_OP_RETURN)
add_spender(spenders, "unkver/undecodable", standard=False, tap=tap, leaf="undecodable_unkver", failure={"leaf": "undecodable_c0"}, **ERR_UNDECODABLE)
add_spender(spenders, "unkver/bigpush", standard=False, tap=tap, leaf="bigpush_unkver", failure={"leaf": "bigpush_c0"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "unkver/1001push", standard=False, tap=tap, leaf="1001push_unkver", failure={"leaf": "1001push_c0"}, **ERR_STACK_SIZE)
add_spender(spenders, "unkver/1001inputs", standard=False, tap=tap, leaf="bare_unkver", inputs=[b'']*1001, failure={"leaf": "bare_c0"}, **ERR_STACK_SIZE)
# OP_SUCCESSx tests.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for opval in range(76, 0x100):
opcode = CScriptOp(opval)
if not is_op_success(opcode):
continue
scripts = [
("bare_success", CScript([opcode])),
("bare_nop", CScript([OP_NOP])),
("unexecif_success", CScript([OP_0, OP_IF, opcode, OP_ENDIF])),
("unexecif_nop", CScript([OP_0, OP_IF, OP_NOP, OP_ENDIF])),
("return_success", CScript([OP_RETURN, opcode])),
("return_nop", CScript([OP_RETURN, OP_NOP])),
("undecodable_success", CScript([opcode, OP_PUSHDATA1])),
("undecodable_nop", CScript([OP_NOP, OP_PUSHDATA1])),
("undecodable_bypassed_success", CScript([OP_PUSHDATA1, OP_2, opcode])),
("bigpush_success", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, opcode])),
("bigpush_nop", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, OP_NOP])),
("1001push_success", CScript([OP_0] * 1001 + [opcode])),
("1001push_nop", CScript([OP_0] * 1001 + [OP_NOP])),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "opsuccess/bare", standard=False, tap=tap, leaf="bare_success", failure={"leaf": "bare_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/unexecif", standard=False, tap=tap, leaf="unexecif_success", failure={"leaf": "unexecif_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/return", standard=False, tap=tap, leaf="return_success", failure={"leaf": "return_nop"}, **ERR_OP_RETURN)
add_spender(spenders, "opsuccess/undecodable", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_nop"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/undecodable_bypass", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_bypassed_success"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/bigpush", standard=False, tap=tap, leaf="bigpush_success", failure={"leaf": "bigpush_nop"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "opsuccess/1001push", standard=False, tap=tap, leaf="1001push_success", failure={"leaf": "1001push_nop"}, **ERR_STACK_SIZE)
add_spender(spenders, "opsuccess/1001inputs", standard=False, tap=tap, leaf="bare_success", inputs=[b'']*1001, failure={"leaf": "bare_nop"}, **ERR_STACK_SIZE)
# Non-OP_SUCCESSx (verify that those aren't accidentally treated as OP_SUCCESSx)
for opval in range(0, 0x100):
opcode = CScriptOp(opval)
if is_op_success(opcode):
continue
scripts = [
("normal", CScript([OP_RETURN, opcode] + [OP_NOP] * 75)),
("op_success", CScript([OP_RETURN, CScriptOp(0x50)]))
]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "alwaysvalid/notsuccessx", tap=tap, leaf="op_success", inputs=[], standard=False, failure={"leaf": "normal"}) # err_msg differs based on opcode
# == Legacy tests ==
# Also add a few legacy spends into the mix, so that transactions which combine taproot and pre-taproot spends get tested too.
for compressed in [False, True]:
eckey1 = ECKey()
eckey1.set(generate_privkey(), compressed)
pubkey1 = eckey1.get_pubkey().get_bytes()
eckey2 = ECKey()
eckey2.set(generate_privkey(), compressed)
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = (hashtype in VALID_SIGHASHES_ECDSA) and (compressed or not witv0)
add_spender(spenders, "legacy/pk-wrongkey", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([pubkey1, OP_CHECKSIG]), **SINGLE_SIG, key=eckey1, failure={"key": eckey2}, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
add_spender(spenders, "legacy/pkh-sighashflip", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, pkh=pubkey1, key=eckey1, **SIGHASH_BITFLIP, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
# Verify that OP_CHECKSIGADD wasn't accidentally added to pre-taproot validation logic.
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = hashtype in VALID_SIGHASHES_ECDSA and (p2sh or witv0)
add_spender(spenders, "compat/nocsa", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([OP_IF, OP_11, pubkey1, OP_CHECKSIGADD, OP_12, OP_EQUAL, OP_ELSE, pubkey1, OP_CHECKSIG, OP_ENDIF]), key=eckey1, sigops_weight=4-3*witv0, inputs=[getter("sign"), b''], failure={"inputs": [getter("sign"), b'\x01']}, **ERR_UNDECODABLE)
return spenders
def spenders_taproot_inactive():
"""Spenders for testing that pre-activation Taproot rules don't apply."""
spenders = []
sec = generate_privkey()
pub, _ = compute_xonly_pubkey(sec)
scripts = [
("pk", CScript([pub, OP_CHECKSIG])),
("future_leaf", CScript([pub, OP_CHECKSIG]), 0xc2),
("op_success", CScript([pub, OP_CHECKSIG, OP_0, OP_IF, CScriptOp(0x50), OP_ENDIF])),
]
tap = taproot_construct(pub, scripts)
    # Test that keypath spending is accepted and non-standard, regardless of signature validity.
add_spender(spenders, "inactive/keypath_valid", key=sec, tap=tap, standard=False)
add_spender(spenders, "inactive/keypath_invalidsig", key=sec, tap=tap, standard=False, sighash=bitflipper(default_sighash))
add_spender(spenders, "inactive/keypath_empty", key=sec, tap=tap, standard=False, witness=[])
# Same for scriptpath spending (and features like annex, leaf versions, or OP_SUCCESS don't change this)
add_spender(spenders, "inactive/scriptpath_valid", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")])
add_spender(spenders, "inactive/scriptpath_invalidsig", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
add_spender(spenders, "inactive/scriptpath_invalidcb", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], controlblock=bitflipper(default_controlblock))
add_spender(spenders, "inactive/scriptpath_valid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")])
add_spender(spenders, "inactive/scriptpath_invalid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")])
add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
return spenders
# Consensus validation flags to use in dumps for tests with "legacy/" or "inactive/" prefix.
LEGACY_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY"
# Consensus validation flags to use in dumps for all other tests.
TAPROOT_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY,TAPROOT"
def dump_json_test(tx, input_utxos, idx, success, failure):
spender = input_utxos[idx].spender
# Determine flags to dump
flags = LEGACY_FLAGS if spender.comment.startswith("legacy/") or spender.comment.startswith("inactive/") else TAPROOT_FLAGS
fields = [
("tx", tx.serialize().hex()),
("prevouts", [x.output.serialize().hex() for x in input_utxos]),
("index", idx),
("flags", flags),
("comment", spender.comment)
]
# The "final" field indicates that a spend should be always valid, even with more validation flags enabled
# than the listed ones. Use standardness as a proxy for this (which gives a conservative underestimate).
if spender.is_standard:
fields.append(("final", True))
def dump_witness(wit):
return OrderedDict([("scriptSig", wit[0].hex()), ("witness", [x.hex() for x in wit[1]])])
if success is not None:
fields.append(("success", dump_witness(success)))
if failure is not None:
fields.append(("failure", dump_witness(failure)))
# Write the dump to $TEST_DUMP_DIR/x/xyz... where x,y,z,... are the SHA1 sum of the dump (which makes the
# file naming scheme compatible with fuzzing infrastructure).
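    # (Illustrative hash only: a dump whose SHA1 digest is "da39a3ee..." would be written to
    # $TEST_DUMP_DIR/d/da39a3ee... .)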
dump = json.dumps(OrderedDict(fields)) + ",\n"
sha1 = hashlib.sha1(dump.encode("utf-8")).hexdigest()
dirname = os.environ.get("TEST_DUMP_DIR", ".") + ("/%s" % sha1[0])
os.makedirs(dirname, exist_ok=True)
with open(dirname + ("/%s" % sha1), 'w', encoding="utf8") as f:
f.write(dump)
# Data type to keep track of UTXOs, where they were created, and how to spend them.
UTXOData = namedtuple('UTXOData', 'outpoint,output,spender')
class TaprootTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_argument("--dumptests", dest="dump_tests", default=False, action="store_true",
help="Dump generated test cases to directory set by TEST_DUMP_DIR environment variable")
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
# Node 0 has Taproot inactive, Node 1 active.
# ELEMENTS: to preserve tests which depend on the exact number of outputs,
# we turn one of the original outputs into a fee output, which routinely
# results in us burning massive amounts of coin. Hence -maxtxfee.
self.extra_args = [["-par=1", "-vbparams=taproot:1:1"], ["-par=1", "-maxtxfee=100.0"]]
def block_submit(self, node, txs, msg, err_msg, cb_pubkey=None, fees=0, sigops_weight=0, witness=False, accept=False):
# Deplete block of any non-tapscript sigops using a single additional 0-value coinbase output.
        # It is possible to fit enough tapscript sigops into a block to hit the old 80k limit without
        # busting txin-level limits, so we simply have to account for the p2pk outputs in all
        # transactions.
extra_output_script = CScript([OP_CHECKSIG]*((MAX_BLOCK_SIGOPS_WEIGHT - sigops_weight) // WITNESS_SCALE_FACTOR))
if extra_output_script == CScript():
extra_output_script = None ## ELEMENTS: an explicitly empty coinbase scriptpubkey would be rejected with bad-cb-fee
block = create_block(self.tip, create_coinbase(self.lastblockheight + 1, pubkey=cb_pubkey, extra_output_script=extra_output_script, fees=fees), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
        if witness:
            add_witness_commitment(block)
block.rehash()
block.solve()
block_response = node.submitblock(block.serialize().hex())
if err_msg is not None:
            assert block_response is not None and err_msg in block_response, "Missing error message '%s' from block response '%s': %s" % (err_msg, "(None)" if block_response is None else block_response, msg)
if (accept):
assert node.getbestblockhash() == block.hash, "Failed to accept: %s (response: %s)" % (msg, block_response)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert node.getbestblockhash() == self.lastblockhash, "Failed to reject: " + msg
def test_spenders(self, node, spenders, input_counts):
"""Run randomized tests with a number of "spenders".
Steps:
1) Generate an appropriate UTXO for each spender to test spend conditions
            2) Generate 16 random addresses of all wallet types: pkh/sh_wpkh/wpkh
3) Select random number of inputs from (1)
4) Select random number of addresses from (2) as outputs
Each spender embodies a test; in a large randomized test, it is verified
that toggling the valid argument to each lambda toggles the validity of
the transaction. This is accomplished by constructing transactions consisting
of all valid inputs, except one invalid one.
"""
# Construct a bunch of sPKs that send coins back to the host wallet
self.log.info("- Constructing addresses for returning coins")
host_spks = []
host_pubkeys = []
for i in range(16):
addr = node.getnewaddress(address_type=random.choice(["legacy", "p2sh-segwit", "bech32"]))
info = node.getaddressinfo(addr)
spk = bytes.fromhex(info['scriptPubKey'])
host_spks.append(spk)
host_pubkeys.append(bytes.fromhex(info['pubkey']))
# Initialize variables used by block_submit().
self.lastblockhash = node.getbestblockhash()
self.tip = int(self.lastblockhash, 16)
block = node.getblock(self.lastblockhash)
self.lastblockheight = block['height']
self.lastblocktime = block['time']
# Create transactions spending up to 50 of the wallet's inputs, with one output for each spender, and
# one change output at the end. The transaction is constructed on the Python side to enable
# having multiple outputs to the same address and outputs with no assigned address. The wallet
# is then asked to sign it through signrawtransactionwithwallet, and then added to a block on the
# Python side (to bypass standardness rules).
self.log.info("- Creating test UTXOs...")
random.shuffle(spenders)
normal_utxos = []
mismatching_utxos = [] # UTXOs with input that requires mismatching output position
done = 0
while done < len(spenders):
# Compute how many UTXOs to create with this transaction
count_this_tx = min(len(spenders) - done, (len(spenders) + 4) // 5, 10000)
fund_tx = CTransaction()
# Add the 50 highest-value inputs
unspents = node.listunspent()
random.shuffle(unspents)
unspents.sort(key=lambda x: int(x["amount"] * 100000000), reverse=True)
if len(unspents) > 50:
unspents = unspents[:50]
random.shuffle(unspents)
balance = 0
for unspent in unspents:
balance += int(unspent["amount"] * 100000000)
txid = int(unspent["txid"], 16)
fund_tx.vin.append(CTxIn(COutPoint(txid, int(unspent["vout"])), CScript()))
# Add outputs
cur_progress = done / len(spenders)
next_progress = (done + count_this_tx) / len(spenders)
change_goal = (1.0 - 0.6 * next_progress) / (1.0 - 0.6 * cur_progress) * balance
self.log.debug("Create %i UTXOs in a transaction spending %i inputs worth %.8f (sending ~%.8f to change)" % (count_this_tx, len(unspents), balance * 0.00000001, change_goal * 0.00000001))
for i in range(count_this_tx):
avg = (balance - change_goal) / (count_this_tx - i)
amount = int(random.randrange(int(avg*0.85 + 0.5), int(avg*1.15 + 0.5)) + 0.5)
balance -= amount
fund_tx.vout.append(CTxOut(amount, spenders[done + i].script))
fund_tx.wit.vtxoutwit.append(CTxOutWitness())
# Add change
fund_tx.vout.append(CTxOut(balance - 10000, random.choice(host_spks)))
fund_tx.wit.vtxoutwit.append(CTxOutWitness())
fund_tx.vout.append(CTxOut(10000)) # ELEMENTS: and fee
fund_tx.wit.vtxoutwit.append(CTxOutWitness())
# Ask the wallet to sign
ss = BytesIO(bytes.fromhex(node.signrawtransactionwithwallet(ToHex(fund_tx))["hex"]))
fund_tx.deserialize(ss)
# Construct UTXOData entries
fund_tx.rehash()
for i in range(count_this_tx):
utxodata = UTXOData(outpoint=COutPoint(fund_tx.sha256, i), output=fund_tx.vout[i], spender=spenders[done])
if utxodata.spender.need_vin_vout_mismatch:
mismatching_utxos.append(utxodata)
else:
normal_utxos.append(utxodata)
done += 1
# Mine into a block
self.block_submit(node, [fund_tx], "Funding tx", None, random.choice(host_pubkeys), 10000, MAX_BLOCK_SIGOPS_WEIGHT, True, True)
        # Consume groups of choice(input_counts) UTXOs in a tx, testing the spenders.
self.log.info("- Running %i spending tests" % done)
random.shuffle(normal_utxos)
random.shuffle(mismatching_utxos)
assert done == len(normal_utxos) + len(mismatching_utxos)
left = done
while left:
# Construct CTransaction with random nVersion, nLocktime
tx = CTransaction()
tx.nVersion = random.choice([1, 2, random.randint(-0x80000000, 0x7fffffff)])
min_sequence = (tx.nVersion != 1 and tx.nVersion != 0) * 0x80000000 # The minimum sequence number to disable relative locktime
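            # (The consensus check treats the transaction version as unsigned, so relative locktime can apply
            # to any version other than 0 and 1; setting bit 31 of nSequence disables it per input, which is
            # what min_sequence guarantees here.)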
if random.choice([True, False]):
tx.nLockTime = random.randrange(LOCKTIME_THRESHOLD, self.lastblocktime - 7200) # all absolute locktimes in the past
else:
tx.nLockTime = random.randrange(self.lastblockheight + 1) # all block heights in the past
# Decide how many UTXOs to test with.
acceptable = [n for n in input_counts if n <= left and (left - n > max(input_counts) or (left - n) in [0] + input_counts)]
num_inputs = random.choice(acceptable)
# If we have UTXOs that require mismatching inputs/outputs left, include exactly one of those
# unless there is only one normal UTXO left (as tests with mismatching UTXOs require at least one
# normal UTXO to go in the first position), and we don't want to run out of normal UTXOs.
input_utxos = []
while len(mismatching_utxos) and (len(input_utxos) == 0 or len(normal_utxos) == 1):
input_utxos.append(mismatching_utxos.pop())
left -= 1
# Top up until we hit num_inputs (but include at least one normal UTXO always).
for _ in range(max(1, num_inputs - len(input_utxos))):
input_utxos.append(normal_utxos.pop())
left -= 1
# The first input cannot require a mismatching output (as there is at least one output).
while True:
random.shuffle(input_utxos)
if not input_utxos[0].spender.need_vin_vout_mismatch:
break
first_mismatch_input = None
for i in range(len(input_utxos)):
if input_utxos[i].spender.need_vin_vout_mismatch:
first_mismatch_input = i
assert first_mismatch_input is None or first_mismatch_input > 0
# Decide fee, and add CTxIns to tx.
amount = sum(utxo.output.nValue.getAmount() for utxo in input_utxos)
            fee = min(random.randrange(MIN_FEE * 2, MIN_FEE * 4), amount - DUST_LIMIT) # 100000-200000 sat fee
in_value = amount - fee
tx.vin = [CTxIn(outpoint=utxo.outpoint, nSequence=random.randint(min_sequence, 0xffffffff)) for utxo in input_utxos]
tx.wit.vtxinwit = [CTxInWitness() for _ in range(len(input_utxos))]
sigops_weight = sum(utxo.spender.sigops_weight for utxo in input_utxos)
self.log.debug("Test: %s" % (", ".join(utxo.spender.comment for utxo in input_utxos)))
# Add 1 to 4 random outputs (but constrained by inputs that require mismatching outputs)
# ELEMENTS: actually make it 0 to 3, plus a fee output which burns whatever the last output would've had
num_outputs = random.choice(range(0, min(4, 4 if first_mismatch_input is None else first_mismatch_input)))
assert in_value >= 0 and fee - num_outputs * DUST_LIMIT >= MIN_FEE
for i in range(num_outputs):
tx.vout.append(CTxOut())
tx.wit.vtxoutwit.append(CTxOutWitness())
if in_value <= DUST_LIMIT:
tx.vout[-1].nValue = CTxOutValue(DUST_LIMIT)
elif i < num_outputs - 1:
tx.vout[-1].nValue = CTxOutValue(in_value)
else:
tx.vout[-1].nValue = CTxOutValue(random.randint(DUST_LIMIT, in_value))
in_value -= tx.vout[-1].nValue.getAmount()
tx.vout[-1].scriptPubKey = random.choice(host_spks)
sigops_weight += CScript(tx.vout[-1].scriptPubKey).GetSigOpCount(False) * WITNESS_SCALE_FACTOR
fee += in_value
assert fee >= 0
tx.vout.append(CTxOut(fee))
tx.wit.vtxoutwit.append(CTxOutWitness())
# Select coinbase pubkey
cb_pubkey = random.choice(host_pubkeys)
sigops_weight += 1 * WITNESS_SCALE_FACTOR
# Precompute one satisfying and one failing scriptSig/witness for each input.
input_data = []
for i in range(len(input_utxos)):
fn = input_utxos[i].spender.sat_function
fail = None
success = fn(tx, i, [utxo.output for utxo in input_utxos], True)
if not input_utxos[i].spender.no_fail:
fail = fn(tx, i, [utxo.output for utxo in input_utxos], False)
input_data.append((fail, success))
if self.options.dump_tests:
dump_json_test(tx, input_utxos, i, success, fail)
# Sign each input incorrectly once on each complete signing pass, except the very last.
for fail_input in list(range(len(input_utxos))) + [None]:
# Skip trying to fail at spending something that can't be made to fail.
if fail_input is not None and input_utxos[fail_input].spender.no_fail:
continue
                # Expected message with each input failure; may be None (in which case it is ignored)
expected_fail_msg = None if fail_input is None else input_utxos[fail_input].spender.err_msg
# Fill inputs/witnesses
for i in range(len(input_utxos)):
tx.vin[i].scriptSig = input_data[i][i != fail_input][0]
tx.wit.vtxinwit[i].scriptWitness.stack = input_data[i][i != fail_input][1]
# Submit to mempool to check standardness
is_standard_tx = fail_input is None and all(utxo.spender.is_standard for utxo in input_utxos) and tx.nVersion >= 1 and tx.nVersion <= 2
tx.rehash()
msg = ','.join(utxo.spender.comment + ("*" if n == fail_input else "") for n, utxo in enumerate(input_utxos))
if is_standard_tx:
node.sendrawtransaction(tx.serialize().hex(), 0)
assert node.getmempoolentry(tx.hash) is not None, "Failed to accept into mempool: " + msg
else:
assert_raises_rpc_error(-26, None, node.sendrawtransaction, tx.serialize().hex(), 0)
# Submit in a block
self.block_submit(node, [tx], msg, witness=True, accept=fail_input is None, cb_pubkey=cb_pubkey, fees=fee, sigops_weight=sigops_weight, err_msg=expected_fail_msg)
if (len(spenders) - left) // 200 > (len(spenders) - left - len(input_utxos)) // 200:
self.log.info(" - %i tests done" % (len(spenders) - left))
assert left == 0
assert len(normal_utxos) == 0
assert len(mismatching_utxos) == 0
self.log.info(" - Done")
def run_test(self):
util.node_fastmerkle = self.nodes[0]
# Post-taproot activation tests go first (pre-taproot tests' blocks are invalid post-taproot).
self.log.info("Post-activation tests...")
self.nodes[1].generate(101)
global g_genesis_hash
g_genesis_hash = uint256_from_str(bytes.fromhex(self.nodes[1].getblockhash(0))[::-1])
self.test_spenders(self.nodes[1], spenders_taproot_active(), input_counts=[1, 2, 2, 2, 2, 3])
# Transfer funds to pre-taproot node.
addr = self.nodes[0].getnewaddress()
rawtx = self.nodes[1].createrawtransaction(
inputs=[{
'txid': i['txid'],
'vout': i['vout']
} for i in self.nodes[1].listunspent()],
outputs=[{addr: self.nodes[1].getbalance()['bitcoin']}],
)
rawtx = self.nodes[1].signrawtransactionwithwallet(rawtx)['hex']
# Transaction is too large to fit into the mempool, so put it into a block
block = create_block(tmpl=self.nodes[1].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS), txlist=[rawtx])
add_witness_commitment(block)
block.rehash()
block.solve()
assert_equal(None, self.nodes[1].submitblock(block.serialize().hex()))
self.sync_blocks()
# Pre-taproot activation tests.
self.log.info("Pre-activation tests...")
# Run each test twice; once in isolation, and once combined with others. Testing in isolation
# means that the standardness is verified in every test (as combined transactions are only standard
# when all their inputs are standard).
self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[1])
self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[2, 3])
if __name__ == '__main__':
TaprootTest().main()
| []
| []
| [
"TEST_DUMP_DIR"
]
| [] | ["TEST_DUMP_DIR"] | python | 1 | 0 | |
modin/conftest.py | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import os
import sys
import pytest
import pandas
from pandas.util._decorators import doc
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import shutil
assert (
"modin.utils" not in sys.modules
), "Do not import modin.utils before patching, or tests could fail"
# every import under this assert has to be postfixed with 'noqa: E402'
# as flake8 complains about that... but we _have_ to make sure we
# monkey-patch at the right spot, otherwise testing doc URLs might
# not catch all of them
import modin.utils # noqa: E402
_generated_doc_urls = set()
def _saving_make_api_url(token, _make_api_url=modin.utils._make_api_url):
url = _make_api_url(token)
_generated_doc_urls.add(url)
return url
modin.utils._make_api_url = _saving_make_api_url
import modin # noqa: E402
import modin.config # noqa: E402
from modin.config import IsExperimental, TestRayClient # noqa: E402
from modin.core.storage_formats import ( # noqa: E402
PandasQueryCompiler,
BaseQueryCompiler,
)
from modin.core.execution.python.implementations.pandas_on_python.io import ( # noqa: E402
PandasOnPythonIO,
)
from modin.core.execution.dispatching.factories import factories # noqa: E402
from modin.utils import get_current_backend # noqa: E402
from modin.pandas.test.utils import ( # noqa: E402
_make_csv_file,
get_unique_filename,
make_default_file,
teardown_test_files,
NROWS,
IO_OPS_DATA_DIR,
)
# create the test data dir if it does not exist yet
if not os.path.exists(IO_OPS_DATA_DIR):
os.mkdir(IO_OPS_DATA_DIR)
def pytest_addoption(parser):
parser.addoption(
"--simulate-cloud",
action="store",
default="off",
help="simulate cloud for testing: off|normal|experimental",
)
parser.addoption(
"--backend",
action="store",
default=None,
help="specifies backend to run tests on",
)
parser.addoption(
"--extra-test-parameters",
action="store_true",
help="activate extra test parameter combinations",
)
class Patcher:
def __init__(self, conn, *pairs):
self.pairs = pairs
self.originals = None
self.conn = conn
def __wrap(self, func):
def wrapper(*a, **kw):
return func(
*(tuple(self.conn.obtain(x) for x in a)),
**({k: self.conn.obtain(v) for k, v in kw.items()}),
)
return func, wrapper
def __enter__(self):
self.originals = []
for module, attrname in self.pairs:
orig, wrapped = self.__wrap(getattr(module, attrname))
self.originals.append((module, attrname, orig))
setattr(module, attrname, wrapped)
return self
def __exit__(self, *a, **kw):
for module, attrname, orig in self.originals:
setattr(module, attrname, orig)
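# A minimal, hypothetical sketch of how Patcher is used (see simulate_cloud below);
# `some_module` / `some_function` are placeholders, not real Modin names. While the
# patch is active, every argument of the wrapped callable is materialized on the
# remote side via conn.obtain():
#
#     with Patcher(get_connection(), (some_module, "some_function")):
#         some_module.some_function(arg)  # arg goes through conn.obtain()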
def set_experimental_env(mode):
from modin.config import IsExperimental
IsExperimental.put(mode == "experimental")
@pytest.fixture(scope="session", autouse=True)
def simulate_cloud(request):
mode = request.config.getoption("--simulate-cloud").lower()
if mode == "off":
yield
return
if mode not in ("normal", "experimental"):
raise ValueError(f"Unsupported --simulate-cloud mode: {mode}")
assert IsExperimental.get(), "Simulated cloud must be started in experimental mode"
from modin.experimental.cloud import create_cluster, get_connection
import modin.pandas.test.utils
with create_cluster("local", cluster_type="local"):
get_connection().teleport(set_experimental_env)(mode)
with Patcher(
get_connection(),
(modin.pandas.test.utils, "assert_index_equal"),
(modin.pandas.test.utils, "assert_series_equal"),
(modin.pandas.test.utils, "assert_frame_equal"),
(modin.pandas.test.utils, "assert_extension_array_equal"),
(modin.pandas.test.utils, "assert_empty_frame_equal"),
):
yield
@pytest.fixture(scope="session", autouse=True)
def enforce_config():
"""
A fixture that ensures that all checks for MODIN_* variables
are done using modin.config to prevent leakage
"""
orig_env = os.environ
modin_start = os.path.dirname(modin.__file__)
modin_exclude = [os.path.dirname(modin.config.__file__)]
class PatchedEnv:
@staticmethod
def __check_var(name):
if name.upper().startswith("MODIN_"):
frame = sys._getframe()
try:
# get the path to module where caller of caller is defined;
# caller of this function is inside PatchedEnv, and we're
# interested in whomever called a method on PatchedEnv
caller_file = frame.f_back.f_back.f_code.co_filename
finally:
del frame
pkg_name = os.path.dirname(caller_file)
if pkg_name.startswith(modin_start):
assert any(
pkg_name.startswith(excl) for excl in modin_exclude
), "Do not access MODIN_ environment variable bypassing modin.config"
def __getitem__(self, name):
self.__check_var(name)
return orig_env[name]
def __setitem__(self, name, value):
self.__check_var(name)
orig_env[name] = value
def __delitem__(self, name):
self.__check_var(name)
del orig_env[name]
def pop(self, name, default=object()):
self.__check_var(name)
return orig_env.pop(name, default)
def get(self, name, default=None):
self.__check_var(name)
return orig_env.get(name, default)
def __contains__(self, name):
self.__check_var(name)
return name in orig_env
def __getattr__(self, name):
return getattr(orig_env, name)
def __iter__(self):
return iter(orig_env)
os.environ = PatchedEnv()
yield
os.environ = orig_env
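# Illustrative note: with this fixture active, code under modin/ (outside modin/config)
# that reads a MODIN_* variable directly, e.g. os.environ.get("MODIN_ENGINE"),
# trips the assertion in PatchedEnv.__check_var; reads routed through modin.config
# are unaffected because modin/config is in the exclude list above.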
BASE_BACKEND_NAME = "BaseOnPython"
class TestQC(BaseQueryCompiler):
def __init__(self, modin_frame):
self._modin_frame = modin_frame
def finalize(self):
self._modin_frame.finalize()
@classmethod
def from_pandas(cls, df, data_cls):
return cls(data_cls.from_pandas(df))
@classmethod
def from_arrow(cls, at, data_cls):
return cls(data_cls.from_arrow(at))
def free(self):
pass
to_pandas = PandasQueryCompiler.to_pandas
default_to_pandas = PandasQueryCompiler.default_to_pandas
class BaseOnPythonIO(PandasOnPythonIO):
query_compiler_cls = TestQC
class BaseOnPythonFactory(factories.BaseFactory):
@classmethod
def prepare(cls):
cls.io_cls = BaseOnPythonIO
def set_base_backend(name=BASE_BACKEND_NAME):
setattr(factories, f"{name}Factory", BaseOnPythonFactory)
modin.set_backends(engine="python", partition=name.split("On")[0])
def pytest_configure(config):
if config.option.extra_test_parameters is not None:
import modin.pandas.test.utils as utils
utils.extra_test_parameters = config.option.extra_test_parameters
backend = config.option.backend
if backend is None:
return
if backend == BASE_BACKEND_NAME:
set_base_backend(BASE_BACKEND_NAME)
else:
partition, engine = backend.split("On")
modin.set_backends(engine=engine, partition=partition)
def pytest_runtest_call(item):
custom_markers = ["xfail", "skip"]
    # dynamically adding custom markers to tests
for custom_marker in custom_markers:
for marker in item.iter_markers(name=f"{custom_marker}_backends"):
backends = marker.args[0]
if not isinstance(backends, list):
backends = [backends]
current_backend = get_current_backend()
reason = marker.kwargs.pop("reason", "")
item.add_marker(
getattr(pytest.mark, custom_marker)(
condition=current_backend in backends,
reason=f"Backend {current_backend} does not pass this test. {reason}",
**marker.kwargs,
)
)
_doc_pytest_fixture = """
Pytest fixture factory that makes temp {file_type} files for testing.
Yields:
Function that generates {file_type} files
"""
@pytest.fixture(scope="class")
def TestReadCSVFixture():
filenames = []
files_ids = [
"test_read_csv_regular",
"test_read_csv_blank_lines",
"test_read_csv_yes_no",
"test_read_csv_nans",
"test_read_csv_bad_lines",
]
    # each xdist worker is spawned in a separate process with its own namespace and dataset
pytest.csvs_names = {file_id: get_unique_filename() for file_id in files_ids}
# test_read_csv_col_handling, test_read_csv_parsing
_make_csv_file(filenames)(
filename=pytest.csvs_names["test_read_csv_regular"],
)
# test_read_csv_parsing
_make_csv_file(filenames)(
filename=pytest.csvs_names["test_read_csv_yes_no"],
additional_col_values=["Yes", "true", "No", "false"],
)
# test_read_csv_col_handling
_make_csv_file(filenames)(
filename=pytest.csvs_names["test_read_csv_blank_lines"],
add_blank_lines=True,
)
# test_read_csv_nans_handling
_make_csv_file(filenames)(
filename=pytest.csvs_names["test_read_csv_nans"],
add_blank_lines=True,
additional_col_values=["<NA>", "N/A", "NA", "NULL", "custom_nan", "73"],
)
# test_read_csv_error_handling
_make_csv_file(filenames)(
filename=pytest.csvs_names["test_read_csv_bad_lines"],
add_bad_lines=True,
)
yield
# Delete csv files that were created
teardown_test_files(filenames)
@pytest.fixture
@doc(_doc_pytest_fixture, file_type="csv")
def make_csv_file():
filenames = []
yield _make_csv_file(filenames)
# Delete csv files that were created
teardown_test_files(filenames)
def create_fixture(file_type):
@doc(_doc_pytest_fixture, file_type=file_type)
def fixture():
func, filenames = make_default_file(file_type=file_type)
yield func
teardown_test_files(filenames)
return fixture
for file_type in ("json", "html", "excel", "feather", "stata", "hdf", "pickle", "fwf"):
fixture = create_fixture(file_type)
fixture.__name__ = f"make_{file_type}_file"
globals()[fixture.__name__] = pytest.fixture(fixture)
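# The loop above registers one fixture per file type -- make_json_file,
# make_html_file, ..., make_fwf_file -- mirroring the hand-written make_csv_file.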
@pytest.fixture
def make_parquet_file():
"""Pytest fixture factory that makes a parquet file/dir for testing.
Yields:
Function that generates a parquet file/dir
"""
filenames = []
def _make_parquet_file(
filename,
nrows=NROWS,
ncols=2,
force=True,
directory=False,
partitioned_columns=[],
):
"""Helper function to generate parquet files/directories.
Args:
filename: The name of test file, that should be created.
nrows: Number of rows for the dataframe.
ncols: Number of cols for the dataframe.
force: Create a new file/directory even if one already exists.
directory: Create a partitioned directory using pyarrow.
partitioned_columns: Create a partitioned directory using pandas.
Will be ignored if directory=True.
"""
if force or not os.path.exists(filename):
df = pandas.DataFrame(
{f"col{x + 1}": np.arange(nrows) for x in range(ncols)}
)
if directory:
if os.path.exists(filename):
shutil.rmtree(filename)
else:
os.makedirs(filename)
table = pa.Table.from_pandas(df)
pq.write_to_dataset(table, root_path=filename)
elif len(partitioned_columns) > 0:
df.to_parquet(filename, partition_cols=partitioned_columns)
else:
df.to_parquet(filename)
filenames.append(filename)
# Return function that generates parquet files
yield _make_parquet_file
# Delete parquet file that was created
for path in filenames:
if os.path.exists(path):
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
@pytest.fixture
def make_sql_connection():
"""Sets up sql connections and takes them down after the caller is done.
Yields:
Factory that generates sql connection objects
"""
filenames = []
def _sql_connection(filename, table=""):
# Remove file if exists
if os.path.exists(filename):
os.remove(filename)
filenames.append(filename)
# Create connection and, if needed, table
conn = "sqlite:///{}".format(filename)
if table:
df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3, 4, 5, 6],
"col2": [7, 8, 9, 10, 11, 12, 13],
"col3": [14, 15, 16, 17, 18, 19, 20],
"col4": [21, 22, 23, 24, 25, 26, 27],
"col5": [0, 0, 0, 0, 0, 0, 0],
}
)
df.to_sql(table, conn)
return conn
yield _sql_connection
# Teardown the fixture
teardown_test_files(filenames)
@pytest.fixture(scope="class")
def TestReadGlobCSVFixture():
filenames = []
base_name = get_unique_filename(extension="")
pytest.glob_path = "{}_*.csv".format(base_name)
pytest.files = ["{}_{}.csv".format(base_name, i) for i in range(11)]
for fname in pytest.files:
# Glob does not guarantee ordering so we have to remove the randomness in the generated csvs.
_make_csv_file(filenames)(fname, row_size=11, remove_randomness=True)
yield
teardown_test_files(filenames)
@pytest.fixture
def get_generated_doc_urls():
return lambda: _generated_doc_urls
ray_client_server = None
def pytest_sessionstart(session):
if TestRayClient.get():
import ray
import ray.util.client.server.server as ray_server
addr = "localhost:50051"
global ray_client_server
ray_client_server = ray_server.serve(addr)
ray.util.connect(addr)
def pytest_sessionfinish(session, exitstatus):
if TestRayClient.get():
import ray
ray.util.disconnect()
if ray_client_server:
ray_client_server.stop(0)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
client/v3/client.go | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
"errors"
"fmt"
"net"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/google/uuid"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3/balancer"
"go.etcd.io/etcd/client/v3/balancer/picker"
"go.etcd.io/etcd/client/v3/balancer/resolver/endpoint"
"go.etcd.io/etcd/client/v3/credentials"
"go.etcd.io/etcd/pkg/v3/logutil"
"go.uber.org/zap"
"github.com/mchirico/grpc"
"github.com/mchirico/grpc/codes"
grpccredentials "github.com/mchirico/grpc/credentials"
"github.com/mchirico/grpc/keepalive"
"github.com/mchirico/grpc/status"
)
var (
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
ErrOldCluster = errors.New("etcdclient: old cluster version")
roundRobinBalancerName = fmt.Sprintf("etcd-%s", picker.RoundrobinBalanced.String())
)
func init() {
lg := zap.NewNop()
if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
lcfg := logutil.DefaultZapLoggerConfig
lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
var err error
lg, err = lcfg.Build() // info level logging
if err != nil {
panic(err)
}
}
// TODO: support custom balancer
balancer.RegisterBuilder(balancer.Config{
Policy: picker.RoundrobinBalanced,
Name: roundRobinBalancerName,
Logger: lg,
})
}
// Client provides and manages an etcd v3 client session.
type Client struct {
Cluster
KV
Lease
Watcher
Auth
Maintenance
conn *grpc.ClientConn
cfg Config
creds grpccredentials.TransportCredentials
resolverGroup *endpoint.ResolverGroup
mu *sync.RWMutex
ctx context.Context
cancel context.CancelFunc
// Username is a user name for authentication.
Username string
// Password is a password for authentication.
Password string
authTokenBundle credentials.Bundle
callOpts []grpc.CallOption
lg *zap.Logger
}
// New creates a new etcdv3 client from a given configuration.
func New(cfg Config) (*Client, error) {
if len(cfg.Endpoints) == 0 {
return nil, ErrNoAvailableEndpoints
}
return newClient(&cfg)
}
// NewCtxClient creates a client with a context but no underlying grpc
// connection. This is useful for embedded cases that override the
// service interface implementations and do not need connection management.
func NewCtxClient(ctx context.Context) *Client {
cctx, cancel := context.WithCancel(ctx)
return &Client{ctx: cctx, cancel: cancel, lg: zap.NewNop()}
}
// NewFromURL creates a new etcdv3 client from a URL.
func NewFromURL(url string) (*Client, error) {
return New(Config{Endpoints: []string{url}})
}
// NewFromURLs creates a new etcdv3 client from URLs.
func NewFromURLs(urls []string) (*Client, error) {
return New(Config{Endpoints: urls})
}
// WithLogger sets a logger
func (c *Client) WithLogger(lg *zap.Logger) *Client {
c.lg = lg
return c
}
// Close shuts down the client's etcd connections.
func (c *Client) Close() error {
c.cancel()
if c.Watcher != nil {
c.Watcher.Close()
}
if c.Lease != nil {
c.Lease.Close()
}
if c.resolverGroup != nil {
c.resolverGroup.Close()
}
if c.conn != nil {
return toErr(c.ctx, c.conn.Close())
}
return c.ctx.Err()
}
// Ctx is a context for "out of band" messages (e.g., for sending
// "clean up" message when another context is canceled). It is
// canceled on client Close().
func (c *Client) Ctx() context.Context { return c.ctx }
// Endpoints lists the registered endpoints for the client.
func (c *Client) Endpoints() []string {
// copy the slice; protect original endpoints from being changed
c.mu.RLock()
defer c.mu.RUnlock()
eps := make([]string, len(c.cfg.Endpoints))
copy(eps, c.cfg.Endpoints)
return eps
}
// SetEndpoints updates client's endpoints.
func (c *Client) SetEndpoints(eps ...string) {
c.mu.Lock()
defer c.mu.Unlock()
c.cfg.Endpoints = eps
c.resolverGroup.SetEndpoints(eps)
}
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
func (c *Client) Sync(ctx context.Context) error {
mresp, err := c.MemberList(ctx)
if err != nil {
return err
}
var eps []string
for _, m := range mresp.Members {
eps = append(eps, m.ClientURLs...)
}
c.SetEndpoints(eps...)
return nil
}
func (c *Client) autoSync() {
if c.cfg.AutoSyncInterval == time.Duration(0) {
return
}
for {
select {
case <-c.ctx.Done():
return
case <-time.After(c.cfg.AutoSyncInterval):
ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
err := c.Sync(ctx)
cancel()
if err != nil && err != c.ctx.Err() {
lg.Lvl(4).Infof("Auto sync endpoints failed: %v", err)
}
}
}
}
func (c *Client) processCreds(scheme string) (creds grpccredentials.TransportCredentials) {
creds = c.creds
switch scheme {
case "unix":
case "http":
creds = nil
case "https", "unixs":
if creds != nil {
break
}
creds = credentials.NewBundle(credentials.Config{}).TransportCredentials()
default:
creds = nil
}
return creds
}
// dialSetupOpts gives the dial opts prior to any authentication.
func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
if c.cfg.DialKeepAliveTime > 0 {
params := keepalive.ClientParameters{
Time: c.cfg.DialKeepAliveTime,
Timeout: c.cfg.DialKeepAliveTimeout,
PermitWithoutStream: c.cfg.PermitWithoutStream,
}
opts = append(opts, grpc.WithKeepaliveParams(params))
}
opts = append(opts, dopts...)
dialer := endpoint.Dialer
if creds != nil {
opts = append(opts, grpc.WithTransportCredentials(creds))
} else {
opts = append(opts, grpc.WithInsecure())
}
opts = append(opts, grpc.WithContextDialer(dialer))
// Interceptor retry and backoff.
// TODO: Replace all of clientv3/retry.go with interceptor based retry, or with
// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy
// once it is available.
rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
opts = append(opts,
// Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
// Streams that are safe to retry are enabled individually.
grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(0), rrBackoff)),
grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), rrBackoff)),
)
return opts, nil
}
// Dial connects to a single endpoint using the client's config.
func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
creds, err := c.directDialCreds(ep)
if err != nil {
return nil, err
}
// Use the grpc passthrough resolver to directly dial a single endpoint.
// This resolver passes through the 'unix' and 'unixs' endpoints schemes used
	// by etcd without modification, allowing us to dial endpoints directly and
	// to use the same dial functions that we use for load balancer dialing.
return c.dial(fmt.Sprintf("passthrough:///%s", ep), creds)
}
func (c *Client) getToken(ctx context.Context) error {
	var err error // return the last error in case of failure
if c.Username == "" || c.Password == "" {
return nil
}
resp, err := c.Auth.Authenticate(ctx, c.Username, c.Password)
if err != nil {
if err == rpctypes.ErrAuthNotEnabled {
return nil
}
return err
}
c.authTokenBundle.UpdateAuthToken(resp.Token)
return nil
}
// dialWithBalancer dials the client's current load balanced resolver group. The scheme of the host
// of the provided endpoint determines the scheme used for all endpoints of the client connection.
func (c *Client) dialWithBalancer(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
_, host, _ := endpoint.ParseEndpoint(ep)
target := c.resolverGroup.Target(host)
creds := c.dialWithBalancerCreds(ep)
return c.dial(target, creds, dopts...)
}
// dial configures and dials any grpc balancer target.
func (c *Client) dial(target string, creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
opts, err := c.dialSetupOpts(creds, dopts...)
if err != nil {
return nil, fmt.Errorf("failed to configure dialer: %v", err)
}
if c.Username != "" && c.Password != "" {
c.authTokenBundle = credentials.NewBundle(credentials.Config{})
opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
}
opts = append(opts, c.cfg.DialOptions...)
dctx := c.ctx
if c.cfg.DialTimeout > 0 {
var cancel context.CancelFunc
dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
}
conn, err := grpc.DialContext(dctx, target, opts...)
if err != nil {
return nil, err
}
return conn, nil
}
func (c *Client) directDialCreds(ep string) (grpccredentials.TransportCredentials, error) {
_, host, scheme := endpoint.ParseEndpoint(ep)
creds := c.creds
if len(scheme) != 0 {
creds = c.processCreds(scheme)
if creds != nil {
clone := creds.Clone()
			// The server name must be set to the endpoint hostname without the port, since grpc
			// otherwise attempts to check whether the x509 cert is valid for the full endpoint,
			// including the scheme and port, which fails.
overrideServerName, _, err := net.SplitHostPort(host)
if err != nil {
// Either the host didn't have a port or the host could not be parsed. Either way, continue with the
// original host string.
overrideServerName = host
}
clone.OverrideServerName(overrideServerName)
creds = clone
}
}
return creds, nil
}
func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCredentials {
_, _, scheme := endpoint.ParseEndpoint(ep)
creds := c.creds
if len(scheme) != 0 {
creds = c.processCreds(scheme)
}
return creds
}
func newClient(cfg *Config) (*Client, error) {
if cfg == nil {
cfg = &Config{}
}
var creds grpccredentials.TransportCredentials
if cfg.TLS != nil {
creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials()
}
// use a temporary skeleton client to bootstrap first connection
baseCtx := context.TODO()
if cfg.Context != nil {
baseCtx = cfg.Context
}
ctx, cancel := context.WithCancel(baseCtx)
client := &Client{
conn: nil,
cfg: *cfg,
creds: creds,
ctx: ctx,
cancel: cancel,
mu: new(sync.RWMutex),
callOpts: defaultCallOpts,
}
lcfg := logutil.DefaultZapLoggerConfig
if cfg.LogConfig != nil {
lcfg = *cfg.LogConfig
}
var err error
client.lg, err = lcfg.Build()
if err != nil {
return nil, err
}
if cfg.Username != "" && cfg.Password != "" {
client.Username = cfg.Username
client.Password = cfg.Password
}
if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
}
callOpts := []grpc.CallOption{
defaultFailFast,
defaultMaxCallSendMsgSize,
defaultMaxCallRecvMsgSize,
}
if cfg.MaxCallSendMsgSize > 0 {
callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
}
if cfg.MaxCallRecvMsgSize > 0 {
callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
}
client.callOpts = callOpts
}
	// Prepare an 'endpoint://<unique-client-id>/' resolver for the client and create an endpoint target to pass
	// to dial so the client knows to use this resolver.
client.resolverGroup, err = endpoint.NewResolverGroup(fmt.Sprintf("client-%s", uuid.New().String()))
if err != nil {
client.cancel()
return nil, err
}
client.resolverGroup.SetEndpoints(cfg.Endpoints)
if len(cfg.Endpoints) < 1 {
client.cancel()
return nil, fmt.Errorf("at least one Endpoint is required in client config")
}
dialEndpoint := cfg.Endpoints[0]
	// Use the provided endpoint target so that, for https:// endpoints without any TLS config given,
	// grpc will assume the certificate server name is the endpoint host.
conn, err := client.dialWithBalancer(dialEndpoint, grpc.WithBalancerName(roundRobinBalancerName))
if err != nil {
client.cancel()
client.resolverGroup.Close()
return nil, err
}
// TODO: With the old grpc balancer interface, we waited until the dial timeout
// for the balancer to be ready. Is there an equivalent wait we should do with the new grpc balancer interface?
client.conn = conn
client.Cluster = NewCluster(client)
client.KV = NewKV(client)
client.Lease = NewLease(client)
client.Watcher = NewWatcher(client)
client.Auth = NewAuth(client)
client.Maintenance = NewMaintenance(client)
	// get token with the established connection
ctx, cancel = client.ctx, func() {}
if client.cfg.DialTimeout > 0 {
ctx, cancel = context.WithTimeout(ctx, client.cfg.DialTimeout)
}
err = client.getToken(ctx)
if err != nil {
client.Close()
cancel()
return nil, err
}
cancel()
if cfg.RejectOldCluster {
if err := client.checkVersion(); err != nil {
client.Close()
return nil, err
}
}
go client.autoSync()
return client, nil
}
// roundRobinQuorumBackoff retries against quorum between each backoff.
// This is intended for use with a round robin load balancer.
func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc {
return func(attempt uint) time.Duration {
// after each round robin across quorum, backoff for our wait between duration
n := uint(len(c.Endpoints()))
quorum := (n/2 + 1)
if attempt%quorum == 0 {
c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction))
return jitterUp(waitBetween, jitterFraction)
}
c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum))
return 0
}
}
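// Illustrative example (not from the original source): with 3 endpoints the quorum
// is 3/2+1 = 2, so every second attempt backs off for roughly waitBetween (with
// jitter) while the remaining attempts retry immediately on the next endpoint.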
func (c *Client) checkVersion() (err error) {
var wg sync.WaitGroup
eps := c.Endpoints()
errc := make(chan error, len(eps))
ctx, cancel := context.WithCancel(c.ctx)
if c.cfg.DialTimeout > 0 {
cancel()
ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
}
wg.Add(len(eps))
for _, ep := range eps {
// if cluster is current, any endpoint gives a recent version
go func(e string) {
defer wg.Done()
resp, rerr := c.Status(ctx, e)
if rerr != nil {
errc <- rerr
return
}
vs := strings.Split(resp.Version, ".")
maj, min := 0, 0
if len(vs) >= 2 {
var serr error
if maj, serr = strconv.Atoi(vs[0]); serr != nil {
errc <- serr
return
}
if min, serr = strconv.Atoi(vs[1]); serr != nil {
errc <- serr
return
}
}
if maj < 3 || (maj == 3 && min < 2) {
rerr = ErrOldCluster
}
errc <- rerr
}(ep)
}
// wait for success
for range eps {
if err = <-errc; err == nil {
break
}
}
cancel()
wg.Wait()
return err
}
// ActiveConnection returns the current in-use connection
func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
// isHaltErr returns true if the given error and context indicate no forward
// progress can be made, even after reconnecting.
func isHaltErr(ctx context.Context, err error) bool {
if ctx != nil && ctx.Err() != nil {
return true
}
if err == nil {
return false
}
ev, _ := status.FromError(err)
// Unavailable codes mean the system will be right back.
// (e.g., can't connect, lost leader)
// Treat Internal codes as if something failed, leaving the
// system in an inconsistent state, but retrying could make progress.
// (e.g., failed in middle of send, corrupted frame)
// TODO: are permanent Internal errors possible from grpc?
return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
}
// isUnavailableErr returns true if the given error is an unavailable error
func isUnavailableErr(ctx context.Context, err error) bool {
if ctx != nil && ctx.Err() != nil {
return false
}
if err == nil {
return false
}
ev, ok := status.FromError(err)
if ok {
// Unavailable codes mean the system will be right back.
// (e.g., can't connect, lost leader)
return ev.Code() == codes.Unavailable
}
return false
}
func toErr(ctx context.Context, err error) error {
if err == nil {
return nil
}
err = rpctypes.Error(err)
if _, ok := err.(rpctypes.EtcdError); ok {
return err
}
if ev, ok := status.FromError(err); ok {
code := ev.Code()
switch code {
case codes.DeadlineExceeded:
fallthrough
case codes.Canceled:
if ctx.Err() != nil {
err = ctx.Err()
}
}
}
return err
}
func canceledByCaller(stopCtx context.Context, err error) bool {
if stopCtx.Err() == nil || err == nil {
return false
}
return err == context.Canceled || err == context.DeadlineExceeded
}
// IsConnCanceled returns true, if error is from a closed gRPC connection.
// ref. https://github.com/grpc/grpc-go/pull/1854
func IsConnCanceled(err error) bool {
if err == nil {
return false
}
// >= gRPC v1.23.x
s, ok := status.FromError(err)
if ok {
// connection is canceled or server has already closed the connection
return s.Code() == codes.Canceled || s.Message() == "transport is closing"
}
// >= gRPC v1.10.x
if err == context.Canceled {
return true
}
// <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
return strings.Contains(err.Error(), "grpc: the client connection is closing")
}
| [
"\"ETCD_CLIENT_DEBUG\""
]
| []
| [
"ETCD_CLIENT_DEBUG"
]
| [] | ["ETCD_CLIENT_DEBUG"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'que_comer.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cmd/shelf/utils.go | package main
import (
"archive/tar"
"bytes"
"compress/gzip"
"fmt"
"io"
"os"
"path"
"path/filepath"
"runtime"
"time"
"github.com/go-git/go-git/v5"
)
const shelvesDIR = ".shelves"
// GetOrCreateShelvesDir gets the directory in user's home folder
// where all shelves are stored. If the directory doesn't exist, it creates one.
func GetOrCreateShelvesDir() (string, error) {
// Get path to shelves directory under $HOME.
shelfDir := getShelfDirPath()
_, err := os.Stat(shelfDir)
if err != nil {
if os.IsNotExist(err) {
// Since the shelves directory doesn't exist, create it.
err = os.Mkdir(shelfDir, 0755)
if err != nil {
return "", err
}
return shelfDir, nil
}
return "", err
}
return shelfDir, nil
}
func getShelfDirPath() string {
return path.Join(getHomeDir(), shelvesDIR)
}
func getHomeDir() string {
var (
home string
)
if runtime.GOOS == "linux" {
home = os.Getenv("XDG_CONFIG_HOME")
if home == "" {
home = os.Getenv("HOME")
}
}
return path.Clean(home)
}
// SOURCE: https://gist.github.com/mimoo/25fc9716e0f1353791f5908f94d6e726
func compress(src string, buf io.Writer) error {
zr := gzip.NewWriter(buf)
tw := tar.NewWriter(zr)
// walk through every file in the folder
filepath.Walk(src, func(file string, fi os.FileInfo, err error) error {
// generate tar header
header, err := tar.FileInfoHeader(fi, file)
if err != nil {
return err
}
// must provide real name
// (see https://golang.org/src/archive/tar/common.go?#L626)
header.Name = filepath.ToSlash(file)
// write header
if err := tw.WriteHeader(header); err != nil {
return err
}
// if not a dir, write file content
if !fi.IsDir() {
data, err := os.Open(file)
if err != nil {
return err
}
if _, err := io.Copy(tw, data); err != nil {
return err
}
}
return nil
})
// produce tar
if err := tw.Close(); err != nil {
return err
}
// produce gzip
if err := zr.Close(); err != nil {
return err
}
return nil
}
func createGitSnapshot(dir string) error {
// Opens an already existing repository.
r, err := git.PlainOpen(dir)
if err != nil {
return err
}
w, err := r.Worktree()
if err != nil {
return err
}
_, err = w.Add(".")
if err != nil {
return fmt.Errorf("error while adding all files: %w", err)
}
commitMsg := fmt.Sprintf("snapshot: Automatic commit for snapshot taken at %s", time.Now().Format("Mon Jan _2 15:04:05 2006"))
_, err = w.Commit(commitMsg, &git.CommitOptions{})
if err != nil {
return fmt.Errorf("error while creating a commit: %s", err.Error())
}
err = r.Push(&git.PushOptions{})
if err != nil {
return fmt.Errorf("error while pushing the commit: %w", err)
}
return nil
}
func createArchiveSnapshot(dir string, output string) error {
var buf bytes.Buffer
_ = compress(dir, &buf)
// write the .tar.gz
fileToWrite, err := os.OpenFile(output, os.O_CREATE|os.O_RDWR, os.FileMode(0755))
if err != nil {
return fmt.Errorf("error while creating output file: %w", err)
}
if _, err := io.Copy(fileToWrite, &buf); err != nil {
return fmt.Errorf("error while writing data to output: %w", err)
}
return nil
}
| [
"\"XDG_CONFIG_HOME\"",
"\"HOME\""
]
| []
| [
"HOME",
"XDG_CONFIG_HOME"
]
| [] | ["HOME", "XDG_CONFIG_HOME"] | go | 2 | 0 | |
infer.py | try:
import unzip_requirements
except ImportError:
pass
import json
import os
import tarfile
import boto3
import tensorflow as tf
import numpy as np
import census_data
FILE_DIR = '/tmp/'
BUCKET = os.environ['BUCKET']
def _easy_input_function(data_dict, batch_size=64):
"""
data_dict = {
'<csv_col_1>': ['<first_pred_value>', '<second_pred_value>']
'<csv_col_2>': ['<first_pred_value>', '<second_pred_value>']
...
}
"""
# Convert input data to numpy arrays
for col in data_dict:
col_ind = census_data._CSV_COLUMNS.index(col)
dtype = type(census_data._CSV_COLUMN_DEFAULTS[col_ind][0])
data_dict[col] = np.array(data_dict[col],
dtype=dtype)
labels = data_dict.pop('income_bracket')
ds = tf.data.Dataset.from_tensor_slices((data_dict, labels))
ds = ds.batch(64)
return ds
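# Illustrative, hypothetical input for _easy_input_function -- keys must match
# census_data._CSV_COLUMNS and each value is a list with one entry per row to
# predict, including the 'income_bracket' label column that gets popped off:
#
#     sample = {
#         'age': ['30', '42'],
#         'education': ['Bachelors', 'HS-grad'],
#         # ... remaining census columns ...
#         'income_bracket': ['<=50K', '>50K'],
#     }
#     ds = _easy_input_function(sample, batch_size=64)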
def inferHandler(event, context):
body = json.loads(event.get('body'))
# Read in prediction data as dictionary
# Keys should match _CSV_COLUMNS, values should be lists
predict_input = body['input']
# Read in epoch
epoch_files = body['epoch']
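    # Illustrative, hypothetical request body (the exact 'epoch' value must match
    # the S3 prefix under which the training job uploaded model.tar.gz):
    #   {"input": {<csv_col>: [<values>], ...}, "epoch": "<epoch_prefix>"}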
# Download model from S3 and extract
boto3.Session(
).resource('s3'
).Bucket(BUCKET
).download_file(
os.path.join(epoch_files,'model.tar.gz'),
FILE_DIR+'model.tar.gz')
tarfile.open(FILE_DIR+'model.tar.gz', 'r').extractall(FILE_DIR)
# Create feature columns
wide_cols, deep_cols = census_data.build_model_columns()
# Load model
classifier = tf.estimator.LinearClassifier(
feature_columns=wide_cols,
model_dir=FILE_DIR+'tmp/model_'+epoch_files+'/',
warm_start_from=FILE_DIR+'tmp/model_'+epoch_files+'/')
# Setup prediction
predict_iter = classifier.predict(
lambda:_easy_input_function(predict_input))
# Iterate over prediction and convert to lists
predictions = []
for prediction in predict_iter:
for key in prediction:
prediction[key] = prediction[key].tolist()
predictions.append(prediction)
response = {
"statusCode": 200,
"body": json.dumps(predictions,
default=lambda x: x.decode('utf-8'))
}
return response
| []
| []
| [
"BUCKET"
]
| [] | ["BUCKET"] | python | 1 | 0 | |
spotifyart/spotifyart_test.go | package spotifyart_test
import (
"fmt"
"github.com/piraveen/go-coverart/spotifyart"
"testing"
)
func TestAlbumCover(t *testing.T) {
results, err := spotifyart.AlbumCover("halcyon days", "ellie goulding")
if err == nil {
fmt.Printf("AlbumCover %v\n", results.Default)
// Output: AlbumCover https://i.scdn.co/image/c649d891ee6e0b86bf460bca264bd66715bd87f4
}
}
func TestArtistCover(t *testing.T) {
results, err := spotifyart.ArtistCover("ellie goulding", "pop", "metropopolis")
if err == nil {
fmt.Printf("ArtistCover %v\n", results.Default)
// Output: ArtistCover https://i.scdn.co/image/b72e148adf8cec8bf91784bee05d836858546367
}
}
func TestTrackCover(t *testing.T) {
results, err := spotifyart.TrackCover("lights", "ellie goulding")
if err == nil {
fmt.Printf("TrackCover %v\n", results.Default)
// Output: TrackCover https://i.scdn.co/image/5031974d62fe1ef31b7678343e3ad4d11d922131
}
}
func ExampleAlbumCover() {
// The API Keys can be defined in your code itself, however I recommend
// loading them through an environment variable like this:
// c := os.Getenv("SPOTIFY_CLIENTID")
// s := os.Getenv("SPOTIFY_CLIENTSECRET")
// Note: Providing Spotify Client Id and Client Secret is optional, but it
// would help you increase the Spotify rate limit for requests
// if err := spotifyart.Configure(c, s); err != nil {
// // Abort action
// return
// }
results, err := spotifyart.AlbumCover("halcyon days", "ellie goulding")
if err == nil {
fmt.Printf("AlbumCover %v\n", results.Default)
}
}
func ExampleArtistCover() {
// The API Keys can be defined in your code itself, however I recommend
// loading them through an environment variable like this:
// c := os.Getenv("SPOTIFY_CLIENTID")
// s := os.Getenv("SPOTIFY_CLIENTSECRET")
// Note: Providing Spotify Client Id and Client Secret is optional, but it
// would help you increase the Spotify rate limit for requests
// if err := spotifyart.Configure(c, s); err != nil {
// // Abort action
// return
// }
results, err := spotifyart.ArtistCover("ellie goulding", "pop", "metropopolis")
if err == nil {
fmt.Printf("ArtistCover %v\n", results.Default)
}
}
func ExampleTrackCover() {
// The API Keys can be defined in your code itself, however I recommend
// loading them through an environment variable like this:
// c := os.Getenv("SPOTIFY_CLIENTID")
// s := os.Getenv("SPOTIFY_CLIENTSECRET")
// Note: Providing Spotify Client Id and Client Secret is optional, but it
// would help you increase the Spotify rate limit for requests
// if err := spotifyart.Configure(c, s); err != nil {
// // Abort action
// return
// }
//
results, err := spotifyart.TrackCover("lights", "ellie goulding")
if err == nil {
fmt.Printf("TrackCover %v\n", results.Default)
}
}
| [
"\"SPOTIFY_CLIENTID\"",
"\"SPOTIFY_CLIENTSECRET\"",
"\"SPOTIFY_CLIENTID\"",
"\"SPOTIFY_CLIENTSECRET\"",
"\"SPOTIFY_CLIENTID\"",
"\"SPOTIFY_CLIENTSECRET\""
]
| []
| [
"SPOTIFY_CLIENTID",
"SPOTIFY_CLIENTSECRET"
]
| [] | ["SPOTIFY_CLIENTID", "SPOTIFY_CLIENTSECRET"] | go | 2 | 0 | |
filesystem_listener/operations.py | import subprocess
import settings
import time_parser
import time
import database
import helpers
import os
def isProjectRelated(path):
return (
path.startswith( os.environ['HOME'] + "/Documentos/UFG-CDC/PFC/PFC2/Sistema" )
or
path.startswith( os.environ['HOME'] + "/.ctxt_search-")
)
def shouldIgnore(path):
if isProjectRelated(path):
return True
for ignored_substring in settings.loaded['ignore_occurrences']:
if ignored_substring in path:
return True
return False
def created_something(type_names, path):
return True if (type_names[0] in ["IN_MOVED_TO", "IN_CREATE"]) and not shouldIgnore(path) else False
def accessed_something(type_names, path):
return True if (type_names[0] == "IN_OPEN") and not shouldIgnore(path) else False
def deleted_something(type_names, path):
return True if (type_names[0] in ["IN_MOVED_FROM", "IN_DELETE"]) and not shouldIgnore(path) else False
def store_events(connection, events):
if events != None:
normalized_events = []
for event in events:
normalized_events.append((
time_parser.convert_to_python_datetime(event[':start']),
time_parser.convert_to_python_datetime(event[':end']),
event[':summary']
))
database.store_events(connection, normalized_events)
def call_ics_plugin(connection, file):
print("PARSING ICS FILE: ", file)
return_data = subprocess.run([settings.loaded['ics_parser_bin'], file], stdout=subprocess.PIPE)
parsed_return = helpers.parse_yaml_string(return_data.stdout.decode('utf8'))
with connection:
store_events(connection, parsed_return)
print("FILE PARSED")
def handle_access(path, filename):
file = path + '/' + filename
if filename == 'START':
print("Starting time monitoring")
settings.add_runtime('start_timestamp', time.time())
if os.path.isfile(file):
file_id = database.store_file(file)
def handle_file_created(connection, path, filename):
file = path + '/' + filename
if filename.endswith('.ics'):
# print("ICS File detected")
call_ics_plugin(connection, file)
elif file == (settings.loaded['database'] + '-journal'):
return
else:
file_id = database.store_file(file)
def handle_file_deleted(connection, path, filename):
file = path + '/' + filename
if(filename.endswith('.ics')):
print("File calendar deleted: ", file)
# callIcsPlugin(file)
elif file == (settings.loaded['database'] + '-journal'):
return
else:
# print("Deleted file ", file)
database.delete_file_reference(connection.cursor(), file)
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
pkgs/clamchk/src/main.go | package main
import (
"bytes"
"fmt"
"github.com/dutchcoders/go-clamd"
"gitlab.intr/go-hms/libs.git/hmsclient"
"io/ioutil"
// "mjuser"
"gitlab.intr/go-hms/libs.git/mjuser"
"net/http"
"os"
"path/filepath"
"strings"
"syscall"
)
func Gowalk(infectedchann chan string) {
filepath.Walk(".", func(path string, info os.FileInfo, err error) error {
if !info.IsDir() {
if info.Size() < 10485760 {
result := myscan(path)
abs, _ := filepath.Abs(path)
if result != "OK" {
// fmt.Printf("sending to infectedchann : %v\n", abs)
infectedchann <- abs
// fmt.Printf("sended to infectedchann : %v\n", abs)
}
} else {
fmt.Println(path, " Skiped(file size", info.Size(), " is too large)\n")
}
}
return nil
})
close(infectedchann)
}
func main() {
var UnixAccountId string
Infected := make([]string, 0)
infectedchann := make(chan string)
go Gowalk(infectedchann)
for i := range infectedchann {
// fmt.Println("Read from channel infectedchann : " + i)
Infected = append(Infected, i)
}
if len(Infected) > 0 {
fmt.Printf("Infected: %v\n", Infected)
info, _ := os.Stat(Infected[0])
uid := fmt.Sprint(info.Sys().(*syscall.Stat_t).Uid)
u, _ := mjuser.LookupId(uid)
if strings.HasPrefix(u.Name, "Hosting") {
UnixAccountId = strings.Split(strings.Split(u.Name, ",")[4], "=")[1]
}
if UnixAccountId != "" {
res := hmsclient.Getkey(os.Getenv("MALWARE_URL"), os.Getenv("MALWARE_USER"), os.Getenv("MALWARE_PASSWORD"))
infectedFiles := fmt.Sprint("{\"infectedFiles\":[\"" + strings.Join(Infected, "\", \"") + "\"],\"solved\":false}")
url := fmt.Sprint("https://api.intr/unix-account/" + UnixAccountId + "/malware-report")
fmt.Printf("POSTdata is: %v\n", infectedFiles)
fmt.Printf("UnixAccountId is: %v\n", UnixAccountId)
fmt.Printf("url is: %v\n", url)
postres := apiPost(url, res.Access_token, infectedFiles)
fmt.Printf("postres is: %v\n", postres)
}
}
}
func myscan(scanfile string) string {
c := clamd.NewClamd("tcp://clamav.intr:3310")
_ = c
filebyte, err := ioutil.ReadFile(scanfile)
if err != nil {
fmt.Printf("ReadFile: %v\n", err)
}
reader := bytes.NewReader(filebyte)
response, err := c.ScanStream(reader, make(chan bool))
for s := range response {
return s.Status
}
return " "
}
func apiPost(posturl string, key string, payload string) []byte {
req, err := http.NewRequest("POST", posturl, bytes.NewBuffer([]byte(payload)))
req.Header.Set("cache-control", "no-cache")
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "Bearer "+key)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
panic(err)
}
defer resp.Body.Close()
contents, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Printf("%s", err)
os.Exit(1)
}
return []byte(contents)
}
| [
"\"MALWARE_URL\"",
"\"MALWARE_USER\"",
"\"MALWARE_PASSWORD\""
]
| []
| [
"MALWARE_PASSWORD",
"MALWARE_USER",
"MALWARE_URL"
]
| [] | ["MALWARE_PASSWORD", "MALWARE_USER", "MALWARE_URL"] | go | 3 | 0 | |
attendanceDjango/asgi.py | """
ASGI config for attendanceDjango project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'attendanceDjango.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
Seasons_Greetings/asgi.py | """
ASGI config for Seasons_Greetings project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Seasons_Greetings.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
openpype/scripts/non_python_host_launch.py | """Script wraps launch mechanism of non python host implementations.
Arguments passed to the script are passed to launch function in host
implementation. In all cases requires host app executable and may contain
workfile or others.
"""
import os
import sys
# Get current file to locate start point of sys.argv
CURRENT_FILE = os.path.abspath(__file__)
def show_error_messagebox(title, message, detail_message=None):
"""Function will show message and process ends after closing it."""
from Qt import QtWidgets, QtCore
from openpype import style
app = QtWidgets.QApplication([])
app.setStyleSheet(style.load_stylesheet())
msgbox = QtWidgets.QMessageBox()
msgbox.setWindowTitle(title)
msgbox.setText(message)
if detail_message:
msgbox.setDetailedText(detail_message)
msgbox.setWindowModality(QtCore.Qt.ApplicationModal)
msgbox.show()
sys.exit(app.exec_())
def on_invalid_args(script_not_found):
"""Show to user message box saying that something went wrong.
Tell user that arguments to launch implementation are invalid with
arguments details.
Args:
script_not_found (bool): Use different message based on this value.
"""
title = "Invalid arguments"
joined_args = ", ".join("\"{}\"".format(arg) for arg in sys.argv)
if script_not_found:
submsg = "Where couldn't find script path:\n\"{}\""
else:
submsg = "Expected Host executable after script path:\n\"{}\""
message = "BUG: Got invalid arguments so can't launch Host application."
detail_message = "Process was launched with arguments:\n{}\n\n{}".format(
joined_args,
submsg.format(CURRENT_FILE)
)
show_error_messagebox(title, message, detail_message)
def main(argv):
# Modify current file path to find match in sys.argv which may be different
# on windows (different letter cases and slashes).
modified_current_file = CURRENT_FILE.replace("\\", "/").lower()
# Create a copy of sys argv
sys_args = list(argv)
after_script_idx = None
# Find script path in sys.argv to know index of argv where host
# executable should be.
for idx, item in enumerate(sys_args):
if item.replace("\\", "/").lower() == modified_current_file:
after_script_idx = idx + 1
break
# Validate that there is at least one argument after script path
launch_args = None
if after_script_idx is not None:
launch_args = sys_args[after_script_idx:]
host_name = os.environ["AVALON_APP"].lower()
if host_name == "photoshop":
from openpype.hosts.photoshop.api.lib import main
elif host_name == "aftereffects":
from openpype.hosts.aftereffects.api.lib import main
elif host_name == "harmony":
from openpype.hosts.harmony.api.lib import main
else:
title = "Unknown host name"
message = (
"BUG: Environment variable AVALON_APP contains unknown"
" host name \"{}\""
).format(host_name)
show_error_messagebox(title, message)
return
if launch_args:
# Launch host implementation
main(*launch_args)
else:
# Show message box
on_invalid_args(after_script_idx is None)
if __name__ == "__main__":
main(sys.argv)
| []
| []
| [
"AVALON_APP"
]
| [] | ["AVALON_APP"] | python | 1 | 0 | |
pkg/domain/i18n/japanese.go | package i18n
import (
"os"
"regexp"
"golang.org/x/text/language"
"golang.org/x/text/message"
)
func initJapanese() {
if matched, _ := regexp.Match(`ja_JP.*`, []byte(os.Getenv("LANG"))); !matched {
return
}
T = message.NewPrinter(language.Japanese)
message.SetString(language.Japanese, MessageError, `内部エラーが発生しました`)
message.SetString(language.Japanese, MessageHelp, `
!help
ヘルプメッセージを表示します
!list
ログイン中のユーザ一覧を表示します
!title hoge
Minecraftのゲーム画面に hoge と表示されます
!whitelist list
ホワイトリストを表示します
!whitelist add hoge
ユーザ hoge をホワイトリストに追加します
!whitelist delete hoge
ユーザ hoge をホワイトリストから削除します
`)
message.SetString(language.Japanese, MessageInvalidArguments, `引数が間違っています`)
message.SetString(language.Japanese, MessageMemberJoined, `
ようこそ! 以下の手順でセットアップをしてください。
1. このトークルームで "!whitelist add ${MINECRAFT_ACCOUNT_ID}" と発言
2. Minecraft を起動して、"%s" サーバに参加
`)
message.SetString(language.Japanese, MessageNoLoginUserExists, `ログイン中のユーザは存在しません`)
message.SetString(language.Japanese, MessageNoSuchCommand, `コマンドが存在しません`)
message.SetString(language.Japanese, MessageNoUserExists, `ユーザが存在しません`)
message.SetString(language.Japanese, MessageSentMessage, `%s に送信しました`)
message.SetString(language.Japanese, MessageUserIncorrect, `ユーザ指定が間違っています: %s`)
message.SetString(language.Japanese, MessageUsersLogin, `ユーザがログインしました: %v`)
message.SetString(language.Japanese, MessageUsersLogout, `ユーザがログアウトしました: %v`)
message.SetString(language.Japanese, MessageWhitelistAdd, `ユーザをホワイトリストに追加しました: %s`)
message.SetString(language.Japanese, MessageWhitelistRemove, `ユーザをホワイトリストから削除しました: %s`)
}
| [
"\"LANG\""
]
| []
| [
"LANG"
]
| [] | ["LANG"] | go | 1 | 0 | |
src/cmd/go/internal/modload/init.go | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modload
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"go/build"
"internal/lazyregexp"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"cmd/go/internal/base"
"cmd/go/internal/cfg"
"cmd/go/internal/fsys"
"cmd/go/internal/lockedfile"
"cmd/go/internal/modconv"
"cmd/go/internal/modfetch"
"cmd/go/internal/search"
"golang.org/x/mod/modfile"
"golang.org/x/mod/module"
"golang.org/x/mod/semver"
)
// Variables set by other packages.
//
// TODO(#40775): See if these can be plumbed as explicit parameters.
var (
// RootMode determines whether a module root is needed.
RootMode Root
// ForceUseModules may be set to force modules to be enabled when
// GO111MODULE=auto or to report an error when GO111MODULE=off.
ForceUseModules bool
allowMissingModuleImports bool
)
// Variables set in Init.
var (
initialized bool
modRoot string
gopath string
)
// Variables set in initTarget (during {Load,Create}ModFile).
var (
Target module.Version
// targetPrefix is the path prefix for packages in Target, without a trailing
// slash. For most modules, targetPrefix is just Target.Path, but the
// standard-library module "std" has an empty prefix.
targetPrefix string
// targetInGorootSrc caches whether modRoot is within GOROOT/src.
// The "std" module is special within GOROOT/src, but not otherwise.
targetInGorootSrc bool
)
type Root int
const (
// AutoRoot is the default for most commands. modload.Init will look for
// a go.mod file in the current directory or any parent. If none is found,
// modules may be disabled (GO111MODULE=auto) or commands may run in a
// limited module mode.
AutoRoot Root = iota
// NoRoot is used for commands that run in module mode and ignore any go.mod
// file the current directory or in parent directories.
NoRoot
// NeedRoot is used for commands that must run in module mode and don't
// make sense without a main module.
NeedRoot
)
// ModFile returns the parsed go.mod file.
//
// Note that after calling LoadPackages or LoadModGraph,
// the require statements in the modfile.File are no longer
// the source of truth and will be ignored: edits made directly
// will be lost at the next call to WriteGoMod.
// To make permanent changes to the require statements
// in go.mod, edit it before loading.
func ModFile() *modfile.File {
Init()
if modFile == nil {
die()
}
return modFile
}
func BinDir() string {
Init()
return filepath.Join(gopath, "bin")
}
// Init determines whether module mode is enabled, locates the root of the
// current module (if any), sets environment variables for Git subprocesses, and
// configures the cfg, codehost, load, modfetch, and search packages for use
// with modules.
func Init() {
if initialized {
return
}
initialized = true
// Keep in sync with WillBeEnabled. We perform extra validation here, and
// there are lots of diagnostics and side effects, so we can't use
// WillBeEnabled directly.
var mustUseModules bool
env := cfg.Getenv("GO111MODULE")
switch env {
default:
base.Fatalf("go: unknown environment setting GO111MODULE=%s", env)
case "auto":
mustUseModules = ForceUseModules
case "on", "":
mustUseModules = true
case "off":
if ForceUseModules {
base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'")
}
mustUseModules = false
return
}
if err := fsys.Init(base.Cwd()); err != nil {
base.Fatalf("go: %v", err)
}
// Disable any prompting for passwords by Git.
// Only has an effect for 2.3.0 or later, but avoiding
// the prompt in earlier versions is just too hard.
// If user has explicitly set GIT_TERMINAL_PROMPT=1, keep
// prompting.
// See golang.org/issue/9341 and golang.org/issue/12706.
if os.Getenv("GIT_TERMINAL_PROMPT") == "" {
os.Setenv("GIT_TERMINAL_PROMPT", "0")
}
// Disable any ssh connection pooling by Git.
// If a Git subprocess forks a child into the background to cache a new connection,
// that child keeps stdout/stderr open. After the Git subprocess exits,
	// os/exec expects to be able to read from the stdout/stderr pipe
// until EOF to get all the data that the Git subprocess wrote before exiting.
// The EOF doesn't come until the child exits too, because the child
// is holding the write end of the pipe.
// This is unfortunate, but it has come up at least twice
// (see golang.org/issue/13453 and golang.org/issue/16104)
// and confuses users when it does.
// If the user has explicitly set GIT_SSH or GIT_SSH_COMMAND,
// assume they know what they are doing and don't step on it.
// But default to turning off ControlMaster.
if os.Getenv("GIT_SSH") == "" && os.Getenv("GIT_SSH_COMMAND") == "" {
os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no -o BatchMode=yes")
}
if os.Getenv("GCM_INTERACTIVE") == "" {
os.Setenv("GCM_INTERACTIVE", "never")
}
if modRoot != "" {
// modRoot set before Init was called ("go mod init" does this).
// No need to search for go.mod.
} else if RootMode == NoRoot {
if cfg.ModFile != "" && !base.InGOFLAGS("-modfile") {
base.Fatalf("go: -modfile cannot be used with commands that ignore the current module")
}
modRoot = ""
} else {
modRoot = findModuleRoot(base.Cwd())
if modRoot == "" {
if cfg.ModFile != "" {
base.Fatalf("go: cannot find main module, but -modfile was set.\n\t-modfile cannot be used to set the module root directory.")
}
if RootMode == NeedRoot {
base.Fatalf("go: %v", ErrNoModRoot)
}
if !mustUseModules {
// GO111MODULE is 'auto', and we can't find a module root.
// Stay in GOPATH mode.
return
}
} else if search.InDir(modRoot, os.TempDir()) == "." {
// If you create /tmp/go.mod for experimenting,
// then any tests that create work directories under /tmp
// will find it and get modules when they're not expecting them.
// It's a bit of a peculiar thing to disallow but quite mysterious
// when it happens. See golang.org/issue/26708.
modRoot = ""
fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in system temp root %v\n", os.TempDir())
if !mustUseModules {
return
}
}
}
if cfg.ModFile != "" && !strings.HasSuffix(cfg.ModFile, ".mod") {
base.Fatalf("go: -modfile=%s: file does not have .mod extension", cfg.ModFile)
}
// We're in module mode. Set any global variables that need to be set.
cfg.ModulesEnabled = true
setDefaultBuildMod()
list := filepath.SplitList(cfg.BuildContext.GOPATH)
if len(list) == 0 || list[0] == "" {
base.Fatalf("missing $GOPATH")
}
gopath = list[0]
if _, err := fsys.Stat(filepath.Join(gopath, "go.mod")); err == nil {
base.Fatalf("$GOPATH/go.mod exists but should not")
}
if modRoot == "" {
// We're in module mode, but not inside a module.
//
// Commands like 'go build', 'go run', 'go list' have no go.mod file to
// read or write. They would need to find and download the latest versions
// of a potentially large number of modules with no way to save version
// information. We can succeed slowly (but not reproducibly), but that's
// not usually a good experience.
//
// Instead, we forbid resolving import paths to modules other than std and
// cmd. Users may still build packages specified with .go files on the
// command line, but they'll see an error if those files import anything
// outside std.
//
// This can be overridden by calling AllowMissingModuleImports.
// For example, 'go get' does this, since it is expected to resolve paths.
//
// See golang.org/issue/32027.
} else {
modfetch.GoSumFile = strings.TrimSuffix(ModFilePath(), ".mod") + ".sum"
search.SetModRoot(modRoot)
}
}
// WillBeEnabled checks whether modules should be enabled but does not
// initialize modules by installing hooks. If Init has already been called,
// WillBeEnabled returns the same result as Enabled.
//
// This function is needed to break a cycle. The main package needs to know
// whether modules are enabled in order to install the module or GOPATH version
// of 'go get', but Init reads the -modfile flag in 'go get', so it shouldn't
// be called until the command is installed and flags are parsed. Instead of
// calling Init and Enabled, the main package can call this function.
func WillBeEnabled() bool {
if modRoot != "" || cfg.ModulesEnabled {
// Already enabled.
return true
}
if initialized {
// Initialized, not enabled.
return false
}
// Keep in sync with Init. Init does extra validation and prints warnings or
// exits, so it can't call this function directly.
env := cfg.Getenv("GO111MODULE")
switch env {
case "on", "":
return true
case "auto":
break
default:
return false
}
if modRoot := findModuleRoot(base.Cwd()); modRoot == "" {
// GO111MODULE is 'auto', and we can't find a module root.
// Stay in GOPATH mode.
return false
} else if search.InDir(modRoot, os.TempDir()) == "." {
// If you create /tmp/go.mod for experimenting,
// then any tests that create work directories under /tmp
// will find it and get modules when they're not expecting them.
// It's a bit of a peculiar thing to disallow but quite mysterious
// when it happens. See golang.org/issue/26708.
return false
}
return true
}
// Enabled reports whether modules are (or must be) enabled.
// If modules are enabled but there is no main module, Enabled returns true
// and then the first use of module information will call die
// (usually through MustModRoot).
func Enabled() bool {
Init()
return modRoot != "" || cfg.ModulesEnabled
}
// ModRoot returns the root of the main module.
// It calls base.Fatalf if there is no main module.
func ModRoot() string {
if !HasModRoot() {
die()
}
return modRoot
}
// HasModRoot reports whether a main module is present.
// HasModRoot may return false even if Enabled returns true: for example, 'get'
// does not require a main module.
func HasModRoot() bool {
Init()
return modRoot != ""
}
// ModFilePath returns the effective path of the go.mod file. Normally, this is
// "go.mod" in the directory returned by ModRoot, but the -modfile flag may
// change its location. ModFilePath calls base.Fatalf if there is no main
// module, even if -modfile is set.
func ModFilePath() string {
if !HasModRoot() {
die()
}
if cfg.ModFile != "" {
return cfg.ModFile
}
return filepath.Join(modRoot, "go.mod")
}
func die() {
if cfg.Getenv("GO111MODULE") == "off" {
base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'")
}
if dir, name := findAltConfig(base.Cwd()); dir != "" {
rel, err := filepath.Rel(base.Cwd(), dir)
if err != nil {
rel = dir
}
cdCmd := ""
if rel != "." {
cdCmd = fmt.Sprintf("cd %s && ", rel)
}
base.Fatalf("go: cannot find main module, but found %s in %s\n\tto create a module there, run:\n\t%sgo mod init", name, dir, cdCmd)
}
base.Fatalf("go: %v", ErrNoModRoot)
}
var ErrNoModRoot = errors.New("go.mod file not found in current directory or any parent directory; see 'go help modules'")
type goModDirtyError struct{}
func (goModDirtyError) Error() string {
if cfg.BuildModExplicit {
return fmt.Sprintf("updates to go.mod needed, disabled by -mod=%v; to update it:\n\tgo mod tidy", cfg.BuildMod)
}
if cfg.BuildModReason != "" {
return fmt.Sprintf("updates to go.mod needed, disabled by -mod=%s\n\t(%s)\n\tto update it:\n\tgo mod tidy", cfg.BuildMod, cfg.BuildModReason)
}
return "updates to go.mod needed; to update it:\n\tgo mod tidy"
}
var errGoModDirty error = goModDirtyError{}
// LoadModFile sets Target and, if there is a main module, parses the initial
// build list from its go.mod file.
//
// LoadModFile may make changes in memory, like adding a go directive and
// ensuring requirements are consistent, and will write those changes back to
// disk unless DisallowWriteGoMod is in effect.
//
// As a side-effect, LoadModFile may change cfg.BuildMod to "vendor" if
// -mod wasn't set explicitly and automatic vendoring should be enabled.
//
// If LoadModFile or CreateModFile has already been called, LoadModFile returns
// the existing in-memory requirements (rather than re-reading them from disk).
//
// LoadModFile checks the roots of the module graph for consistency with each
// other, but unlike LoadModGraph does not load the full module graph or check
// it for global consistency. Most callers outside of the modload package should
// use LoadModGraph instead.
func LoadModFile(ctx context.Context) *Requirements {
rs, needCommit := loadModFile(ctx)
if needCommit {
commitRequirements(ctx, modFileGoVersion(), rs)
}
return rs
}
// loadModFile is like LoadModFile, but does not implicitly commit the
// requirements back to disk after fixing inconsistencies.
//
// If needCommit is true, after the caller makes any other needed changes to the
// returned requirements they should invoke commitRequirements to fix any
// inconsistencies that may be present in the on-disk go.mod file.
func loadModFile(ctx context.Context) (rs *Requirements, needCommit bool) {
if requirements != nil {
return requirements, false
}
Init()
if modRoot == "" {
Target = module.Version{Path: "command-line-arguments"}
targetPrefix = "command-line-arguments"
goVersion := LatestGoVersion()
rawGoVersion.Store(Target, goVersion)
requirements = newRequirements(modDepthFromGoVersion(goVersion), nil, nil)
return requirements, false
}
gomod := ModFilePath()
var data []byte
var err error
if gomodActual, ok := fsys.OverlayPath(gomod); ok {
// Don't lock go.mod if it's part of the overlay.
// On Plan 9, locking requires chmod, and we don't want to modify any file
// in the overlay. See #44700.
data, err = os.ReadFile(gomodActual)
} else {
data, err = lockedfile.Read(gomodActual)
}
if err != nil {
base.Fatalf("go: %v", err)
}
var fixed bool
f, err := modfile.Parse(gomod, data, fixVersion(ctx, &fixed))
if err != nil {
// Errors returned by modfile.Parse begin with file:line.
base.Fatalf("go: errors parsing go.mod:\n%s\n", err)
}
if f.Module == nil {
// No module declaration. Must add module path.
base.Fatalf("go: no module declaration in go.mod. To specify the module path:\n\tgo mod edit -module=example.com/mod")
}
modFile = f
initTarget(f.Module.Mod)
index = indexModFile(data, f, fixed)
if err := module.CheckImportPath(f.Module.Mod.Path); err != nil {
if pathErr, ok := err.(*module.InvalidPathError); ok {
pathErr.Kind = "module"
}
base.Fatalf("go: %v", err)
}
setDefaultBuildMod() // possibly enable automatic vendoring
rs = requirementsFromModFile()
if cfg.BuildMod == "vendor" {
readVendorList()
checkVendorConsistency()
rs.initVendor(vendorList)
}
if rs.hasRedundantRoot() {
// If any module path appears more than once in the roots, we know that the
// go.mod file needs to be updated even though we have not yet loaded any
// transitive dependencies.
rs, err = updateRoots(ctx, rs.direct, rs, nil, nil, false)
if err != nil {
base.Fatalf("go: %v", err)
}
}
if index.goVersionV == "" {
// TODO(#45551): Do something more principled instead of checking
// cfg.CmdName directly here.
if cfg.BuildMod == "mod" && cfg.CmdName != "mod graph" && cfg.CmdName != "mod why" {
addGoStmt(LatestGoVersion())
if go117EnableLazyLoading {
// We need to add a 'go' version to the go.mod file, but we must assume
// that its existing contents match something between Go 1.11 and 1.16.
// Go 1.11 through 1.16 have eager requirements, but the latest Go
// version uses lazy requirements instead, so we need to convert the
// requirements to be lazy.
rs, err = convertDepth(ctx, rs, lazy)
if err != nil {
base.Fatalf("go: %v", err)
}
}
} else {
rawGoVersion.Store(Target, modFileGoVersion())
}
}
requirements = rs
return requirements, true
}
// CreateModFile initializes a new module by creating a go.mod file.
//
// If modPath is empty, CreateModFile will attempt to infer the path from the
// directory location within GOPATH.
//
// If a vendoring configuration file is present, CreateModFile will attempt to
// translate it to go.mod directives. The resulting build list may not be
// exactly the same as in the legacy configuration (for example, we can't get
// packages at multiple versions from the same module).
func CreateModFile(ctx context.Context, modPath string) {
modRoot = base.Cwd()
Init()
modFilePath := ModFilePath()
if _, err := fsys.Stat(modFilePath); err == nil {
base.Fatalf("go: %s already exists", modFilePath)
}
if modPath == "" {
var err error
modPath, err = findModulePath(modRoot)
if err != nil {
base.Fatalf("go: %v", err)
}
} else if err := module.CheckImportPath(modPath); err != nil {
if pathErr, ok := err.(*module.InvalidPathError); ok {
pathErr.Kind = "module"
// Same as build.IsLocalPath()
if pathErr.Path == "." || pathErr.Path == ".." ||
strings.HasPrefix(pathErr.Path, "./") || strings.HasPrefix(pathErr.Path, "../") {
pathErr.Err = errors.New("is a local import path")
}
}
base.Fatalf("go: %v", err)
}
fmt.Fprintf(os.Stderr, "go: creating new go.mod: module %s\n", modPath)
modFile = new(modfile.File)
modFile.AddModuleStmt(modPath)
initTarget(modFile.Module.Mod)
addGoStmt(LatestGoVersion()) // Add the go directive before converted module requirements.
convertedFrom, err := convertLegacyConfig(modPath)
if convertedFrom != "" {
fmt.Fprintf(os.Stderr, "go: copying requirements from %s\n", base.ShortPath(convertedFrom))
}
if err != nil {
base.Fatalf("go: %v", err)
}
rs := requirementsFromModFile()
rs, err = updateRoots(ctx, rs.direct, rs, nil, nil, false)
if err != nil {
base.Fatalf("go: %v", err)
}
commitRequirements(ctx, modFileGoVersion(), rs)
// Suggest running 'go mod tidy' unless the project is empty. Even if we
// imported all the correct requirements above, we're probably missing
// some sums, so the next build command in -mod=readonly will likely fail.
//
// We look for non-hidden .go files or subdirectories to determine whether
// this is an existing project. Walking the tree for packages would be more
// accurate, but could take much longer.
empty := true
files, _ := os.ReadDir(modRoot)
for _, f := range files {
name := f.Name()
if strings.HasPrefix(name, ".") || strings.HasPrefix(name, "_") {
continue
}
if strings.HasSuffix(name, ".go") || f.IsDir() {
empty = false
break
}
}
if !empty {
fmt.Fprintf(os.Stderr, "go: to add module requirements and sums:\n\tgo mod tidy\n")
}
}
// fixVersion returns a modfile.VersionFixer implemented using the Query function.
//
// It resolves commit hashes and branch names to versions,
// canonicalizes versions that appeared in early vgo drafts,
// and does nothing for versions that already appear to be canonical.
//
// The VersionFixer sets 'fixed' if it ever returns a non-canonical version.
func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer {
return func(path, vers string) (resolved string, err error) {
defer func() {
if err == nil && resolved != vers {
*fixed = true
}
}()
// Special case: remove the old -gopkgin- hack.
if strings.HasPrefix(path, "gopkg.in/") && strings.Contains(vers, "-gopkgin-") {
vers = vers[strings.Index(vers, "-gopkgin-")+len("-gopkgin-"):]
}
// fixVersion is called speculatively on every
// module, version pair from every go.mod file.
// Avoid the query if it looks OK.
_, pathMajor, ok := module.SplitPathVersion(path)
if !ok {
return "", &module.ModuleError{
Path: path,
Err: &module.InvalidVersionError{
Version: vers,
Err: fmt.Errorf("malformed module path %q", path),
},
}
}
if vers != "" && module.CanonicalVersion(vers) == vers {
if err := module.CheckPathMajor(vers, pathMajor); err != nil {
return "", module.VersionError(module.Version{Path: path, Version: vers}, err)
}
return vers, nil
}
info, err := Query(ctx, path, vers, "", nil)
if err != nil {
return "", err
}
return info.Version, nil
}
}
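// Editor's note, not part of the original source: a minimal illustration of the
// -gopkgin- hack handled above. A version string containing "-gopkgin-" is
// truncated to the text after that marker, so a hypothetical
// "v0.0.0-20160301-gopkgin-v2.2.1" for a gopkg.in/... path becomes "v2.2.1",
// and an already-canonical version such as "v1.5.2" is returned unchanged
// without querying the proxy.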
// AllowMissingModuleImports allows import paths to be resolved to modules
// when there is no module root. Normally, this is forbidden because it's slow
// and there's no way to make the result reproducible, but some commands
// like 'go get' are expected to do this.
//
// This function affects the default cfg.BuildMod when outside of a module,
// so it can only be called prior to Init.
func AllowMissingModuleImports() {
if initialized {
panic("AllowMissingModuleImports after Init")
}
allowMissingModuleImports = true
}
// initTarget sets Target and associated variables according to modFile.
func initTarget(m module.Version) {
Target = m
targetPrefix = m.Path
if rel := search.InDir(base.Cwd(), cfg.GOROOTsrc); rel != "" {
targetInGorootSrc = true
if m.Path == "std" {
// The "std" module in GOROOT/src is the Go standard library. Unlike other
// modules, the packages in the "std" module have no import-path prefix.
//
// Modules named "std" outside of GOROOT/src do not receive this special
// treatment, so it is possible to run 'go test .' in other GOROOTs to
// test individual packages using a combination of the modified package
// and the ordinary standard library.
// (See https://golang.org/issue/30756.)
targetPrefix = ""
}
}
}
// requirementsFromModFile returns the set of non-excluded requirements from
// the global modFile.
func requirementsFromModFile() *Requirements {
roots := make([]module.Version, 0, len(modFile.Require))
direct := map[string]bool{}
for _, r := range modFile.Require {
if index != nil && index.exclude[r.Mod] {
if cfg.BuildMod == "mod" {
fmt.Fprintf(os.Stderr, "go: dropping requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version)
} else {
fmt.Fprintf(os.Stderr, "go: ignoring requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version)
}
continue
}
roots = append(roots, r.Mod)
if !r.Indirect {
direct[r.Mod.Path] = true
}
}
module.Sort(roots)
rs := newRequirements(modDepthFromGoVersion(modFileGoVersion()), roots, direct)
return rs
}
// setDefaultBuildMod sets a default value for cfg.BuildMod if the -mod flag
// wasn't provided. setDefaultBuildMod may be called multiple times.
func setDefaultBuildMod() {
if cfg.BuildModExplicit {
// Don't override an explicit '-mod=' argument.
return
}
// TODO(#40775): commands should pass in the module mode as an option
// to modload functions instead of relying on an implicit setting
// based on command name.
switch cfg.CmdName {
case "get", "mod download", "mod init", "mod tidy":
// These commands are intended to update go.mod and go.sum.
cfg.BuildMod = "mod"
return
case "mod graph", "mod verify", "mod why":
// These commands should not update go.mod or go.sum, but they should be
// able to fetch modules not in go.sum and should not report errors if
// go.mod is inconsistent. They're useful for debugging, and they need
// to work in buggy situations.
cfg.BuildMod = "mod"
allowWriteGoMod = false
return
case "mod vendor":
cfg.BuildMod = "readonly"
return
}
if modRoot == "" {
if allowMissingModuleImports {
cfg.BuildMod = "mod"
} else {
cfg.BuildMod = "readonly"
}
return
}
if fi, err := fsys.Stat(filepath.Join(modRoot, "vendor")); err == nil && fi.IsDir() {
modGo := "unspecified"
if index != nil && index.goVersionV != "" {
if semver.Compare(index.goVersionV, "v1.14") >= 0 {
// The Go version is at least 1.14, and a vendor directory exists.
// Set -mod=vendor by default.
cfg.BuildMod = "vendor"
cfg.BuildModReason = "Go version in go.mod is at least 1.14 and vendor directory exists."
return
} else {
modGo = index.goVersionV[1:]
}
}
// Since a vendor directory exists, we should record why we didn't use it.
// This message won't normally be shown, but it may appear with import errors.
cfg.BuildModReason = fmt.Sprintf("Go version in go.mod is %s, so vendor directory was not used.", modGo)
}
cfg.BuildMod = "readonly"
}
// convertLegacyConfig imports module requirements from a legacy vendoring
// configuration file, if one is present.
func convertLegacyConfig(modPath string) (from string, err error) {
noneSelected := func(path string) (version string) { return "none" }
queryPackage := func(path, rev string) (module.Version, error) {
pkgMods, modOnly, err := QueryPattern(context.Background(), path, rev, noneSelected, nil)
if err != nil {
return module.Version{}, err
}
if len(pkgMods) > 0 {
return pkgMods[0].Mod, nil
}
return modOnly.Mod, nil
}
for _, name := range altConfigs {
cfg := filepath.Join(modRoot, name)
data, err := os.ReadFile(cfg)
if err == nil {
convert := modconv.Converters[name]
if convert == nil {
return "", nil
}
cfg = filepath.ToSlash(cfg)
err := modconv.ConvertLegacyConfig(modFile, cfg, data, queryPackage)
return name, err
}
}
return "", nil
}
// addGoStmt adds a go directive to the go.mod file if it does not already
// include one. The 'go' version added, if any, is the latest version supported
// by this toolchain.
func addGoStmt(v string) {
if modFile.Go != nil && modFile.Go.Version != "" {
return
}
if err := modFile.AddGoStmt(v); err != nil {
base.Fatalf("go: internal error: %v", err)
}
rawGoVersion.Store(Target, v)
}
// LatestGoVersion returns the latest version of the Go language supported by
// this toolchain, like "1.17".
func LatestGoVersion() string {
tags := build.Default.ReleaseTags
version := tags[len(tags)-1]
if !strings.HasPrefix(version, "go") || !modfile.GoVersionRE.MatchString(version[2:]) {
base.Fatalf("go: internal error: unrecognized default version %q", version)
}
return version[2:]
}
// priorGoVersion returns the Go major release immediately preceding v,
// or v itself if v is the first Go major release (1.0) or not a supported
// Go version.
func priorGoVersion(v string) string {
vTag := "go" + v
tags := build.Default.ReleaseTags
for i, tag := range tags {
if tag == vTag {
if i == 0 {
return v
}
version := tags[i-1]
if !strings.HasPrefix(version, "go") || !modfile.GoVersionRE.MatchString(version[2:]) {
base.Fatalf("go: internal error: unrecognized version %q", version)
}
return version[2:]
}
}
return v
}
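// Editor's note, not part of the original source: a sketch of the lookup above.
// If build.Default.ReleaseTags ends in [..., "go1.16", "go1.17"], then
// priorGoVersion("1.17") returns "1.16", while priorGoVersion("1.0") (or any
// version not present in the release tags) returns its argument unchanged.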
var altConfigs = []string{
"Gopkg.lock",
"GLOCKFILE",
"Godeps/Godeps.json",
"dependencies.tsv",
"glide.lock",
"vendor.conf",
"vendor.yml",
"vendor/manifest",
"vendor/vendor.json",
".git/config",
}
func findModuleRoot(dir string) (root string) {
if dir == "" {
panic("dir not set")
}
dir = filepath.Clean(dir)
// Look for enclosing go.mod.
for {
if fi, err := fsys.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
return dir
}
d := filepath.Dir(dir)
if d == dir {
break
}
dir = d
}
return ""
}
func findAltConfig(dir string) (root, name string) {
if dir == "" {
panic("dir not set")
}
dir = filepath.Clean(dir)
if rel := search.InDir(dir, cfg.BuildContext.GOROOT); rel != "" {
// Don't suggest creating a module from $GOROOT/.git/config
// or a config file found in any parent of $GOROOT (see #34191).
return "", ""
}
for {
for _, name := range altConfigs {
if fi, err := fsys.Stat(filepath.Join(dir, name)); err == nil && !fi.IsDir() {
return dir, name
}
}
d := filepath.Dir(dir)
if d == dir {
break
}
dir = d
}
return "", ""
}
func findModulePath(dir string) (string, error) {
// TODO(bcmills): once we have located a plausible module path, we should
// query version control (if available) to verify that it matches the major
// version of the most recent tag.
// See https://golang.org/issue/29433, https://golang.org/issue/27009, and
// https://golang.org/issue/31549.
// Cast about for import comments,
// first in top-level directory, then in subdirectories.
list, _ := os.ReadDir(dir)
for _, info := range list {
if info.Type().IsRegular() && strings.HasSuffix(info.Name(), ".go") {
if com := findImportComment(filepath.Join(dir, info.Name())); com != "" {
return com, nil
}
}
}
for _, info1 := range list {
if info1.IsDir() {
files, _ := os.ReadDir(filepath.Join(dir, info1.Name()))
for _, info2 := range files {
if info2.Type().IsRegular() && strings.HasSuffix(info2.Name(), ".go") {
if com := findImportComment(filepath.Join(dir, info1.Name(), info2.Name())); com != "" {
return path.Dir(com), nil
}
}
}
}
}
// Look for Godeps.json declaring import path.
data, _ := os.ReadFile(filepath.Join(dir, "Godeps/Godeps.json"))
var cfg1 struct{ ImportPath string }
json.Unmarshal(data, &cfg1)
if cfg1.ImportPath != "" {
return cfg1.ImportPath, nil
}
// Look for vendor.json declaring import path.
data, _ = os.ReadFile(filepath.Join(dir, "vendor/vendor.json"))
var cfg2 struct{ RootPath string }
json.Unmarshal(data, &cfg2)
if cfg2.RootPath != "" {
return cfg2.RootPath, nil
}
// Look for path in GOPATH.
var badPathErr error
for _, gpdir := range filepath.SplitList(cfg.BuildContext.GOPATH) {
if gpdir == "" {
continue
}
if rel := search.InDir(dir, filepath.Join(gpdir, "src")); rel != "" && rel != "." {
path := filepath.ToSlash(rel)
// gorelease will alert users publishing their modules to fix their paths.
if err := module.CheckImportPath(path); err != nil {
badPathErr = err
break
}
return path, nil
}
}
reason := "outside GOPATH, module path must be specified"
if badPathErr != nil {
// return a different error message if the module was in GOPATH, but
// the module path determined above would be an invalid path.
reason = fmt.Sprintf("bad module path inferred from directory in GOPATH: %v", badPathErr)
}
msg := `cannot determine module path for source directory %s (%s)
Example usage:
'go mod init example.com/m' to initialize a v0 or v1 module
'go mod init example.com/m/v2' to initialize a v2 module
Run 'go help mod init' for more information.
`
return "", fmt.Errorf(msg, dir, reason)
}
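// Editor's note, not part of the original source: an example of the GOPATH
// fallback above. With an assumed GOPATH of /home/user/go, running
// 'go mod init' without arguments in /home/user/go/src/example.com/m infers
// the module path "example.com/m", provided it passes module.CheckImportPath.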
var (
importCommentRE = lazyregexp.New(`(?m)^package[ \t]+[^ \t\r\n/]+[ \t]+//[ \t]+import[ \t]+(\"[^"]+\")[ \t]*\r?\n`)
)
func findImportComment(file string) string {
data, err := os.ReadFile(file)
if err != nil {
return ""
}
m := importCommentRE.FindSubmatch(data)
if m == nil {
return ""
}
path, err := strconv.Unquote(string(m[1]))
if err != nil {
return ""
}
return path
}
var allowWriteGoMod = true
// DisallowWriteGoMod causes future calls to WriteGoMod to do nothing at all.
func DisallowWriteGoMod() {
allowWriteGoMod = false
}
// AllowWriteGoMod undoes the effect of DisallowWriteGoMod:
// future calls to WriteGoMod will update go.mod if needed.
// Note that any past calls have been discarded, so typically
// a call to AllowWriteGoMod should be followed by a call to WriteGoMod.
func AllowWriteGoMod() {
allowWriteGoMod = true
}
// WriteGoMod writes the current build list back to go.mod.
func WriteGoMod(ctx context.Context) {
if !allowWriteGoMod {
panic("WriteGoMod called while disallowed")
}
commitRequirements(ctx, modFileGoVersion(), LoadModFile(ctx))
}
// commitRequirements sets the global requirements variable to rs and
// writes its contents back to the go.mod file on disk.
func commitRequirements(ctx context.Context, goVersion string, rs *Requirements) {
requirements = rs
if !allowWriteGoMod {
// Some package outside of modload promised to update the go.mod file later.
return
}
if modRoot == "" {
// We aren't in a module, so we don't have anywhere to write a go.mod file.
return
}
var list []*modfile.Require
for _, m := range rs.rootModules {
list = append(list, &modfile.Require{
Mod: m,
Indirect: !rs.direct[m.Path],
})
}
if goVersion != "" {
modFile.AddGoStmt(goVersion)
}
if semver.Compare("v"+modFileGoVersion(), separateIndirectVersionV) < 0 {
modFile.SetRequire(list)
} else {
modFile.SetRequireSeparateIndirect(list)
}
modFile.Cleanup()
dirty := index.modFileIsDirty(modFile)
if dirty && cfg.BuildMod != "mod" {
// If we're about to fail due to -mod=readonly,
// prefer to report a dirty go.mod over a dirty go.sum
base.Fatalf("go: %v", errGoModDirty)
}
if !dirty && cfg.CmdName != "mod tidy" {
// The go.mod file has the same semantic content that it had before
// (but not necessarily the same exact bytes).
// Don't write go.mod, but write go.sum in case we added or trimmed sums.
// 'go mod init' shouldn't write go.sum, since it will be incomplete.
if cfg.CmdName != "mod init" {
modfetch.WriteGoSum(keepSums(ctx, loaded, rs, addBuildListZipSums))
}
return
}
gomod := ModFilePath()
if _, ok := fsys.OverlayPath(gomod); ok {
if dirty {
base.Fatalf("go: updates to go.mod needed, but go.mod is part of the overlay specified with -overlay")
}
return
}
new, err := modFile.Format()
if err != nil {
base.Fatalf("go: %v", err)
}
defer func() {
// At this point we have determined to make the go.mod file on disk equal to new.
index = indexModFile(new, modFile, false)
// Update go.sum after releasing the side lock and refreshing the index.
// 'go mod init' shouldn't write go.sum, since it will be incomplete.
if cfg.CmdName != "mod init" {
modfetch.WriteGoSum(keepSums(ctx, loaded, rs, addBuildListZipSums))
}
}()
// Make a best-effort attempt to acquire the side lock, only to exclude
// previous versions of the 'go' command from making simultaneous edits.
if unlock, err := modfetch.SideLock(); err == nil {
defer unlock()
}
errNoChange := errors.New("no update needed")
err = lockedfile.Transform(ModFilePath(), func(old []byte) ([]byte, error) {
if bytes.Equal(old, new) {
// The go.mod file is already equal to new, possibly as the result of some
// other process.
return nil, errNoChange
}
if index != nil && !bytes.Equal(old, index.data) {
// The contents of the go.mod file have changed. In theory we could add all
// of the new modules to the build list, recompute, and check whether any
// module in *our* build list got bumped to a different version, but that's
// a lot of work for marginal benefit. Instead, fail the command: if users
// want to run concurrent commands, they need to start with a complete,
// consistent module definition.
return nil, fmt.Errorf("existing contents have changed since last read")
}
return new, nil
})
if err != nil && err != errNoChange {
base.Fatalf("go: updating go.mod: %v", err)
}
}
// keepSums returns the set of modules (and go.mod file entries) for which
// checksums would be needed in order to reload the same set of packages
// loaded by the most recent call to LoadPackages or ImportFromFiles,
// including any go.mod files needed to reconstruct the MVS result,
// in addition to the checksums for every module in keepMods.
func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums) map[module.Version]bool {
// Every module in the full module graph contributes its requirements,
// so in order to ensure that the build list itself is reproducible,
// we need sums for every go.mod in the graph (regardless of whether
// that version is selected).
keep := make(map[module.Version]bool)
// Add entries for modules in the build list with paths that are prefixes of
// paths of loaded packages. We need to retain sums for all of these modules —
// not just the modules containing the actual packages — in order to rule out
// ambiguous import errors the next time we load the package.
if ld != nil {
for _, pkg := range ld.pkgs {
// We check pkg.mod.Path here instead of pkg.inStd because the
// pseudo-package "C" is not in std, but not provided by any module (and
// shouldn't force loading the whole module graph).
if pkg.testOf != nil || (pkg.mod.Path == "" && pkg.err == nil) || module.CheckImportPath(pkg.path) != nil {
continue
}
if rs.depth == lazy && pkg.mod.Path != "" {
if v, ok := rs.rootSelected(pkg.mod.Path); ok && v == pkg.mod.Version {
// pkg was loaded from a root module, and because the main module is
// lazy we do not check non-root modules for conflicts for packages
// that can be found in roots. So we only need the checksums for the
// root modules that may contain pkg, not all possible modules.
for prefix := pkg.path; prefix != "."; prefix = path.Dir(prefix) {
if v, ok := rs.rootSelected(prefix); ok && v != "none" {
m := module.Version{Path: prefix, Version: v}
keep[resolveReplacement(m)] = true
}
}
continue
}
}
mg, _ := rs.Graph(ctx)
for prefix := pkg.path; prefix != "."; prefix = path.Dir(prefix) {
if v := mg.Selected(prefix); v != "none" {
m := module.Version{Path: prefix, Version: v}
keep[resolveReplacement(m)] = true
}
}
}
}
if rs.graph.Load() == nil {
// The module graph was not loaded, possibly because the main module is lazy
// or possibly because we haven't needed to load the graph yet.
// Save sums for the root modules (or their replacements), but don't
// incur the cost of loading the graph just to find and retain the sums.
for _, m := range rs.rootModules {
r := resolveReplacement(m)
keep[modkey(r)] = true
if which == addBuildListZipSums {
keep[r] = true
}
}
} else {
mg, _ := rs.Graph(ctx)
mg.WalkBreadthFirst(func(m module.Version) {
if _, ok := mg.RequiredBy(m); ok {
// The requirements from m's go.mod file are present in the module graph,
// so they are relevant to the MVS result regardless of whether m was
// actually selected.
keep[modkey(resolveReplacement(m))] = true
}
})
if which == addBuildListZipSums {
for _, m := range mg.BuildList() {
keep[resolveReplacement(m)] = true
}
}
}
return keep
}
type whichSums int8
const (
loadedZipSumsOnly = whichSums(iota)
addBuildListZipSums
)
// modkey returns the module.Version under which the checksum for m's go.mod
// file is stored in the go.sum file.
func modkey(m module.Version) module.Version {
return module.Version{Path: m.Path, Version: m.Version + "/go.mod"}
}
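// Editor's note, not part of the original source: for a hypothetical module
// example.com/m at v1.2.3, the checksum of its go.mod file is stored in go.sum
// under the synthesized version "v1.2.3/go.mod":
//
//	modkey(module.Version{Path: "example.com/m", Version: "v1.2.3"})
//	// => module.Version{Path: "example.com/m", Version: "v1.2.3/go.mod"}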
| [
"\"GIT_TERMINAL_PROMPT\"",
"\"GIT_SSH\"",
"\"GIT_SSH_COMMAND\"",
"\"GCM_INTERACTIVE\""
]
| []
| [
"GIT_SSH",
"GCM_INTERACTIVE",
"GIT_SSH_COMMAND",
"GIT_TERMINAL_PROMPT"
]
| [] | ["GIT_SSH", "GCM_INTERACTIVE", "GIT_SSH_COMMAND", "GIT_TERMINAL_PROMPT"] | go | 4 | 0 | |
sdk/storage/azure-storage-queue/samples/queue_samples_message.py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: queue_samples_message.py
DESCRIPTION:
These samples demonstrate the following: creating and setting an access policy to generate a
sas token, getting a queue client from a queue URL, setting and getting queue
metadata, sending messages and receiving them individually or by batch, deleting and
clearing all messages, and peeking and updating messages.
USAGE:
python queue_samples_message.py
Set the environment variables with your own values before running the sample:
1) AZURE_STORAGE_CONNECTION_STRING - the connection string to your storage account
"""
from datetime import datetime, timedelta
import os
class QueueMessageSamples(object):
connection_string = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
def set_access_policy(self):
# [START create_queue_client_from_connection_string]
from azure.storage.queue import QueueClient
queue = QueueClient.from_connection_string(self.connection_string, "myqueue1")
# [END create_queue_client_from_connection_string]
# Create the queue
queue.create_queue()
# Send a message
queue.send_message(u"hello world")
try:
# [START set_access_policy]
# Create an access policy
from azure.storage.queue import AccessPolicy, QueueSasPermissions
access_policy = AccessPolicy()
access_policy.start = datetime.utcnow() - timedelta(hours=1)
access_policy.expiry = datetime.utcnow() + timedelta(hours=1)
access_policy.permission = QueueSasPermissions(read=True)
identifiers = {'my-access-policy-id': access_policy}
# Set the access policy
queue.set_queue_access_policy(identifiers)
# [END set_access_policy]
# Use the access policy to generate a SAS token
# [START queue_client_sas_token]
from azure.storage.queue import generate_queue_sas
sas_token = generate_queue_sas(
queue.account_name,
queue.queue_name,
queue.credential.account_key,
policy_id='my-access-policy-id'
)
# [END queue_client_sas_token]
# Authenticate with the sas token
# [START create_queue_client]
token_auth_queue = QueueClient.from_queue_url(
queue_url=queue.url,
credential=sas_token
)
# [END create_queue_client]
# Use the newly authenticated client to receive messages
my_message = token_auth_queue.receive_messages()
finally:
# Delete the queue
queue.delete_queue()
def queue_metadata(self):
# Instantiate a queue client
from azure.storage.queue import QueueClient
queue = QueueClient.from_connection_string(self.connection_string, "myqueue2")
# Create the queue
queue.create_queue()
try:
# [START set_queue_metadata]
metadata = {'foo': 'val1', 'bar': 'val2', 'baz': 'val3'}
queue.set_queue_metadata(metadata=metadata)
# [END set_queue_metadata]
# [START get_queue_properties]
properties = queue.get_queue_properties().metadata
# [END get_queue_properties]
finally:
# Delete the queue
queue.delete_queue()
def send_and_receive_messages(self):
# Instantiate a queue client
from azure.storage.queue import QueueClient
queue = QueueClient.from_connection_string(self.connection_string, "myqueue3")
# Create the queue
queue.create_queue()
try:
# [START send_messages]
queue.send_message(u"message1")
queue.send_message(u"message2", visibility_timeout=30) # wait 30s before becoming visible
queue.send_message(u"message3")
queue.send_message(u"message4")
queue.send_message(u"message5")
# [END send_messages]
# [START receive_messages]
# Receive messages one-by-one
messages = queue.receive_messages()
for msg in messages:
print(msg.content)
# Receive messages by batch
messages = queue.receive_messages(messages_per_page=5)
for msg_batch in messages.by_page():
for msg in msg_batch:
print(msg.content)
queue.delete_message(msg)
# [END receive_messages]
# Only prints 4 messages because message 2 is not visible yet
# >>message1
# >>message3
# >>message4
# >>message5
finally:
# Delete the queue
queue.delete_queue()
def list_message_pages(self):
# Instantiate a queue client
from azure.storage.queue import QueueClient
queue = QueueClient.from_connection_string(self.connection_string, "myqueue4")
# Create the queue
queue.create_queue()
try:
queue.send_message(u"message1")
queue.send_message(u"message2")
queue.send_message(u"message3")
queue.send_message(u"message4")
queue.send_message(u"message5")
queue.send_message(u"message6")
# [START receive_messages_listing]
# Store two messages in each page
message_batches = queue.receive_messages(messages_per_page=2).by_page()
# Iterate through the page lists
print(list(next(message_batches)))
print(list(next(message_batches)))
# There are two iterations in the last page as well.
last_page = next(message_batches)
for message in last_page:
print(message)
# [END receive_messages_listing]
finally:
queue.delete_queue()
def receive_one_message_from_queue(self):
# Instantiate a queue client
from azure.storage.queue import QueueClient
queue = QueueClient.from_connection_string(self.connection_string, "myqueue5")
# Create the queue
queue.create_queue()
try:
queue.send_message(u"message1")
queue.send_message(u"message2")
queue.send_message(u"message3")
# [START receive_one_message]
# Pop two messages from the front of the queue
message1 = queue.receive_message()
message2 = queue.receive_message()
# We should see message 3 if we peek
message3 = queue.peek_messages()[0]
print(message1.content)
print(message2.content)
print(message3.content)
# [END receive_one_message]
finally:
queue.delete_queue()
def delete_and_clear_messages(self):
# Instantiate a queue client
from azure.storage.queue import QueueClient
queue = QueueClient.from_connection_string(self.connection_string, "myqueue6")
# Create the queue
queue.create_queue()
try:
# Send messages
queue.send_message(u"message1")
queue.send_message(u"message2")
queue.send_message(u"message3")
queue.send_message(u"message4")
queue.send_message(u"message5")
# [START delete_message]
# Get the message at the front of the queue
msg = next(queue.receive_messages())
# Delete the specified message
queue.delete_message(msg)
# [END delete_message]
# [START clear_messages]
queue.clear_messages()
# [END clear_messages]
finally:
# Delete the queue
queue.delete_queue()
def peek_messages(self):
# Instantiate a queue client
from azure.storage.queue import QueueClient
queue = QueueClient.from_connection_string(self.connection_string, "myqueue7")
# Create the queue
queue.create_queue()
try:
# Send messages
queue.send_message(u"message1")
queue.send_message(u"message2")
queue.send_message(u"message3")
queue.send_message(u"message4")
queue.send_message(u"message5")
# [START peek_message]
# Peek at one message at the front of the queue
msg = queue.peek_messages()
# Peek at up to 5 messages from the front of the queue
messages = queue.peek_messages(max_messages=5)
# Print the peeked messages
for message in messages:
print(message.content)
# [END peek_message]
finally:
# Delete the queue
queue.delete_queue()
def update_message(self):
# Instantiate a queue client
from azure.storage.queue import QueueClient
queue = QueueClient.from_connection_string(self.connection_string, "myqueue8")
# Create the queue
queue.create_queue()
try:
# [START update_message]
# Send a message
queue.send_message(u"update me")
# Receive the message
messages = queue.receive_messages()
# Update the message
list_result = next(messages)
message = queue.update_message(
list_result.id,
pop_receipt=list_result.pop_receipt,
visibility_timeout=0,
content=u"updated")
# [END update_message]
finally:
# Delete the queue
queue.delete_queue()
if __name__ == '__main__':
sample = QueueMessageSamples()
sample.set_access_policy()
sample.queue_metadata()
sample.send_and_receive_messages()
sample.list_message_pages()
sample.receive_one_message_from_queue()
sample.delete_and_clear_messages()
sample.peek_messages()
sample.update_message()
| []
| []
| [
"AZURE_STORAGE_CONNECTION_STRING"
]
| [] | ["AZURE_STORAGE_CONNECTION_STRING"] | python | 1 | 0 | |
internal/ingress/controller/template/template.go | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package template
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"math/rand"
"net"
"net/url"
"os"
"os/exec"
"reflect"
"regexp"
"sort"
"strings"
text_template "text/template"
"time"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/ingress-nginx/internal/file"
"k8s.io/ingress-nginx/internal/ingress"
"k8s.io/ingress-nginx/internal/ingress/annotations/influxdb"
"k8s.io/ingress-nginx/internal/ingress/annotations/ratelimit"
"k8s.io/ingress-nginx/internal/ingress/controller/config"
ing_net "k8s.io/ingress-nginx/internal/net"
"k8s.io/klog"
)
const (
slash = "/"
nonIdempotent = "non_idempotent"
defBufferSize = 65535
)
// TemplateWriter is the interface to render a template
type TemplateWriter interface {
Write(conf config.TemplateConfig) ([]byte, error)
}
// Template holds the parsed NGINX template and a buffer pool used to render it.
type Template struct {
tmpl *text_template.Template
//fw watch.FileWatcher
bp *BufferPool
}
// NewTemplate returns a new Template instance or an
// error if the specified template file contains errors
func NewTemplate(file string, fs file.Filesystem) (*Template, error) {
data, err := fs.ReadFile(file)
if err != nil {
return nil, errors.Wrapf(err, "unexpected error reading template %v", file)
}
tmpl, err := text_template.New("nginx.tmpl").Funcs(funcMap).Parse(string(data))
if err != nil {
return nil, err
}
return &Template{
tmpl: tmpl,
bp: NewBufferPool(defBufferSize),
}, nil
}
// Write populates a buffer using a template with NGINX configuration
// and the servers and upstreams created by Ingress rules
func (t *Template) Write(conf config.TemplateConfig) ([]byte, error) {
tmplBuf := t.bp.Get()
defer t.bp.Put(tmplBuf)
outCmdBuf := t.bp.Get()
defer t.bp.Put(outCmdBuf)
if klog.V(3) {
b, err := json.Marshal(conf)
if err != nil {
klog.Errorf("unexpected error: %v", err)
}
klog.Infof("NGINX configuration: %v", string(b))
}
err := t.tmpl.Execute(tmplBuf, conf)
if err != nil {
return nil, err
}
// squeezes multiple adjacent empty lines to be single
// spaced; this is to avoid the use of regular expressions
cmd := exec.Command("/ingress-controller/clean-nginx-conf.sh")
cmd.Stdin = tmplBuf
cmd.Stdout = outCmdBuf
if err := cmd.Run(); err != nil {
klog.Warningf("unexpected error cleaning template: %v", err)
return tmplBuf.Bytes(), nil
}
return outCmdBuf.Bytes(), nil
}
var (
funcMap = text_template.FuncMap{
"empty": func(input interface{}) bool {
check, ok := input.(string)
if ok {
return len(check) == 0
}
return true
},
"escapeLiteralDollar": escapeLiteralDollar,
"shouldConfigureLuaRestyWAF": shouldConfigureLuaRestyWAF,
"buildLuaSharedDictionaries": buildLuaSharedDictionaries,
"buildLocation": buildLocation,
"buildAuthLocation": buildAuthLocation,
"shouldApplyGlobalAuth": shouldApplyGlobalAuth,
"buildAuthResponseHeaders": buildAuthResponseHeaders,
"buildProxyPass": buildProxyPass,
"filterRateLimits": filterRateLimits,
"buildRateLimitZones": buildRateLimitZones,
"buildRateLimit": buildRateLimit,
"buildResolversForLua": buildResolversForLua,
"configForLua": configForLua,
"locationConfigForLua": locationConfigForLua,
"buildResolvers": buildResolvers,
"buildUpstreamName": buildUpstreamName,
"isLocationInLocationList": isLocationInLocationList,
"isLocationAllowed": isLocationAllowed,
"buildLogFormatUpstream": buildLogFormatUpstream,
"buildDenyVariable": buildDenyVariable,
"getenv": os.Getenv,
"contains": strings.Contains,
"hasPrefix": strings.HasPrefix,
"hasSuffix": strings.HasSuffix,
"trimSpace": strings.TrimSpace,
"toUpper": strings.ToUpper,
"toLower": strings.ToLower,
"formatIP": formatIP,
"buildNextUpstream": buildNextUpstream,
"getIngressInformation": getIngressInformation,
"serverConfig": func(all config.TemplateConfig, server *ingress.Server) interface{} {
return struct{ First, Second interface{} }{all, server}
},
"isValidByteSize": isValidByteSize,
"buildForwardedFor": buildForwardedFor,
"buildAuthSignURL": buildAuthSignURL,
"buildOpentracing": buildOpentracing,
"proxySetHeader": proxySetHeader,
"buildInfluxDB": buildInfluxDB,
"enforceRegexModifier": enforceRegexModifier,
"stripLocationModifer": stripLocationModifer,
"buildCustomErrorDeps": buildCustomErrorDeps,
"opentracingPropagateContext": opentracingPropagateContext,
"buildCustomErrorLocationsPerServer": buildCustomErrorLocationsPerServer,
"shouldLoadModSecurityModule": shouldLoadModSecurityModule,
}
)
// escapeLiteralDollar will replace the $ character with ${literal_dollar}
// which is made to work via the following configuration in the http section of
// the template:
// geo $literal_dollar {
// default "$";
// }
func escapeLiteralDollar(input interface{}) string {
inputStr, ok := input.(string)
if !ok {
return ""
}
return strings.Replace(inputStr, `$`, `${literal_dollar}`, -1)
}
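// Editor's note, not part of the original source: a small illustration of the
// replacement above. An annotation value such as `return 200 "$host"` is
// rewritten to `return 200 "${literal_dollar}host"`, and the geo block shown
// in the comment maps ${literal_dollar} back to a literal "$" at request time.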
// formatIP will wrap IPv6 addresses in [] and return IPv4 addresses
// without modification. If the input cannot be parsed as an IP address
// it is returned without modification.
func formatIP(input string) string {
ip := net.ParseIP(input)
if ip == nil {
return input
}
if v4 := ip.To4(); v4 != nil {
return input
}
return fmt.Sprintf("[%s]", input)
}
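// Editor's note, not part of the original source: examples of the behavior
// above. formatIP("10.0.0.1") returns "10.0.0.1", formatIP("2001:db8::1")
// returns "[2001:db8::1]", and a value that does not parse as an IP address,
// such as a hostname, is returned unmodified.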
func shouldConfigureLuaRestyWAF(disableLuaRestyWAF bool, mode string) bool {
if !disableLuaRestyWAF && len(mode) > 0 {
return true
}
return false
}
func buildLuaSharedDictionaries(s interface{}, disableLuaRestyWAF bool) string {
servers, ok := s.([]*ingress.Server)
if !ok {
klog.Errorf("expected an '[]*ingress.Server' type but %T was returned", s)
return ""
}
out := []string{
"lua_shared_dict configuration_data 15M",
"lua_shared_dict certificate_data 16M",
}
if !disableLuaRestyWAF {
luaRestyWAFEnabled := func() bool {
for _, server := range servers {
for _, location := range server.Locations {
if len(location.LuaRestyWAF.Mode) > 0 {
return true
}
}
}
return false
}()
if luaRestyWAFEnabled {
out = append(out, "lua_shared_dict waf_storage 64M")
}
}
return strings.Join(out, ";\n\r") + ";"
}
func buildResolversForLua(res interface{}, disableIpv6 interface{}) string {
nss, ok := res.([]net.IP)
if !ok {
klog.Errorf("expected a '[]net.IP' type but %T was returned", res)
return ""
}
no6, ok := disableIpv6.(bool)
if !ok {
klog.Errorf("expected a 'bool' type but %T was returned", disableIpv6)
return ""
}
if len(nss) == 0 {
return ""
}
r := []string{}
for _, ns := range nss {
if ing_net.IsIPV6(ns) {
if no6 {
continue
}
r = append(r, fmt.Sprintf("\"[%v]\"", ns))
} else {
r = append(r, fmt.Sprintf("\"%v\"", ns))
}
}
return strings.Join(r, ", ")
}
// configForLua returns some general configuration as Lua table represented as string
func configForLua(input interface{}) string {
all, ok := input.(config.TemplateConfig)
if !ok {
klog.Errorf("expected a 'config.TemplateConfig' type but %T was given", input)
return "{}"
}
return fmt.Sprintf(`{
use_forwarded_headers = %t,
is_ssl_passthrough_enabled = %t,
http_redirect_code = %v,
listen_ports = { ssl_proxy = "%v", https = "%v" },
}`, all.Cfg.UseForwardedHeaders, all.IsSSLPassthroughEnabled, all.Cfg.HTTPRedirectCode, all.ListenPorts.SSLProxy, all.ListenPorts.HTTPS)
}
// locationConfigForLua formats some location specific configuration into Lua table represented as string
func locationConfigForLua(l interface{}, s interface{}, a interface{}) string {
location, ok := l.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was given", l)
return "{}"
}
server, ok := s.(*ingress.Server)
if !ok {
klog.Errorf("expected an '*ingress.Server' type but %T was given", s)
return "{}"
}
all, ok := a.(config.TemplateConfig)
if !ok {
klog.Errorf("expected a 'config.TemplateConfig' type but %T was given", a)
return "{}"
}
forceSSLRedirect := location.Rewrite.ForceSSLRedirect || (len(server.SSLCert.PemFileName) > 0 && location.Rewrite.SSLRedirect)
forceSSLRedirect = forceSSLRedirect && !isLocationInLocationList(l, all.Cfg.NoTLSRedirectLocations)
return fmt.Sprintf(`{
force_ssl_redirect = %t,
use_port_in_redirects = %t,
}`, forceSSLRedirect, location.UsePortInRedirects)
}
// buildResolvers returns the resolvers reading the /etc/resolv.conf file
func buildResolvers(res interface{}, disableIpv6 interface{}) string {
// NGINX need IPV6 addresses to be surrounded by brackets
nss, ok := res.([]net.IP)
if !ok {
klog.Errorf("expected a '[]net.IP' type but %T was returned", res)
return ""
}
no6, ok := disableIpv6.(bool)
if !ok {
klog.Errorf("expected a 'bool' type but %T was returned", disableIpv6)
return ""
}
if len(nss) == 0 {
return ""
}
r := []string{"resolver"}
for _, ns := range nss {
if ing_net.IsIPV6(ns) {
if no6 {
continue
}
r = append(r, fmt.Sprintf("[%v]", ns))
} else {
r = append(r, fmt.Sprintf("%v", ns))
}
}
r = append(r, "valid=30s")
if no6 {
r = append(r, "ipv6=off")
}
return strings.Join(r, " ") + ";"
}
func needsRewrite(location *ingress.Location) bool {
if len(location.Rewrite.Target) > 0 && location.Rewrite.Target != location.Path {
return true
}
return false
}
func stripLocationModifer(path string) string {
return strings.TrimLeft(path, "~* ")
}
// enforceRegexModifier checks if the "rewrite-target" or "use-regex" annotation
// is used on any location path within a server
func enforceRegexModifier(input interface{}) bool {
locations, ok := input.([]*ingress.Location)
if !ok {
klog.Errorf("expected an '[]*ingress.Location' type but %T was returned", input)
return false
}
for _, location := range locations {
if needsRewrite(location) || location.Rewrite.UseRegex {
return true
}
}
return false
}
// buildLocation produces the location string, if the ingress has redirects
// (specified through the nginx.ingress.kubernetes.io/rewrite-target annotation)
func buildLocation(input interface{}, enforceRegex bool) string {
location, ok := input.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return slash
}
path := location.Path
if enforceRegex {
return fmt.Sprintf(`~* "^%s"`, path)
}
return path
}
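// Editor's note, not part of the original source: with enforceRegex set, a
// location path "/foo" is emitted as the case-insensitive regex location
// ~* "^/foo"; without it, the path is returned as-is for a plain prefix match.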
func buildAuthLocation(input interface{}, globalExternalAuthURL string) string {
location, ok := input.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return ""
}
if (location.ExternalAuth.URL == "") && (!shouldApplyGlobalAuth(input, globalExternalAuthURL)) {
return ""
}
str := base64.URLEncoding.EncodeToString([]byte(location.Path))
// removes "=" after encoding
str = strings.Replace(str, "=", "", -1)
return fmt.Sprintf("/_external-auth-%v", str)
}
// shouldApplyGlobalAuth returns true only in case when ExternalAuth.URL is not set and
// GlobalExternalAuth is set and enabled
func shouldApplyGlobalAuth(input interface{}, globalExternalAuthURL string) bool {
location, ok := input.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
}
if (location.ExternalAuth.URL == "") && (globalExternalAuthURL != "") && (location.EnableGlobalAuth) {
return true
}
return false
}
func buildAuthResponseHeaders(headers []string) []string {
res := []string{}
if len(headers) == 0 {
return res
}
for i, h := range headers {
hvar := strings.ToLower(h)
hvar = strings.NewReplacer("-", "_").Replace(hvar)
res = append(res, fmt.Sprintf("auth_request_set $authHeader%v $upstream_http_%v;", i, hvar))
res = append(res, fmt.Sprintf("proxy_set_header '%v' $authHeader%v;", h, i))
}
return res
}
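// Editor's note, not part of the original source: for a single configured
// response header "X-Auth-User", the loop above generates:
//
//	auth_request_set $authHeader0 $upstream_http_x_auth_user;
//	proxy_set_header 'X-Auth-User' $authHeader0;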
func buildLogFormatUpstream(input interface{}) string {
cfg, ok := input.(config.Configuration)
if !ok {
klog.Errorf("expected a 'config.Configuration' type but %T was returned", input)
return ""
}
return cfg.BuildLogFormatUpstream()
}
// buildProxyPass produces the proxy pass string, if the ingress has redirects
// (specified through the nginx.ingress.kubernetes.io/rewrite-target annotation)
// If the annotation nginx.ingress.kubernetes.io/add-base-url:"true" is specified it will
// add a base tag in the head of the response from the service
func buildProxyPass(host string, b interface{}, loc interface{}) string {
backends, ok := b.([]*ingress.Backend)
if !ok {
klog.Errorf("expected an '[]*ingress.Backend' type but %T was returned", b)
return ""
}
location, ok := loc.(*ingress.Location)
if !ok {
klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return ""
}
path := location.Path
proto := "http://"
proxyPass := "proxy_pass"
switch location.BackendProtocol {
case "HTTPS":
proto = "https://"
case "GRPC":
proto = "grpc://"
proxyPass = "grpc_pass"
case "GRPCS":
proto = "grpcs://"
proxyPass = "grpc_pass"
case "AJP":
proto = ""
proxyPass = "ajp_pass"
}
upstreamName := "upstream_balancer"
for _, backend := range backends {
if backend.Name == location.Backend {
if backend.SSLPassthrough {
proto = "https://"
if location.BackendProtocol == "GRPCS" {
proto = "grpcs://"
}
}
break
}
}
// defProxyPass returns the default proxy_pass, just the name of the upstream
defProxyPass := fmt.Sprintf("%v %s%s;", proxyPass, proto, upstreamName)
// if the path in the ingress rule is equal to the target: no special rewrite
if path == location.Rewrite.Target {
return defProxyPass
}
if len(location.Rewrite.Target) > 0 {
var xForwardedPrefix string
if len(location.XForwardedPrefix) > 0 {
xForwardedPrefix = fmt.Sprintf("proxy_set_header X-Forwarded-Prefix \"%s\";\n", location.XForwardedPrefix)
}
return fmt.Sprintf(`
rewrite "(?i)%s" %s break;
%v%v %s%s;`, path, location.Rewrite.Target, xForwardedPrefix, proxyPass, proto, upstreamName)
}
// default proxy_pass
return defProxyPass
}
// TODO: Needs Unit Tests
func filterRateLimits(input interface{}) []ratelimit.Config {
ratelimits := []ratelimit.Config{}
found := sets.String{}
servers, ok := input.([]*ingress.Server)
if !ok {
klog.Errorf("expected a '[]ratelimit.RateLimit' type but %T was returned", input)
return ratelimits
}
for _, server := range servers {
for _, loc := range server.Locations {
if loc.RateLimit.ID != "" && !found.Has(loc.RateLimit.ID) {
found.Insert(loc.RateLimit.ID)
ratelimits = append(ratelimits, loc.RateLimit)
}
}
}
return ratelimits
}
// TODO: Needs Unit Tests
// buildRateLimitZones produces an array of limit_conn_zone in order to allow
// rate limiting of request. Each Ingress rule could have up to three zones, one
// for connection limit by IP address, one for limiting requests per minute, and
// one for limiting requests per second.
func buildRateLimitZones(input interface{}) []string {
zones := sets.String{}
servers, ok := input.([]*ingress.Server)
if !ok {
klog.Errorf("expected a '[]*ingress.Server' type but %T was returned", input)
return zones.List()
}
for _, server := range servers {
for _, loc := range server.Locations {
if loc.RateLimit.Connections.Limit > 0 {
zone := fmt.Sprintf("limit_conn_zone $limit_%s zone=%v:%vm;",
loc.RateLimit.ID,
loc.RateLimit.Connections.Name,
loc.RateLimit.Connections.SharedSize)
if !zones.Has(zone) {
zones.Insert(zone)
}
}
if loc.RateLimit.RPM.Limit > 0 {
zone := fmt.Sprintf("limit_req_zone $limit_%s zone=%v:%vm rate=%vr/m;",
loc.RateLimit.ID,
loc.RateLimit.RPM.Name,
loc.RateLimit.RPM.SharedSize,
loc.RateLimit.RPM.Limit)
if !zones.Has(zone) {
zones.Insert(zone)
}
}
if loc.RateLimit.RPS.Limit > 0 {
zone := fmt.Sprintf("limit_req_zone $limit_%s zone=%v:%vm rate=%vr/s;",
loc.RateLimit.ID,
loc.RateLimit.RPS.Name,
loc.RateLimit.RPS.SharedSize,
loc.RateLimit.RPS.Limit)
if !zones.Has(zone) {
zones.Insert(zone)
}
}
}
}
return zones.List()
}
// buildRateLimit produces an array of limit_req to be used inside the Path of
// Ingress rules. The order: connections by IP first, then RPS, and RPM last.
func buildRateLimit(input interface{}) []string {
limits := []string{}
loc, ok := input.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return limits
}
if loc.RateLimit.Connections.Limit > 0 {
limit := fmt.Sprintf("limit_conn %v %v;",
loc.RateLimit.Connections.Name, loc.RateLimit.Connections.Limit)
limits = append(limits, limit)
}
if loc.RateLimit.RPS.Limit > 0 {
limit := fmt.Sprintf("limit_req zone=%v burst=%v nodelay;",
loc.RateLimit.RPS.Name, loc.RateLimit.RPS.Burst)
limits = append(limits, limit)
}
if loc.RateLimit.RPM.Limit > 0 {
limit := fmt.Sprintf("limit_req zone=%v burst=%v nodelay;",
loc.RateLimit.RPM.Name, loc.RateLimit.RPM.Burst)
limits = append(limits, limit)
}
if loc.RateLimit.LimitRateAfter > 0 {
limit := fmt.Sprintf("limit_rate_after %vk;",
loc.RateLimit.LimitRateAfter)
limits = append(limits, limit)
}
if loc.RateLimit.LimitRate > 0 {
limit := fmt.Sprintf("limit_rate %vk;",
loc.RateLimit.LimitRate)
limits = append(limits, limit)
}
return limits
}
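// Illustrative sketch (hypothetical values): a connection limit of 10 on zone "conn-1"
// plus an RPS zone "zone-1" with burst 5 would yield, in this order:
//
//   limit_conn conn-1 10;
//   limit_req zone=zone-1 burst=5 nodelay;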
func isLocationInLocationList(location interface{}, rawLocationList string) bool {
loc, ok := location.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", location)
return false
}
locationList := strings.Split(rawLocationList, ",")
for _, locationListItem := range locationList {
locationListItem = strings.Trim(locationListItem, " ")
if locationListItem == "" {
continue
}
if strings.HasPrefix(loc.Path, locationListItem) {
return true
}
}
return false
}
func isLocationAllowed(input interface{}) bool {
loc, ok := input.(*ingress.Location)
if !ok {
klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return false
}
return loc.Denied == nil
}
var (
denyPathSlugMap = map[string]string{}
)
// buildDenyVariable returns an nginx variable for a location in a
// server to be used in the whitelist check.
// This method uses a unique id generator library to reduce the
// size of the string to be used as a variable in nginx to avoid
// issues with the size of the variable bucket size directive.
func buildDenyVariable(a interface{}) string {
l, ok := a.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", a)
return ""
}
if _, ok := denyPathSlugMap[l]; !ok {
denyPathSlugMap[l] = randomString()
}
return fmt.Sprintf("$deny_%v", denyPathSlugMap[l])
}
func buildUpstreamName(loc interface{}) string {
location, ok := loc.(*ingress.Location)
if !ok {
klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return ""
}
upstreamName := location.Backend
return upstreamName
}
func buildNextUpstream(i, r interface{}) string {
nextUpstream, ok := i.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", i)
return ""
}
retryNonIdempotent := r.(bool)
parts := strings.Split(nextUpstream, " ")
nextUpstreamCodes := make([]string, 0, len(parts))
for _, v := range parts {
if v != "" && v != nonIdempotent {
nextUpstreamCodes = append(nextUpstreamCodes, v)
}
if v == nonIdempotent {
retryNonIdempotent = true
}
}
if retryNonIdempotent {
nextUpstreamCodes = append(nextUpstreamCodes, nonIdempotent)
}
return strings.Join(nextUpstreamCodes, " ")
}
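// Illustrative sketch (hypothetical input): buildNextUpstream("error timeout", true)
// returns "error timeout non_idempotent", while buildNextUpstream("error non_idempotent timeout", false)
// keeps the flag it found but moves it to the end, returning "error timeout non_idempotent".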
// refer to http://nginx.org/en/docs/syntax.html
// Nginx differentiates between size and offset values;
// offset directives additionally accept gigabytes
var nginxSizeRegex = regexp.MustCompile("^[0-9]+[kKmM]{0,1}$")
var nginxOffsetRegex = regexp.MustCompile("^[0-9]+[kKmMgG]{0,1}$")
// isValidByteSize validates size units valid in nginx
// http://nginx.org/en/docs/syntax.html
func isValidByteSize(input interface{}, isOffset bool) bool {
s, ok := input.(string)
if !ok {
klog.Errorf("expected an 'string' type but %T was returned", input)
return false
}
s = strings.TrimSpace(s)
if s == "" {
klog.V(2).Info("empty byte size, hence it will not be set")
return false
}
if isOffset {
return nginxOffsetRegex.MatchString(s)
}
return nginxSizeRegex.MatchString(s)
}
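// Illustrative sketch: isValidByteSize("16k", false) and isValidByteSize("8m", false)
// are accepted, isValidByteSize("1g", false) is rejected because plain sizes do not
// allow gigabytes, and isValidByteSize("1g", true) passes the offset check.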
type ingressInformation struct {
Namespace string
Rule string
Service string
Annotations map[string]string
}
func (info *ingressInformation) Equal(other *ingressInformation) bool {
if info.Namespace != other.Namespace {
return false
}
if info.Rule != other.Rule {
return false
}
if info.Service != other.Service {
return false
}
if !reflect.DeepEqual(info.Annotations, other.Annotations) {
return false
}
return true
}
func getIngressInformation(i, h, p interface{}) *ingressInformation {
ing, ok := i.(*ingress.Ingress)
if !ok {
klog.Errorf("expected an '*ingress.Ingress' type but %T was returned", i)
return &ingressInformation{}
}
hostname, ok := h.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", h)
return &ingressInformation{}
}
path, ok := p.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", p)
return &ingressInformation{}
}
if ing == nil {
return &ingressInformation{}
}
info := &ingressInformation{
Namespace: ing.GetNamespace(),
Rule: ing.GetName(),
Annotations: ing.Annotations,
}
if ing.Spec.Backend != nil {
info.Service = ing.Spec.Backend.ServiceName
}
for _, rule := range ing.Spec.Rules {
if rule.HTTP == nil {
continue
}
if hostname != "" && hostname != rule.Host {
continue
}
for _, rPath := range rule.HTTP.Paths {
if path == rPath.Path {
info.Service = rPath.Backend.ServiceName
return info
}
}
}
return info
}
func buildForwardedFor(input interface{}) string {
s, ok := input.(string)
if !ok {
klog.Errorf("expected a 'string' type but %T was returned", input)
return ""
}
ffh := strings.Replace(s, "-", "_", -1)
ffh = strings.ToLower(ffh)
return fmt.Sprintf("$http_%v", ffh)
}
func buildAuthSignURL(input interface{}) string {
s, ok := input.(string)
if !ok {
klog.Errorf("expected an 'string' type but %T was returned", input)
return ""
}
u, _ := url.Parse(s)
q := u.Query()
if len(q) == 0 {
return fmt.Sprintf("%v?rd=$pass_access_scheme://$http_host$escaped_request_uri", s)
}
if q.Get("rd") != "" {
return s
}
return fmt.Sprintf("%v&rd=$pass_access_scheme://$http_host$escaped_request_uri", s)
}
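// Illustrative sketch (hypothetical URL): buildAuthSignURL("https://auth.example.com/start")
// appends the redirect parameter and returns
// "https://auth.example.com/start?rd=$pass_access_scheme://$http_host$escaped_request_uri";
// a URL that already carries an "rd" query parameter is returned unchanged.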
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
func init() {
rand.Seed(time.Now().UnixNano())
}
func randomString() string {
b := make([]rune, 32)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}
func buildOpentracing(input interface{}) string {
cfg, ok := input.(config.Configuration)
if !ok {
klog.Errorf("expected a 'config.Configuration' type but %T was returned", input)
return ""
}
if !cfg.EnableOpentracing {
return ""
}
buf := bytes.NewBufferString("")
if cfg.ZipkinCollectorHost != "" {
buf.WriteString("opentracing_load_tracer /usr/local/lib/libzipkin_opentracing.so /etc/nginx/opentracing.json;")
} else if cfg.JaegerCollectorHost != "" {
buf.WriteString("opentracing_load_tracer /usr/local/lib/libjaegertracing_plugin.so /etc/nginx/opentracing.json;")
} else if cfg.DatadogCollectorHost != "" {
buf.WriteString("opentracing_load_tracer /usr/local/lib/libdd_opentracing.so /etc/nginx/opentracing.json;")
}
buf.WriteString("\r\n")
return buf.String()
}
// buildInfluxDB produces the single line configuration
// needed by the InfluxDB module to send request's metrics
// for the current resource
func buildInfluxDB(input interface{}) string {
cfg, ok := input.(influxdb.Config)
if !ok {
klog.Errorf("expected an 'influxdb.Config' type but %T was returned", input)
return ""
}
if !cfg.InfluxDBEnabled {
return ""
}
return fmt.Sprintf(
"influxdb server_name=%s host=%s port=%s measurement=%s enabled=true;",
cfg.InfluxDBServerName,
cfg.InfluxDBHost,
cfg.InfluxDBPort,
cfg.InfluxDBMeasurement,
)
}
func proxySetHeader(loc interface{}) string {
location, ok := loc.(*ingress.Location)
if !ok {
klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return "proxy_set_header"
}
if location.BackendProtocol == "GRPC" || location.BackendProtocol == "GRPCS" {
return "grpc_set_header"
}
return "proxy_set_header"
}
// buildCustomErrorDeps is a utility function returning a struct wrapper with
// the data required to build the 'CUSTOM_ERRORS' template
func buildCustomErrorDeps(upstreamName string, errorCodes []int, enableMetrics bool) interface{} {
return struct {
UpstreamName string
ErrorCodes []int
EnableMetrics bool
}{
UpstreamName: upstreamName,
ErrorCodes: errorCodes,
EnableMetrics: enableMetrics,
}
}
type errorLocation struct {
UpstreamName string
Codes []int
}
// buildCustomErrorLocationsPerServer is a utility function which will collect all
// custom error codes for all locations of a server block, deduplicates them,
// and returns a set which is unique by default-upstream and error code. It returns an array
// of errorLocations, each of which contains the upstream name and a list of
// error codes for that given upstream, so that sufficiently unique
// @custom error location blocks can be created in the template
func buildCustomErrorLocationsPerServer(input interface{}) interface{} {
server, ok := input.(*ingress.Server)
if !ok {
klog.Errorf("expected a '*ingress.Server' type but %T was returned", input)
return nil
}
codesMap := make(map[string]map[int]bool)
for _, loc := range server.Locations {
backendUpstream := loc.DefaultBackendUpstreamName
var dedupedCodes map[int]bool
if existingMap, ok := codesMap[backendUpstream]; ok {
dedupedCodes = existingMap
} else {
dedupedCodes = make(map[int]bool)
}
for _, code := range loc.CustomHTTPErrors {
dedupedCodes[code] = true
}
codesMap[backendUpstream] = dedupedCodes
}
errorLocations := []errorLocation{}
for upstream, dedupedCodes := range codesMap {
codesForUpstream := []int{}
for code := range dedupedCodes {
codesForUpstream = append(codesForUpstream, code)
}
sort.Ints(codesForUpstream)
errorLocations = append(errorLocations, errorLocation{
UpstreamName: upstream,
Codes: codesForUpstream,
})
}
sort.Slice(errorLocations, func(i, j int) bool {
return errorLocations[i].UpstreamName < errorLocations[j].UpstreamName
})
return errorLocations
}
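// Illustrative sketch (hypothetical values): two locations that share the default backend
// upstream "upstream-default-backend", one with custom errors [503 404] and one with [404],
// collapse into a single errorLocation{UpstreamName: "upstream-default-backend", Codes: [404 503]}.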
func opentracingPropagateContext(loc interface{}) string {
location, ok := loc.(*ingress.Location)
if !ok {
klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return "opentracing_propagate_context"
}
if location.BackendProtocol == "GRPC" || location.BackendProtocol == "GRPCS" {
return "opentracing_grpc_propagate_context"
}
return "opentracing_propagate_context"
}
// shouldLoadModSecurityModule determines whether or not the ModSecurity module needs to be loaded.
// First, it checks if `enable-modsecurity` is set in the ConfigMap. If it is not, it iterates over all locations to
// check if ModSecurity is enabled by the annotation `nginx.ingress.kubernetes.io/enable-modsecurity`.
func shouldLoadModSecurityModule(c interface{}, s interface{}) bool {
cfg, ok := c.(config.Configuration)
if !ok {
klog.Errorf("expected a 'config.Configuration' type but %T was returned", c)
return false
}
servers, ok := s.([]*ingress.Server)
if !ok {
klog.Errorf("expected an '[]*ingress.Server' type but %T was returned", s)
return false
}
// Determine if ModSecurity is enabled globally.
if cfg.EnableModsecurity {
return true
}
// If ModSecurity is not enabled globally, check if any location has it enabled via annotation.
for _, server := range servers {
for _, location := range server.Locations {
if location.ModSecurity.Enable {
return true
}
}
}
// Not enabled globally nor via annotation on a location, no need to load the module.
return false
}
| []
| []
| []
| [] | [] | go | 0 | 0 | |
src/main/java/Entry.java | package org.nlogo.extension.r;
/*
This file is part of NetLogo-R-Extension.
Contact: jthiele at gwdg.de
Copyright (C) 2009-2012 Jan C. Thiele
NetLogo-R-Extension is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with NetLogo-R-Extension. If not, see <http://www.gnu.org/licenses/>.
Linking this library statically or dynamically with other modules is making a combined work based on this library.
Thus, the terms and conditions of the GNU General Public License cover the whole combination.
As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable,
regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice,
provided that you also meet, for each linked independent module, the terms and conditions of the license of that module.
An independent module is a module which is not derived from or based on this library.
If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so.
If you do not wish to do so, delete this exception statement from your version.
*/
import com.sun.jna.Library;
import com.sun.jna.Native;
import java.io.IOException;
import java.lang.reflect.*;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.Permission;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import javax.swing.JOptionPane;
import org.nlogo.api.Argument;
import org.nlogo.api.Command;
import org.nlogo.api.Context;
import org.nlogo.api.ExtensionException;
import org.nlogo.api.LogoException;
import org.nlogo.api.Reporter;
import org.nlogo.core.Syntax;
import org.nlogo.core.SyntaxJ;
import org.nlogo.workspace.ExtensionManager$;
import org.rosuda.REngine.*;
/**
* Class to provide access to Gnu R from NetLogo. Contains definitions of NetLogo primitives.
*
* @author JC Thiele
* @version 1.0beta
*/
public class Entry extends org.nlogo.api.DefaultClassManager {
/**
* Object of type ExtensionManager to store the R console instance. Needed because the dynamic
* library (jri.dll/jri.so) can't be loaded twice. Because NetLogo can't cast an Object stored in
* the ExtensionManager via ex.storeObject/ex.retrieveObject to classes and interfaces other than
* java natives and NetLogo ones, using NetLogo interfaces is one (dirty) way to store the
* ShellWindow object.
*/
private static org.nlogo.api.ExtensionManager shellwin;
/** Object containing the connection to R */
public static HoldRengineX rConn = null;
/**
* Object to synchronize console input/execution
*//*private*/ public static ConsoleSync rSync = new ConsoleSync();
static String osName = System.getProperty("os.name").toLowerCase();
public interface LibC extends Library {
public int setenv(String name, String value, int overwrite);
}
public interface WinLibC extends Library {
public int _putenv_s(String key, String value);
}
public interface WinLib32 extends Library {
public int SetDllDirectoryA(String directory);
}
static LibC libc = null;
static WinLibC winLibc = null;
static WinLib32 winLib32 = null;
static {
try {
if (osName.startsWith("windows", 0)) {
String msvcrLibName =
System.getProperty("org.nlogo.r.extension.msvcr.lib.name", "msvcr120");
winLibc = (WinLibC) Native.loadLibrary(msvcrLibName, WinLibC.class);
winLib32 = (WinLib32) Native.loadLibrary("kernel32", WinLib32.class);
} else {
libc = (LibC) Native.loadLibrary("c", LibC.class);
}
} catch (Throwable t) {
System.err.println("Error loading native library: " + t.getMessage());
t.printStackTrace();
}
}
Configuration configuration = null;
/**
* Method executed when extension is loaded and only then. Initializes the connection to R and
* ShellWindow or loads the stored ShellWindow instance from storage.
*
* @param em an instance of ExtensionManager, handled by NetLogo
*/
public void runOnce(org.nlogo.api.ExtensionManager em) throws ExtensionException {
configuration = Configuration.fromRExtensionProperties();
Path rHome = validateRHome();
try {
// dynamically load of the needed JARs from the JRI package
loadJRILibraries(findJRIHomePath(configuration), rHome);
org.rosuda.REngine.REngine lastEngine = org.rosuda.REngine.REngine.getLastEngine();
// if no further REnginer was initialized
// Check for headless mode
// if NetLogo running headless, do not create interactiveShell and REngineCallbacks
// Don't forget to call "stop" in the model!
if (System.getProperty("java.awt.headless", "false") == "true"
|| System.getProperty("org.nlogo.preferHeadless") == "true") {
rConn = headlessREngine();
addRLibPaths(rConn, false);
} else {
// NetLogo running in GUI mode
if (lastEngine == null) {
rConn = guiREngine();
addRLibPaths(rConn, true);
em.storeObject(shellwin);
}
// otherwise, reload the last REngine object and retrieve the stored ShellWindow object
else {
// this will also create a new Environment
rConn = new HoldRengineX(lastEngine);
shellwin = (org.nlogo.api.ExtensionManager) em.retrieveObject();
}
}
} catch (UnsatisfiedLinkError ex) {
throw new ExtensionException("Error loading JRI library (Error #03): \n" + ex);
} catch (ExtensionException ex) {
throw ex;
} catch (Exception ex) {
throw new ExtensionException(
"Error in R-Extension: Error in runOnce (Error #04): \n" + ex, ex);
}
}
/** Returns the headless R engine */
HoldRengineX headlessREngine() throws ExtensionException {
try {
Class<?> rengineClass = Class.forName("org.rosuda.REngine.JRI.JRIEngine");
Class<?> callbacks_class = Class.forName("org.rosuda.REngine.REngineCallbacks");
Method thisMethod = rengineClass.getDeclaredMethod("createEngine");
REngine rToStore = (REngine) thisMethod.invoke(rengineClass);
return new HoldRengineX(rToStore);
} catch (Exception ex) {
throw new ExtensionException("Error Initializing Headless R Extension (Error #04).\n", ex);
}
}
private static class ExitTrappedException extends SecurityException {}
static final SecurityManager exitPreventingSecurityManager =
new SecurityManager() {
public void checkPermission(Permission permission) {
if (permission.getName().startsWith("exitVM")) {
throw new ExitTrappedException();
}
}
};
/** Returns the GUI R engine */
HoldRengineX guiREngine() throws ExtensionException {
// We do a bit of a song and dance here because JRIEngine.createEngine invokes
// a method that will perform a hard exit (System.exit(1)) if it can't find the
// appropriate library. This is exactly the sort of error we would like to report
// to the user. So we install a security manager that will prevent exiting and
// raise an exception if someone tries it, then we catch that exception and
// percolate the error to the user.
try {
Class<?> iashell_class = Class.forName("org.nlogo.extension.r.ShellWindow");
Class<?> partypes1[] = new Class<?>[] {ConsoleSync.class};
Constructor<?> shellConstructor = iashell_class.getConstructor(partypes1);
Object arglist1[] = new Object[] {rSync};
Object shell = shellConstructor.newInstance(arglist1);
org.nlogo.api.ExtensionManager tc = (org.nlogo.api.ExtensionManager) shell;
shellwin = tc;
Class<?> rengineClass = Class.forName("org.rosuda.REngine.JRI.JRIEngine");
Class<?> callbacks_class = Class.forName("org.rosuda.REngine.REngineCallbacks");
Class<?> partypes[] = new Class<?>[] {String[].class, callbacks_class, boolean.class};
Object arglist[] = new Object[] {new String[] {"--no-save"}, tc, true};
Method thisMethod = rengineClass.getDeclaredMethod("createEngine", partypes);
System.setSecurityManager(exitPreventingSecurityManager);
REngine rToStore = (REngine) thisMethod.invoke(rengineClass, arglist);
return new HoldRengineX(rToStore);
} catch (ExitTrappedException ex) {
throw new ExtensionException("Could not load R libraries. (Error #06)\n", ex);
} catch (ClassNotFoundException ex) {
throw new ExtensionException("Error initializing R extension. (Error #04)\n" + ex, ex);
} catch (NoSuchMethodException ex) {
throw new ExtensionException("Error initializing R extension. (Error #04)\n" + ex, ex);
} catch (IllegalAccessException ex) {
throw new ExtensionException("Error initializing R extension. (Error #04)\n" + ex, ex);
} catch (InstantiationException ex) {
throw new ExtensionException("Error initializing R extension. (Error #04)\n" + ex, ex);
} catch (InvocationTargetException ex) {
if (ex.getCause() instanceof ExitTrappedException) {
throw new ExtensionException("Could not load R libraries. (Error #06)\n", ex);
} else {
throw new ExtensionException(
"Error initializing R extension. (Error #04)\n" + ex + " " + ex.getCause(), ex);
}
} finally {
System.setSecurityManager(null);
}
}
/**
* Validates that R_HOME is set to a valid path, sets it from property if not set in environment.
*/
public Path validateRHome() throws ExtensionException {
String rHomeEnv = System.getenv("R_HOME");
if (rHomeEnv == null || rHomeEnv.isEmpty()) {
if (!Files.exists(configuration.rHomePath())) {
throw new ExtensionException(
"Could not find R Home. Please set R home in the environment or in user.properties (Error #01)\n");
}
int setResult = 0;
try {
if (osName.startsWith("windows", 0) && winLibc != null) {
setResult =
winLibc._putenv_s("R_HOME", configuration.rHomePath().toAbsolutePath().toString());
} else {
setResult =
libc.setenv("R_HOME", configuration.rHomePath().toAbsolutePath().toString(), 1);
}
} catch (Exception e) {
setResult = -1;
}
if (setResult != 0) throw new ExtensionException("Error setting R_HOME (#05).\n");
} else {
Path rHomePath = Paths.get(System.getenv("R_HOME"));
if (!Files.exists(rHomePath)) {
throw new ExtensionException(
"Could not find R at: "
+ System.getenv("R_HOME")
+ " . Please set R home in the environment or in user.properties (Error #01)\n");
}
configuration.setRHomePath(rHomePath);
}
return configuration.rHomePath();
}
static Path findJRIHomePath(Configuration configuration) throws ExtensionException {
return configuration
.jriHomePaths()
.stream()
.filter(path -> Files.exists(path.resolve("JRI.jar")))
.findFirst()
.orElseThrow(
() ->
new ExtensionException(
"Cannot locate rJava/JRI. Please check the location of your rJava installation and add a user.properties file in the r extension directory (Error #02).\n"));
}
/** Adds standard JRI Libraries */
void loadJRILibraries(Path jriHomePath, Path rHome) throws ExtensionException {
List<String> jarList = Arrays.asList(new String[] {"JRI.jar", "REngine.jar", "JRIEngine.jar"});
for (String jar : jarList) {
try {
JavaLibraryPath.addFile(jriHomePath.resolve(jar).toFile());
} catch (IOException ex) {
throw new ExtensionException("Error loading JRI Libraries (Error #04)\n", ex);
}
}
Path jriLib = jriHomePath;
Path jri64Lib = jriHomePath.resolve("x64");
Path jri32Lib = jriHomePath.resolve("i386");
Optional<Path> rLibPath = Optional.empty();
String dataModel = System.getProperty("sun.arch.data.model", "?");
if (Files.exists(jri64Lib) && dataModel.contains("64")) {
jriLib = jri64Lib;
if (winLib32 != null) {
rLibPath = Optional.of(rHome.resolve("bin/x64"));
}
} else if (Files.exists(jri32Lib) && dataModel.contains("32")) {
jriLib = jri32Lib;
if (winLib32 != null) {
rLibPath = Optional.of(rHome.resolve("bin/i386"));
}
}
try {
try {
JavaLibraryPath.addLibraryPath(jriLib.toFile());
} catch (Exception ex) {
throw new ExtensionException(
"Error Initializing R Extension: could not add JRI to library path (Error #04).\n", ex);
}
rLibPath.ifPresent(libPath -> winLib32.SetDllDirectoryA(libPath.toAbsolutePath().toString()));
} catch (UnsatisfiedLinkError localUnsatisfiedLinkError) {
throw new ExtensionException(
"Cannot load rJava libraries. Please check your rJava installation. (Error #03)\n"
+ localUnsatisfiedLinkError);
}
}
private void addRLibPaths(HoldRengineX rConn, boolean guiPresent) {
try {
if (! configuration.rLibPaths().isEmpty()) {
StringBuilder pathsString = new StringBuilder("c(");
for (Path p : configuration.rLibPaths()) {
// if the file path has backslashes, we need to escape them. If this isn't done, we get a hard
// crash on Windows RG 2017-2-24
pathsString.append("'" + p.toString().replace("\\", "\\\\") + "',");
}
pathsString.deleteCharAt(pathsString.length() - 1);
pathsString.append(")");
rConn.execute(
rConn.rConnection, ".libPaths(" + pathsString.toString() + ")", rConn.WorkingEnvironment, true);
}
} catch (Exception ex) {
if (guiPresent) {
JOptionPane.showMessageDialog(
null,
"Error while configuring r library paths: " + ex,
"Error in R-Extension",
JOptionPane.INFORMATION_MESSAGE);
} else {
System.err.println("Error while configuring r library paths, continuing: " + ex);
}
}
}
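// Illustrative sketch (hypothetical path): with a single configured library path
// C:\R\library, the statement sent to R by addRLibPaths above would be
//   .libPaths(c('C:\\R\\library'))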
/**
* Method to define the NetLogo primitives.
*
* @param primManager an instance of PrimitiveManager, handled by NetLogo
*/
public void load(org.nlogo.api.PrimitiveManager primManager) {
primManager.addPrimitive("put", new Put());
primManager.addPrimitive("putNamedList", new PutNamedList());
primManager.addPrimitive("putList", new PutList());
primManager.addPrimitive("putDataframe", new PutDataframe());
primManager.addPrimitive("putAgent", new PutAgent());
primManager.addPrimitive("putAgentDf", new PutAgentDataFrame());
primManager.addPrimitive("eval", new Eval());
primManager.addPrimitive("__evalDirect", new EvalDirect());
primManager.addPrimitive("get", new Get());
primManager.addPrimitive("gc", new GC());
primManager.addPrimitive("clear", new ClearWorkspace());
primManager.addPrimitive("clearLocal", new ClearLocalWorkspace());
primManager.addPrimitive("interactiveShell", new interactiveShell());
primManager.addPrimitive("setPlotDevice", new SetPlotDevice());
primManager.addPrimitive("stop", new Stop());
primManager.addPrimitive("r-home", new DebugPrim(new RPath()));
primManager.addPrimitive("jri-path", new DebugPrim(new JRIPath()));
}
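// Illustrative sketch (assuming the extension is loaded under its usual "r" prefix in a
// NetLogo model): the primitives registered above are then invoked from NetLogo as, e.g.,
//   r:put "x" [1 2 3]
//   r:eval "y <- mean(x)"
//   show r:get "y"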
@FunctionalInterface
interface DebugSupplier {
public String get() throws ExtensionException;
}
class RPath implements DebugSupplier {
@Override
public String get() throws ExtensionException {
return configuration.rHomePath().toString();
}
}
class JRIPath implements DebugSupplier {
@Override
public String get() throws ExtensionException {
return findJRIHomePath(configuration).toString();
}
}
public static class DebugPrim implements Reporter {
final DebugSupplier supplier;
public DebugPrim(DebugSupplier getValue) {
supplier = getValue;
}
public Syntax getSyntax() {
return SyntaxJ.reporterSyntax(Syntax.StringType());
}
public Object report(Argument args[], Context context)
throws ExtensionException, LogoException {
return supplier.get();
}
}
/**
* Class to stop the R connection. Needed for (true) headless runs ("java.awt.headless" ==
* "true"). (Implementation of the primitive stop)
*
* @since new in Version 1.1
*/
public static class Stop implements Command {
public Syntax getSyntax() {
return SyntaxJ.commandSyntax(new int[] {});
}
public String getAgentClassString() {
return "OTPL";
}
public void perform(Argument args[], Context context) throws ExtensionException, LogoException {
try {
if (System.getProperty("java.awt.headless", "false") == "true") {
rConn.rConnection.close();
}
} catch (Exception ex) {
throw new ExtensionException("Error in R-Extension: Error in stop: \n" + ex);
}
}
}
/**
* Class to setup the JavaGD plot device. (Implementation of the primitive setPlotDevice)
*
* @since new in Version 1.0beta
*/
public static class SetPlotDevice implements Command {
public Syntax getSyntax() {
return SyntaxJ.commandSyntax(new int[] {});
}
public String getAgentClassString() {
return "OTPL";
}
public void perform(Argument args[], Context context) throws ExtensionException, LogoException {
try {
if (!System.getProperty("java.awt.headless", "false").equals("true")) {
shellwin.storeObject(null);
}
} catch (Exception ex) {
throw new ExtensionException("Error in R-Extension: Error in setPlotDevice: \n" + ex, ex);
}
}
}
/**
* Class to setup InteractiveShell. (Implementation of the primitive interactiveShell)
*
* @since new in Version 0.3
*/
public static class interactiveShell implements Command {
public Syntax getSyntax() {
return SyntaxJ.commandSyntax(new int[] {});
}
public String getAgentClassString() {
return "OTPL";
}
public void perform(Argument args[], Context context) throws ExtensionException, LogoException {
try {
if (!System.getProperty("java.awt.headless", "false").equals("true")) {
if (!Entry.shellwin.anyExtensionsLoaded()) {
Entry.shellwin.finishFullCompilation();
}
}
} catch (Exception ex) {
throw new ExtensionException(
"Error in R-Extension: Error in interactiveShell: \n" + ex, ex);
}
}
}
/**
* Class to create a new Vector from Agent-Variables. (Implementation of the primitive putAgent)
*/
public static class PutAgent implements Command {
public Syntax getSyntax() {
return SyntaxJ.commandSyntax(
new int[] {
Syntax.StringType(),
Syntax.AgentsetType() | Syntax.AgentType(),
Syntax.StringType() | Syntax.RepeatableType()
});
}
public String getAgentClassString() {
return "OTPL";
}
public void perform(Argument args[], Context context) throws ExtensionException, LogoException {
try {
rConn.AssignAgentsetorAgent(args, false);
} catch (Exception ex) {
throw new ExtensionException("Error in R-Extension: Error in PutAgent: \n" + ex, ex);
}
}
}
/**
* Class to create a new R-DataFrame from Agent-Variables. (Implementation of the primitive
* putAgentDf)
*/
public static class PutAgentDataFrame implements Command {
public Syntax getSyntax() {
return SyntaxJ.commandSyntax(
new int[] {
Syntax.StringType(),
Syntax.AgentsetType() | Syntax.AgentType(),
Syntax.StringType() | Syntax.RepeatableType()
});
}
public String getAgentClassString() {
return "OTPL";
}
public void perform(Argument args[], Context context) throws ExtensionException, LogoException {
try {
rConn.AssignAgentsetorAgent(args, true);
} catch (Exception ex) {
throw new ExtensionException("Error in R-Extension: Error in PutAgentDf: \n" + ex, ex);
}
}
}
/**
* Class to create a new R-DataFrame from NetLogo-Values. (Implementation of the primitive
* putDataframe)
*/
public static class PutDataframe implements Command {
public Syntax getSyntax() {
return SyntaxJ.commandSyntax(
new int[] {Syntax.StringType(), Syntax.WildcardType() | Syntax.RepeatableType()});
}
public String getAgentClassString() {
return "OTPL";
}
public void perform(Argument args[], Context context) throws ExtensionException, LogoException {
try {
java.util.Vector<String> names = new java.util.Vector<String>();
org.rosuda.REngine.RList rlist = new RList();
for (int i = 0; i < args.length - 2; i += 2) {
names.add(args[i + 1].getString());
rlist.add(rConn.resolveNLObject(args[i + 2].get()));
}
rlist.names = names;
rConn.rConnection.assign(
args[0].getString(),
org.rosuda.REngine.REXP.createDataFrame(rlist),
rConn.WorkingEnvironment);
// clean up
names.clear();
names = null;
rlist = null;
//System.gc();
//System.gc();
} catch (Exception ex) {
throw new ExtensionException("Error in R-Extension: Error in PutDataFrame: \n" + ex, ex);
}
}
}
/** Class to create a new R-List from NetLogo-Values. (Implementation of the primitive putList) */
public static class PutList implements Command {
public Syntax getSyntax() {
return SyntaxJ.commandSyntax(
new int[] {Syntax.StringType(), Syntax.WildcardType() | Syntax.RepeatableType()});
}
public String getAgentClassString() {
return "OTPL";
}
public void perform(Argument args[], Context context) throws ExtensionException, LogoException {
try {
java.util.Vector<String> names = new java.util.Vector<String>();
org.rosuda.REngine.RList rlist = new RList();
for (int i = 0; i < args.length - 1; i++) {
names.add(((Integer) i).toString());
rlist.add(rConn.resolveNLObject(args[i + 1].get()));
}
rlist.names = names;
rConn.rConnection.assign(
args[0].getString(), new REXPGenericVector(rlist), rConn.WorkingEnvironment);
// clean up
names.clear();
names = null;
rlist = null;
//System.gc();
//System.gc();
} catch (Exception ex) {
throw new ExtensionException("Error in R-Extension: Error in PutVector: \n" + ex, ex);
}
}
}
/**
* Class to create a new named R-List from NetLogo-Values. (Implementation of the primitive
* putNamedList)
*/
public static class PutNamedList implements Command {
public Syntax getSyntax() {
return SyntaxJ.commandSyntax(
new int[] {Syntax.StringType(), Syntax.WildcardType() | Syntax.RepeatableType()});
}
public String getAgentClassString() {
return "OTPL";
}
public void perform(Argument args[], Context context) throws ExtensionException, LogoException {
try {
java.util.Vector<String> names = new java.util.Vector<String>();
org.rosuda.REngine.RList rlist = new RList();
for (int i = 0; i < args.length - 2; i += 2) {
names.add(args[i + 1].getString());
rlist.add(rConn.resolveNLObject(args[i + 2].get()));
}
rlist.names = names;
rConn.rConnection.assign(
args[0].getString(), new REXPGenericVector(rlist), rConn.WorkingEnvironment);
// clean up
names.clear();
names = null;
rlist = null;
//System.gc();
//System.gc();
} catch (Exception ex) {
throw new ExtensionException("Error in R-Extension: Error in PutNamedList: \n" + ex, ex);
}
}
}
/**
* Class to create a new R-Variable/Array from NetLogo-Values. (Implementation of the primitive
* put)
*/
public static class Put implements Command {
public Syntax getSyntax() {
return SyntaxJ.commandSyntax(new int[] {Syntax.StringType(), Syntax.WildcardType()});
}
public String getAgentClassString() {
return "OTPL";
}
public void perform(Argument args[], Context context) throws ExtensionException, LogoException {
try {
REXP val = rConn.resolveNLObject(args[1].get());
rConn.rConnection.assign(args[0].getString(), val, rConn.WorkingEnvironment);
val = null;
//System.gc();
//System.gc();
} catch (Exception ex) {
throw new ExtensionException("Error in R-Extension: Error in Put. \n" + ex, ex);
}
}
}
/**
* Class to evaluate submitted String in R without results. (Implementation of the primitive eval)
*/
public static class Eval implements Command {
public Syntax getSyntax() {
return SyntaxJ.commandSyntax(new int[] {Syntax.StringType()});
}
public String getAgentClassString() {
return "OTPL";
}
public void perform(Argument args[], Context context) throws ExtensionException, LogoException {
try {
REXP returnVal =
rConn.execute(rConn.rConnection, args[0].getString(), rConn.WorkingEnvironment, true);
returnVal = null;
} catch (Exception ex) {
throw new ExtensionException("Error in R-Extension: Error in Eval: \n" + ex, ex);
}
}
}
/**
* Class to evaluate submitted String directly in R Console without results. (Implementation of
* the primitive evalDirect). Some packages (e.g. ggplot2) don't work with eval
*/
public static class EvalDirect implements Command {
public Syntax getSyntax() {
return SyntaxJ.commandSyntax(new int[] {Syntax.StringType()});
}
public String getAgentClassString() {
return "OTPL";
}
public void perform(Argument args[], Context context) throws ExtensionException, LogoException {
try {
String[] cmdArray = args[0].getString().split("\n");
String c = null;
for (int i = 0; i < cmdArray.length; i++) {
c = cmdArray[i];
Entry.rSync.triggerNotification(c.trim());
// clean up
c = null;
}
//REXP returnVal = rConn.execute(rConn.rConnection, args[0].getString(), rConn.WorkingEnvironment, true);
// clean up
cmdArray = null;
} catch (Exception ex) {
throw new ExtensionException("Error in R-Extension: Error in Eval: \n" + ex, ex);
}
}
}
/**
* Class to evaluate submitted String in R, and send back the results to NetLogo. (Implementation
* of the primitive get)
*/
public static class Get implements Reporter {
public Syntax getSyntax() {
return SyntaxJ.reporterSyntax(new int[] {Syntax.StringType()}, Syntax.WildcardType());
}
public Object report(Argument args[], Context context)
throws ExtensionException, LogoException {
try {
REXP returnVal =
rConn.execute(rConn.rConnection, args[0].getString(), rConn.WorkingEnvironment, true);
Object retObj = rConn.returnObject(returnVal);
// clean up
returnVal = null;
return retObj;
//return rConn.returnObject(returnVal);
} catch (Exception ex) {
throw new ExtensionException("Error in R-Extension: Error in Get. \n" + ex, ex);
}
}
}
/**
* Class to perform Java and R Garbage Collection. (Implementation of the primitive javagc)
*
* @since new in version 1.2
*/
public static class GC implements Command {
public Syntax getSyntax() {
return SyntaxJ.commandSyntax(new int[] {});
}
public String getAgentClassString() {
return "OTPL";
}
public void perform(Argument args[], Context context) throws ExtensionException, LogoException {
try {
System.gc();
rConn.execute(rConn.rConnection, "gc(reset=T)", rConn.WorkingEnvironment, true);
rConn.rConnection.parseAndEval("gc(reset=T)");
} catch (Exception ex) {
throw new ExtensionException("Error in R-Extension: Error in GC: \n" + ex, ex);
}
}
}
/** Class to clear R workspace. (Implementation of the primitive clear) */
public static class ClearWorkspace implements Command {
public Syntax getSyntax() {
return SyntaxJ.commandSyntax(new int[] {});
}
public String getAgentClassString() {
return "OTPL";
}
public void perform(Argument args[], Context context) throws ExtensionException, LogoException {
try {
System.gc();
REXP returnVal =
rConn.execute(rConn.rConnection, "rm(list=ls())", rConn.WorkingEnvironment, true);
returnVal = null;
rConn.rConnection.parseAndEval("rm(list=ls())");
rConn.rConnection.parseAndEval("gc(reset=T)");
System.gc();
rConn.sendEnvironmentToGlobal();
} catch (Exception ex) {
throw new ExtensionException("Error in R-Extension: Error in ClearWorkspace: \n" + ex, ex);
}
}
}
/** Class to clear local (nl.env) R workspace. (Implementation of the primitive clear) */
public static class ClearLocalWorkspace implements Command {
public Syntax getSyntax() {
return SyntaxJ.commandSyntax(new int[] {});
}
public String getAgentClassString() {
return "OTPL";
}
public void perform(Argument args[], Context context) throws ExtensionException, LogoException {
try {
System.gc();
REXP returnVal =
rConn.execute(rConn.rConnection, "rm(list=ls())", rConn.WorkingEnvironment, true);
returnVal = null;
REXP returnVal2 =
rConn.execute(rConn.rConnection, "gc(reset=T)", rConn.WorkingEnvironment, true);
returnVal2 = null;
System.gc();
} catch (Exception ex) {
throw new ExtensionException(
"Error in R-Extension: Error in ClearLocalWorkspace: \n" + ex, ex);
}
}
}
/**
* Method executed when extension is unloaded. Clears the R workspace, destroys the MessageWindow (if
* created) and resets debugging.
*/
public void unload() throws ExtensionException {
// run unload only when NetLogo is running in GUI mode
if (!System.getProperty("java.awt.headless", "false").equals("true")) {
// clear workspace
try {
// clear workspace
System.gc();
REXP returnVal =
rConn.execute(rConn.rConnection, "rm(list=ls())", rConn.WorkingEnvironment, true);
returnVal = null;
rConn.rConnection.parseAndEval("rm(list=ls())");
rConn.rConnection.parseAndEval("gc(reset=T)");
System.gc();
} catch (Exception ex) {
throw new ExtensionException("Error in R-Extension: Error in unload: \n" + ex, ex);
}
try {
// check if ShellWindow is open - if so, close it...
if (Entry.shellwin.anyExtensionsLoaded()) {
Entry.shellwin.reset();
}
} catch (Exception ex) {
throw new ExtensionException(
"Error in R-Extension: Error in making interactiveShell invisible: \n" + ex, ex);
}
}
}
}
| [
"\"R_HOME\"",
"\"R_HOME\"",
"\"R_HOME\""
]
| []
| [
"R_HOME"
]
| [] | ["R_HOME"] | java | 1 | 0 | |
tradebot/tests/manual/test_trade_environment.py | import logging
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logging.getLogger('tensorflow').disabled = True
from tradebot.environments.trade_environment import TradeEnvironment
import pandas as pd
from datetime import datetime
from stable_baselines.common.policies import MlpLstmPolicy
from stable_baselines import PPO2
from stable_baselines.common.vec_env import DummyVecEnv
def test_trade_environment():
# Drop csv file in tests/data
data = pd.read_csv(os.path.join(
os.path.dirname(os.path.abspath(__file__)), '../data/btcusd.csv'))
# print(data)
data = data.drop(['time'], axis=1)
n = len(data)
split_point = int(n*.8)
train = data.iloc[:split_point]
test = data.iloc[split_point:]
train_env = TradeEnvironment(train, transaction_fee=0.0026, episode_length=1000)
train_env = DummyVecEnv([lambda: train_env])
model = PPO2(MlpLstmPolicy, train_env, nminibatches=1)
model.learn(total_timesteps=10000)
test_env = TradeEnvironment(test, transaction_fee=0.0026, episode_length=1000)
test_env = DummyVecEnv([lambda: test_env])
obs = test_env.reset()
done = False
cum_rewards = 0
while not done:
action, _states = model.predict(obs)
obs, reward, done, info = test_env.step(action)
print(obs, reward)
cum_rewards += reward
test_env.render()
print(cum_rewards)
if __name__ == '__main__':
test_trade_environment() | []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
qa/rpc-tests/test_framework/test_framework.py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
stop_node,
enable_coverage,
check_json_precision,
initialize_chain_clean,
PortSeed,
)
from .authproxy import JSONRPCException
class BitcoinTestFramework(object):
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = None
def run_test(self):
raise NotImplementedError
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
if self.setup_clean_chain:
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
initialize_chain(self.options.tmpdir, self.num_nodes)
def stop_node(self, num_node):
stop_node(self.nodes[num_node], num_node)
def setup_nodes(self):
return start_nodes(self.num_nodes, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
self.setup_network(False)
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave infinitecoinds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop infinitecoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing infinitecoind/infinitecoin-cli (default: %default)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
# backup dir variable for removal at cleanup
self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
if self.options.trace_rpc:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
os.makedirs(self.options.tmpdir, exist_ok=False)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: " + str(e))
traceback.print_tb(sys.exc_info()[2])
except KeyError as e:
print("key not found: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
except KeyboardInterrupt as e:
print("Exiting after " + repr(e))
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
else:
print("Note: infinitecoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if not os.listdir(self.options.root):
os.rmdir(self.options.root)
else:
print("Not cleaning up dir %s" % self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("INFINITECOIND", "infinitecoind"),
help="infinitecoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("INFINITECOIND", "infinitecoind"),
help="infinitecoind binary to use for reference nodes (if any)")
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
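# Illustrative sketch (hypothetical test, not part of this framework): a minimal test
# subclasses BitcoinTestFramework, overrides run_test(), and calls main():
#
#   class ExampleTest(BitcoinTestFramework):
#       def __init__(self):
#           super().__init__()
#           self.num_nodes = 1
#           self.setup_clean_chain = True
#
#       def run_test(self):
#           self.nodes[0].generate(10)
#           assert self.nodes[0].getblockcount() == 10
#
#   if __name__ == '__main__':
#       ExampleTest().main()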
| []
| []
| [
"PATH",
"INFINITECOIND"
]
| [] | ["PATH", "INFINITECOIND"] | python | 2 | 0 | |
audio_landmarks3d/lm.py | import numpy as np
import cv2
import wave
import subprocess
import os, base64
# landmarks connections
cons = [[48, 49], [49, 50], [50, 51], [51, 52], [52, 53], [53, 54], [54, 55], [55, 56], [56, 57],
[57, 58], [58, 59], [59, 48], [60, 61], [61, 62], [62, 63], [63, 64], [64, 65], [65, 66],
[66, 67], [67, 60], [27, 28], [28, 29], [29, 30], [30, 31], [30, 35], [31, 32], [32, 33],
[33, 34], [34, 35], [27, 31], [27, 35], [17, 18], [18, 19], [19, 20], [20, 21],
[22, 23], [23, 24], [24, 25], [25, 26], [36, 37], [37, 38], [38, 39], [39, 40], [40, 41],
[36, 41], [43, 44], [44, 45], [45, 46], [46, 47], [42, 47], [0, 1], [1, 2], [2, 3], [3, 4],
[4, 5], [5, 6], [6, 7], [7, 8], [8, 9], [9, 10], [10, 11], [11, 12],
[12, 13], [13, 14], [14, 15], [15, 16]]
sr = 8000 # assumes: 8khz mono
num_frames = 7
increment = sr * 0.04 # 25 fps
W,H = 400,400; # drawing
net = cv2.dnn.readNet("model.onnx")
mean_shape = np.load("mean_shape.npy")
eigen_vectors = np.load("eigen_vectors.npy").T
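# Assumed data layout (a sketch, inferred from the indexing below): mean_shape is a
# 1 x 204 row vector (68 landmarks x 3 coordinates) and eigen_vectors, after the
# transpose above, maps the network's PCA coefficients back into that 204-dim space,
# so each frame is reconstructed as mean_shape + coefficients . eigen_vectors.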
def animate(wfile):
w = wave.open(wfile,"rb")
n = w.getnframes()
b = w.readframes(n)
a = np.frombuffer(b,np.int16)
a = np.array(a,np.float32)
a /= 0x7fff  # int16 full-scale; scales samples to roughly [-1, 1]
a /= a.max()
sample_len = int(num_frames * increment)
sample_pos = int(0)
vid = cv2.VideoWriter("my.avi",cv2.VideoWriter_fourcc(*'MJPG'), 25.0, (W,H))
while (sample_pos < n - sample_len):
data = a[int(sample_pos):int(sample_pos+sample_len)].reshape(1,1,sample_len)
sample_pos += increment;
net.setInput(data)
res = net.forward()
pts = mean_shape.copy()
for i in range(eigen_vectors.shape[0]):
pts[0,i] += res.dot(eigen_vectors[i,:])
pts = pts.reshape(68,3) # 204==68*3
img = np.ones((H,W,3),np.uint8)
img[:,:] = (127,127,127)
for i in range(pts.shape[0]):
x = int(pts[i,0] * W*2 + W/2)
y = int(pts[i,1] * H*2 + H/2)
cv2.circle(img, (x,y), 3, (50,50,255), -1)
for c in cons:
x1 = int(pts[c[0],0] * W*2 + W/2)
y1 = int(pts[c[0],1] * H*2 + H/2)
x2 = int(pts[c[1],0] * W*2 + W/2)
y2 = int(pts[c[1],1] * H*2 + H/2)
cv2.line(img,(x1,y1),(x2,y2),(20,20,180),1)
vid.write(img)
cv2.imshow("draw", img)
cv2.waitKey(6)
vid.release()
#cv2.waitKey();
cmd = 'ffmpeg -y -i my.avi -i '+wfile+' -c:v h264 -c:a aac -strict experimental res_.mp4'
subprocess.call(cmd, shell=True)
animate("S2.wav")
"""
def application(environ, start_response):
request_body=None
retcode = '200 OK'
resp = "dummy\r\n"
ct ="text/html"
try:
request_body_size = int(environ.get('CONTENT_LENGTH', 0))
request_body = environ['wsgi.input'].read(request_body_size)
except (ValueError):
resp = "no response"
url = environ['PATH_INFO'];
if url == "/":
resp = _read("up.html")
elif url == "/dn":
ct = 'image/png'
resp = _read("my.png")
elif url == "/up" and request_body:
ct = 'image/png'
resp = request_body.replace('data:' + ct + ';base64,', "")
data = base64.b64decode(resp)
buf = np.frombuffer(data, dtype=np.uint8)
img = cv2.imdecode(buf, 1)
img = process(img)
cv2.imwrite("my.png", img)
ok, enc = cv2.imencode(".png", img)
resp = base64.b64encode(enc.tostring())
resp = 'data:' + ct + ';base64,' + resp
start_response(retcode, [('Content-Type', ct), ('Content-Length', str(len(resp)))])
return [resp]
if __name__ == '__main__':
from wsgiref.simple_server import make_server
httpd = make_server('0.0.0.0', int(os.environ.get("PORT", 9000)), application)
while True: httpd.handle_request()
""" | []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 | |
qa/rpc-tests/maxblocksinflight.py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import logging
'''
In this test we connect to one node over p2p, send it numerous inv's, and
compare the resulting number of getdata requests to a max allowed value. We
test for exceeding 128 blocks in flight, which was the limit an 0.9 client will
reach. [0.10 clients shouldn't request more than 16 from a single peer.]
'''
MAX_REQUESTS = 128
class TestManager(NodeConnCB):
# set up NodeConnCB callbacks, overriding base class
def on_getdata(self, conn, message):
self.log.debug("got getdata %s" % repr(message))
# Log the requests
for inv in message.inv:
if inv.hash not in self.blockReqCounts:
self.blockReqCounts[inv.hash] = 0
self.blockReqCounts[inv.hash] += 1
def on_close(self, conn):
if not self.disconnectOkay:
raise EarlyDisconnectError(0)
def __init__(self):
NodeConnCB.__init__(self)
self.log = logging.getLogger("BlockRelayTest")
def add_new_connection(self, connection):
self.connection = connection
self.blockReqCounts = {}
self.disconnectOkay = False
def run(self):
self.connection.rpc.generate(1) # Leave IBD
numBlocksToGenerate = [8, 16, 128, 1024]
for count in range(len(numBlocksToGenerate)):
current_invs = []
for i in range(numBlocksToGenerate[count]):
current_invs.append(CInv(2, random.randrange(0, 1 << 256)))
if len(current_invs) >= 50000:
self.connection.send_message(msg_inv(current_invs))
current_invs = []
if len(current_invs) > 0:
self.connection.send_message(msg_inv(current_invs))
# Wait and see how many blocks were requested
time.sleep(2)
total_requests = 0
with mininode_lock:
for key in self.blockReqCounts:
total_requests += self.blockReqCounts[key]
if self.blockReqCounts[key] > 1:
raise AssertionError("Error, test failed: block %064x requested more than once" % key)
if total_requests > MAX_REQUESTS:
raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
print("Round %d: success (total requests: %d)" % (count, total_requests))
self.disconnectOkay = True
self.connection.disconnect_node()
class MaxBlocksInFlightTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("ZOCD", "zerooned"),
help="Binary to test max block requests behavior")
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager()
test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
NetworkThread().start() # Start up network handling in another thread
test.run()
if __name__ == '__main__':
MaxBlocksInFlightTest().main()
| []
| []
| [
"ZOCD"
]
| [] | ["ZOCD"] | python | 1 | 0 | |
chengtay/cmd/example-client/main.go | package main
import (
"encoding/json"
"fmt"
"github.com/ChengtayChain/ChengtayChain/chengtay/types"
"github.com/ChengtayChain/ChengtayChain/crypto/ed25519"
tmos "github.com/ChengtayChain/ChengtayChain/libs/os"
"github.com/ChengtayChain/ChengtayChain/libs/rand"
"github.com/ChengtayChain/ChengtayChain/privval"
ctypes "github.com/ChengtayChain/ChengtayChain/rpc/core/types"
"github.com/mitchellh/go-homedir"
"os"
"time"
rpcClient "github.com/ChengtayChain/ChengtayChain/rpc/client/http"
)
func init() {
rand.Seed(time.Now().Unix())
}
func main() {
// get TMHOME
var tmhome string
{
tmhome = os.Getenv("TMHOME")
if len(tmhome) == 0 {
homeDir, err := homedir.Dir()
if err != nil {
panic(err)
}
tmhome = homeDir + string(os.PathSeparator) + ".chengtaychain"
}
}
// get private key
var privKey ed25519.PrivKeyEd25519
{
privValiKeyFile := tmhome + string(os.PathSeparator) + "config" + string(os.PathSeparator) + "priv_validator_key.json"
privValiStateFile := tmhome + string(os.PathSeparator) + "data" + string(os.PathSeparator) + "priv_validator_state.json"
var pv *privval.FilePV
if tmos.FileExists(privValiKeyFile) {
pv = privval.LoadFilePV(privValiKeyFile, privValiStateFile)
} else {
panic(fmt.Errorf("file not found. " + privValiKeyFile))
}
privKey = pv.Key.PrivKey.(ed25519.PrivKeyEd25519)
}
var address = "http://127.0.0.1:26657" // cfg.DefaultRPCConfig().ListenAddress
var client *rpcClient.HTTP
{
var err error
client, err = rpcClient.New(address, "/websocket")
if err != nil {
panic(err)
}
}
for {
rawTransaction := randomRawTransaction(privKey)
ret, err := sendRawTransaction(client, rawTransaction)
if err != nil {
fmt.Println(err.Error())
// ignore the error
} else {
fmt.Printf("%+v\n", ret)
}
time.Sleep(500 * time.Millisecond)
}
}
func sendRawTransaction(client *rpcClient.HTTP, rawTransaction types.RawTransaction) (*ctypes.ResultBroadcastTx, error) {
bytes, err := json.Marshal(&rawTransaction)
if err != nil {
panic(err)
}
ret, err := client.BroadcastTxSync(bytes)
if err != nil {
return ret, err
}
return ret, nil
}
func randomStorageItem() (item types.StorageItem) {
item.CarID = types.ID(rand.Str(128))
item.Timestamp = uint64(time.Now().Unix())
item.ContentType = "whatever"
item.Content = []byte(rand.Str(1 + rand.Intn(32768))) // https://github.com/tendermint/tendermint/pull/5215
item.StorageItemID = types.ID(rand.Str(128))
return item
}
func randomMerkleNode() (merkleNode types.IMerkleNode) {
node := types.StorageItemMerkleNode(randomStorageItem())
return &node
}
func randomMerkleTree() (merkleTree types.IMerkleTree) {
merkleTree = &types.MerkleTree{}
n := merkleTree.GetCapacity()
for i := 0; i < n; i++ {
err := merkleTree.SetMerkleNode(i, randomMerkleNode())
if err != nil {
panic(err)
}
}
_, err := merkleTree.GetMerkleRoot()
if err != nil {
panic(err)
}
return merkleTree
}
func randomRawTransaction(privKey ed25519.PrivKeyEd25519) (rawTransaction types.RawTransaction) {
treeNum := rand.Intn(20)
trees := make([]types.IMerkleTree, 0)
for i := 0; i < treeNum; i++ {
trees = append(trees, randomMerkleTree())
}
value := types.MerkleRootTransactionValue{
Timestamp: uint64(time.Now().Unix()),
Items: make([]types.MerkleRootTransactionItem, 0),
}
bytes256 := rand.Bytes(256)
copy(value.Nonce[:256], bytes256[:256])
for i := 0; i < treeNum; i++ {
root, err := trees[i].GetMerkleRoot()
if err != nil {
panic(err)
}
value.Items = append(value.Items, root)
}
valueBytes, err := json.Marshal(value)
if err != nil {
panic(err)
}
valueBytesHash := types.DefaultHashProvider.Digest(valueBytes)
valueBytesHashSig, err := privKey.Sign(valueBytesHash)
if err != nil {
panic(err)
}
rawTransaction.Type = types.TransactionMerkleroot
rawTransaction.PublicKey = privKey.PubKey().(ed25519.PubKeyEd25519)
rawTransaction.Value = valueBytes
rawTransaction.ValueHash = valueBytesHash
rawTransaction.ValueHashSignature = valueBytesHashSig
return rawTransaction
}
| [
"\"TMHOME\""
]
| []
| [
"TMHOME"
]
| [] | ["TMHOME"] | go | 1 | 0 | |
cmd/server/server.go | package main
import (
"fmt"
"html"
"log"
"net/http"
"os"
)
func main() {
port := os.Getenv("PORT")
if port == "" {
log.Fatal("$PORT must be set")
}
log.Printf("starting")
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path))
})
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", port), nil))
log.Printf("shutting down")
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
components/camel-xpath/src/main/java/org/apache/camel/language/xpath/MessageVariableResolver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.language.xpath;
import java.util.HashMap;
import java.util.Map;
import javax.xml.namespace.QName;
import javax.xml.xpath.XPathVariableResolver;
import org.apache.camel.Exchange;
import org.apache.camel.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.camel.support.builder.Namespaces.ENVIRONMENT_VARIABLES;
import static org.apache.camel.support.builder.Namespaces.EXCHANGE_PROPERTY;
import static org.apache.camel.support.builder.Namespaces.IN_NAMESPACE;
import static org.apache.camel.support.builder.Namespaces.OUT_NAMESPACE;
import static org.apache.camel.support.builder.Namespaces.SYSTEM_PROPERTIES_NAMESPACE;
/**
* A variable resolver for XPath expressions which support properties on the message, exchange as well as making system
* properties and environment properties available.
* <p/>
* Implementations of this resolver must be thread safe
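 * <p/>
 * Resolution summary: a QName without a namespace is looked up in the local variables, then the message
 * headers, then the exchange properties; SYSTEM_PROPERTIES_NAMESPACE resolves via System.getProperty,
 * ENVIRONMENT_VARIABLES via System.getenv, EXCHANGE_PROPERTY via the exchange properties, and
 * IN_NAMESPACE / OUT_NAMESPACE resolve message headers (or the message body when the local part is "body").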
*/
public class MessageVariableResolver implements XPathVariableResolver {
private static final Logger LOG = LoggerFactory.getLogger(MessageVariableResolver.class);
private Map<String, Object> variables = new HashMap<>();
private final ThreadLocal<Exchange> exchange;
public MessageVariableResolver(ThreadLocal<Exchange> exchange) {
this.exchange = exchange;
}
@Override
public Object resolveVariable(QName name) {
String uri = name.getNamespaceURI();
String localPart = name.getLocalPart();
Object answer = null;
Message in = exchange.get().getIn();
if (uri == null || uri.length() == 0) {
answer = variables.get(localPart);
if (answer == null) {
Message message = in;
if (message != null) {
answer = message.getHeader(localPart);
}
if (answer == null) {
answer = exchange.get().getProperty(localPart);
}
}
} else if (uri.equals(SYSTEM_PROPERTIES_NAMESPACE)) {
try {
answer = System.getProperty(localPart);
} catch (Exception e) {
LOG.debug("Security exception evaluating system property: " + localPart + ". Reason: " + e, e);
}
} else if (uri.equals(ENVIRONMENT_VARIABLES)) {
answer = System.getenv().get(localPart);
} else if (uri.equals(EXCHANGE_PROPERTY)) {
answer = exchange.get().getProperty(localPart);
} else if (uri.equals(IN_NAMESPACE)) {
answer = in.getHeader(localPart);
if (answer == null && localPart.equals("body")) {
answer = in.getBody();
}
} else if (uri.equals(OUT_NAMESPACE)) {
if (exchange.get().hasOut()) {
Message out = exchange.get().getOut();
answer = out.getHeader(localPart);
if (answer == null && localPart.equals("body")) {
answer = out.getBody();
}
}
}
// if we can't find an answer we must return an empty String.
// if we return null, then the JDK default XPathEngine will throw an exception
if (answer == null) {
return "";
} else {
return answer;
}
}
public void addVariable(String localPart, Object value) {
variables.put(localPart, value);
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
acceptance-tests/actors/bbl.go | package actors
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"strings"
"time"
acceptance "github.com/cloudfoundry/bosh-bootloader/acceptance-tests"
"github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
)
type BBL struct {
stateDirectory string
pathToBBL string
configuration acceptance.Config
envID string
}
func NewBBL(stateDirectory string, pathToBBL string, configuration acceptance.Config, envIDSuffix string) BBL {
envIDPrefix := os.Getenv("BBL_TEST_ENV_ID_PREFIX")
if envIDPrefix == "" {
envIDPrefix = "bbl-test"
}
return BBL{
stateDirectory: stateDirectory,
pathToBBL: pathToBBL,
configuration: configuration,
envID: fmt.Sprintf("%s-%s", envIDPrefix, envIDSuffix),
}
}
func (b BBL) PredefinedEnvID() string {
return b.envID
}
func (b BBL) Up(additionalArgs ...string) *gexec.Session {
args := []string{
"--state-dir", b.stateDirectory,
"--debug",
"up",
}
args = append(args, additionalArgs...)
return b.execute(args, os.Stdout, os.Stderr)
}
func (b BBL) Plan(additionalArgs ...string) *gexec.Session {
args := []string{
"--state-dir", b.stateDirectory,
"--debug",
"plan",
}
args = append(args, additionalArgs...)
return b.execute(args, os.Stdout, os.Stderr)
}
func (b BBL) Rotate() *gexec.Session {
return b.execute([]string{
"--state-dir", b.stateDirectory,
"--debug",
"rotate",
}, os.Stdout, os.Stderr)
}
func (b BBL) Destroy() *gexec.Session {
return b.execute([]string{
"--state-dir", b.stateDirectory,
"--debug",
"destroy",
"--no-confirm",
}, os.Stdout, os.Stderr)
}
func (b BBL) Down() *gexec.Session {
return b.execute([]string{
"--state-dir", b.stateDirectory,
"--debug",
"down",
"--no-confirm",
}, os.Stdout, os.Stderr)
}
func (b BBL) CleanupLeftovers(filter string) *gexec.Session {
return b.execute([]string{
"--state-dir", b.stateDirectory,
"cleanup-leftovers",
"--filter", filter,
"--no-confirm",
}, os.Stdout, os.Stderr)
}
func (b BBL) Lbs() string {
return b.fetchValue("lbs")
}
func (b BBL) DirectorUsername() string {
return b.fetchValue("director-username")
}
func (b BBL) DirectorPassword() string {
return b.fetchValue("director-password")
}
func (b BBL) DirectorAddress() string {
return b.fetchValue("director-address")
}
func (b BBL) DirectorCACert() string {
return b.fetchValue("director-ca-cert")
}
func (b BBL) JumpboxAddress() string {
return b.fetchValue("jumpbox-address")
}
func (b BBL) SSHKey() string {
return b.fetchValue("ssh-key")
}
func (b BBL) DirectorSSHKey() string {
return b.fetchValue("director-ssh-key")
}
func (b BBL) EnvID() string {
return b.fetchValue("env-id")
}
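// getProvisionContainer returns the init container that prepares the function source: it decodes a
// base64 payload or downloads a URL, optionally verifies the sha256 checksum, unzips or copies the file
// into the runtime volume, and copies the runtime's dependency file alongside it.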
func (b BBL) PrintEnv() string {
return b.fetchValue("print-env")
}
func (b BBL) LatestError() string {
return b.fetchValue("latest-error")
}
func (b BBL) SaveDirectorCA() string {
stdout := bytes.NewBuffer([]byte{})
session := b.execute([]string{
"--state-dir", b.stateDirectory,
"director-ca-cert",
}, stdout, os.Stderr)
Eventually(session, 10*time.Minute).Should(gexec.Exit(0))
file, err := ioutil.TempFile("", "")
Expect(err).NotTo(HaveOccurred())
defer file.Close()
file.Write(stdout.Bytes())
return file.Name()
}
func (b BBL) ExportBoshAllProxy() string {
lines := strings.Split(b.PrintEnv(), "\n")
for _, line := range lines {
if strings.Contains(line, "export BOSH_ALL_PROXY=") {
keyValueParts := strings.Split(line, "export BOSH_ALL_PROXY=")
if len(keyValueParts) > 1 {
os.Setenv("BOSH_ALL_PROXY", keyValueParts[1])
return keyValueParts[1]
}
}
}
return ""
}
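// StartSSHTunnel extracts the ssh command printed by `bbl print-env`, substitutes the jumpbox private key,
// drops the -f flag so the tunnel stays in the foreground, and runs the resulting command under gexec.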
func (b BBL) StartSSHTunnel() *gexec.Session {
printEnvLines := strings.Split(b.PrintEnv(), "\n")
os.Setenv("BOSH_ALL_PROXY", getExport("BOSH_ALL_PROXY", printEnvLines))
var sshArgs []string
for i := 0; i < len(printEnvLines); i++ {
if strings.HasPrefix(printEnvLines[i], "ssh ") {
sshCmd := strings.TrimPrefix(printEnvLines[i], "ssh ")
sshCmd = strings.Replace(sshCmd, "$JUMPBOX_PRIVATE_KEY", getExport("JUMPBOX_PRIVATE_KEY", printEnvLines), -1)
sshCmd = strings.Replace(sshCmd, "-f ", "", -1)
sshArgs = strings.Split(sshCmd, " ")
}
}
cmd := exec.Command("ssh", sshArgs...)
sshSession, err := gexec.Start(cmd, ginkgo.GinkgoWriter, ginkgo.GinkgoWriter)
Expect(err).NotTo(HaveOccurred())
return sshSession
}
func getExport(keyName string, lines []string) string {
for _, line := range lines {
if strings.HasPrefix(line, fmt.Sprintf("export %s", keyName)) {
parts := strings.Split(line, " ")
keyValue := parts[1]
keyValueParts := strings.Split(keyValue, "=")
return keyValueParts[1]
}
}
return ""
}
func (b BBL) fetchValue(value string) string {
args := []string{
"--state-dir", b.stateDirectory,
value,
}
stdout := bytes.NewBuffer([]byte{})
stderr := bytes.NewBuffer([]byte{})
b.execute(args, stdout, stderr).Wait(30 * time.Second)
return strings.TrimSpace(string(stdout.Bytes()))
}
func (b BBL) execute(args []string, stdout io.Writer, stderr io.Writer) *gexec.Session {
cmd := exec.Command(b.pathToBBL, args...)
session, err := gexec.Start(cmd, stdout, stderr)
Expect(err).NotTo(HaveOccurred())
return session
}
| [
"\"BBL_TEST_ENV_ID_PREFIX\""
]
| []
| [
"BBL_TEST_ENV_ID_PREFIX"
]
| [] | ["BBL_TEST_ENV_ID_PREFIX"] | go | 1 | 0 | |
Data/Juliet-Java/Juliet-Java-v103/000/129/077/CWE190_Integer_Overflow__int_Environment_add_74a.java | /* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE190_Integer_Overflow__int_Environment_add_74a.java
Label Definition File: CWE190_Integer_Overflow__int.label.xml
Template File: sources-sinks-74a.tmpl.java
*/
/*
* @description
* CWE: 190 Integer Overflow
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded non-zero, non-min, non-max, even number
* Sinks: add
* GoodSink: Ensure there will not be an overflow before adding 1 to data
* BadSink : Add 1 to data, which can cause an overflow
* Flow Variant: 74 Data flow: data passed in a HashMap from one method to another in different source files in the same package
*
* */
import java.util.HashMap;
import java.util.logging.Level;
public class CWE190_Integer_Overflow__int_Environment_add_74a extends AbstractTestCase
{
public void bad() throws Throwable
{
int data;
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
HashMap<Integer,Integer> dataHashMap = new HashMap<Integer,Integer>();
dataHashMap.put(0, data);
dataHashMap.put(1, data);
dataHashMap.put(2, data);
(new CWE190_Integer_Overflow__int_Environment_add_74b()).badSink(dataHashMap );
}
public void good() throws Throwable
{
goodG2B();
goodB2G();
}
/* goodG2B() - use GoodSource and BadSink */
private void goodG2B() throws Throwable
{
int data;
/* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
data = 2;
HashMap<Integer,Integer> dataHashMap = new HashMap<Integer,Integer>();
dataHashMap.put(0, data);
dataHashMap.put(1, data);
dataHashMap.put(2, data);
(new CWE190_Integer_Overflow__int_Environment_add_74b()).goodG2BSink(dataHashMap );
}
/* goodB2G() - use BadSource and GoodSink */
private void goodB2G() throws Throwable
{
int data;
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
HashMap<Integer,Integer> dataHashMap = new HashMap<Integer,Integer>();
dataHashMap.put(0, data);
dataHashMap.put(1, data);
dataHashMap.put(2, data);
(new CWE190_Integer_Overflow__int_Environment_add_74b()).goodB2GSink(dataHashMap );
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
| [
"\"ADD\"",
"\"ADD\""
]
| []
| [
"ADD"
]
| [] | ["ADD"] | java | 1 | 0 | |
kubernetes/client.go | package kubernetes
import (
"fmt"
"net"
"os"
osapps_v1 "github.com/openshift/api/apps/v1"
osproject_v1 "github.com/openshift/api/project/v1"
osroutes_v1 "github.com/openshift/api/route/v1"
apps_v1 "k8s.io/api/apps/v1"
auth_v1 "k8s.io/api/authorization/v1"
batch_v1 "k8s.io/api/batch/v1"
batch_v1beta1 "k8s.io/api/batch/v1beta1"
core_v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/version"
kube "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
kialiConfig "github.com/kiali/kiali/config"
"github.com/kiali/kiali/log"
)
var (
emptyListOptions = meta_v1.ListOptions{}
emptyGetOptions = meta_v1.GetOptions{}
)
type PodLogs struct {
Logs string `json:"logs,omitempty"`
}
// IstioClientInterface for mocks (only mocked functions are necessary here)
type IstioClientInterface interface {
CreateIstioObject(api, namespace, resourceType, json string) (IstioObject, error)
DeleteIstioObject(api, namespace, resourceType, name string) error
GetAdapter(namespace, adapterType, adapterName string) (IstioObject, error)
GetAdapters(namespace, labelSelector string) ([]IstioObject, error)
GetAuthorizationDetails(namespace string) (*RBACDetails, error)
GetCronJobs(namespace string) ([]batch_v1beta1.CronJob, error)
GetDeployment(namespace string, deploymentName string) (*apps_v1.Deployment, error)
GetDeployments(namespace string) ([]apps_v1.Deployment, error)
GetDeploymentsByLabel(namespace string, labelSelector string) ([]apps_v1.Deployment, error)
GetDeploymentConfig(namespace string, deploymentconfigName string) (*osapps_v1.DeploymentConfig, error)
GetDeploymentConfigs(namespace string) ([]osapps_v1.DeploymentConfig, error)
GetDestinationRule(namespace string, destinationrule string) (IstioObject, error)
GetDestinationRules(namespace string, serviceName string) ([]IstioObject, error)
GetEndpoints(namespace string, serviceName string) (*core_v1.Endpoints, error)
GetGateway(namespace string, gateway string) (IstioObject, error)
GetGateways(namespace string) ([]IstioObject, error)
GetIstioDetails(namespace string, serviceName string) (*IstioDetails, error)
GetIstioRule(namespace string, istiorule string) (IstioObject, error)
GetIstioRules(namespace string, labelSelector string) ([]IstioObject, error)
GetJobs(namespace string) ([]batch_v1.Job, error)
GetNamespace(namespace string) (*core_v1.Namespace, error)
GetNamespaces(labelSelector string) ([]core_v1.Namespace, error)
GetPod(namespace, name string) (*core_v1.Pod, error)
GetPodLogs(namespace, name string, opts *core_v1.PodLogOptions) (*PodLogs, error)
GetPods(namespace, labelSelector string) ([]core_v1.Pod, error)
GetProject(project string) (*osproject_v1.Project, error)
GetProjects(labelSelector string) ([]osproject_v1.Project, error)
GetQuotaSpec(namespace string, quotaSpecName string) (IstioObject, error)
GetQuotaSpecs(namespace string) ([]IstioObject, error)
GetQuotaSpecBinding(namespace string, quotaSpecBindingName string) (IstioObject, error)
GetQuotaSpecBindings(namespace string) ([]IstioObject, error)
GetReplicationControllers(namespace string) ([]core_v1.ReplicationController, error)
GetReplicaSets(namespace string) ([]apps_v1.ReplicaSet, error)
GetRoute(namespace string, name string) (*osroutes_v1.Route, error)
GetSidecar(namespace string, sidecar string) (IstioObject, error)
GetSidecars(namespace string) ([]IstioObject, error)
GetSelfSubjectAccessReview(namespace, api, resourceType string, verbs []string) ([]*auth_v1.SelfSubjectAccessReview, error)
GetService(namespace string, serviceName string) (*core_v1.Service, error)
GetServices(namespace string, selectorLabels map[string]string) ([]core_v1.Service, error)
GetServiceEntries(namespace string) ([]IstioObject, error)
GetServiceEntry(namespace string, serviceEntryName string) (IstioObject, error)
GetStatefulSet(namespace string, statefulsetName string) (*apps_v1.StatefulSet, error)
GetStatefulSets(namespace string) ([]apps_v1.StatefulSet, error)
GetTemplate(namespace, templateType, templateName string) (IstioObject, error)
GetTemplates(namespace, labelSelector string) ([]IstioObject, error)
GetPolicy(namespace string, policyName string) (IstioObject, error)
GetPolicies(namespace string) ([]IstioObject, error)
GetMeshPolicy(policyName string) (IstioObject, error)
GetMeshPolicies() ([]IstioObject, error)
GetClusterRbacConfig(name string) (IstioObject, error)
GetClusterRbacConfigs() ([]IstioObject, error)
GetRbacConfig(namespace string, name string) (IstioObject, error)
GetRbacConfigs(namespace string) ([]IstioObject, error)
GetServiceMeshPolicy(namespace string, name string) (IstioObject, error)
GetServiceMeshPolicies(namespace string) ([]IstioObject, error)
GetServiceMeshRbacConfig(namespace string, name string) (IstioObject, error)
GetServiceMeshRbacConfigs(namespace string) ([]IstioObject, error)
GetServiceRole(namespace string, name string) (IstioObject, error)
GetServiceRoles(namespace string) ([]IstioObject, error)
GetServiceRoleBinding(namespace string, name string) (IstioObject, error)
GetServiceRoleBindings(namespace string) ([]IstioObject, error)
GetServerVersion() (*version.Info, error)
GetToken() string
GetVirtualService(namespace string, virtualservice string) (IstioObject, error)
GetVirtualServices(namespace string, serviceName string) ([]IstioObject, error)
IsMaistraApi() bool
IsOpenShift() bool
UpdateIstioObject(api, namespace, resourceType, name, jsonPatch string) (IstioObject, error)
}
// IstioClient is the client struct for Kubernetes and Istio APIs
// It hides the way it queries each API
type IstioClient struct {
IstioClientInterface
token string
k8s *kube.Clientset
istioConfigApi *rest.RESTClient
istioNetworkingApi *rest.RESTClient
istioAuthenticationApi *rest.RESTClient
istioRbacApi *rest.RESTClient
maistraAuthenticationApi *rest.RESTClient
maistraRbacApi *rest.RESTClient
// isOpenShift private variable will check if kiali is deployed under an OpenShift cluster or not
// It is represented as a pointer to include the initialization phase.
// See kubernetes_service.go#IsOpenShift() for more details.
isOpenShift *bool
// isMaistraApi private variable will check if specific Maistra APIs for authentication and rbac are present.
// It is represented as a pointer to include the initialization phase.
// See kubernetes_service.go#IsMaistraApi() for more details.
isMaistraApi *bool
// rbacResources private variable will check which resources kiali has access to from rbac.istio.io group
// It is represented as a pointer to include the initialization phase.
// See istio_details_service.go#HasRbacResource() for more details.
rbacResources *map[string]bool
}
// GetK8sApi returns the clientset referencing all K8s rest clients
func (client *IstioClient) GetK8sApi() *kube.Clientset {
return client.k8s
}
// GetIstioConfigApi returns the istio config rest client
func (client *IstioClient) GetIstioConfigApi() *rest.RESTClient {
return client.istioConfigApi
}
// GetIstioNetworkingApi returns the istio networking rest client
func (client *IstioClient) GetIstioNetworkingApi() *rest.RESTClient {
return client.istioNetworkingApi
}
// GetIstioRbacApi returns the istio rbac rest client
func (client *IstioClient) GetIstioRbacApi() *rest.RESTClient {
return client.istioRbacApi
}
// GetToken returns the BearerToken used from the config
func (client *IstioClient) GetToken() string {
return client.token
}
// ConfigClient returns a client with the correct configuration.
// It returns the in-cluster configuration when Kiali runs in cluster (InCluster is true);
// otherwise it builds a configuration from the KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT environment variables.
// It returns an error on any problem.
func ConfigClient() (*rest.Config, error) {
if kialiConfig.Get().InCluster {
incluster, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
incluster.QPS = kialiConfig.Get().KubernetesConfig.QPS
incluster.Burst = kialiConfig.Get().KubernetesConfig.Burst
return incluster, nil
}
host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
if len(host) == 0 || len(port) == 0 {
return nil, fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined")
}
return &rest.Config{
// TODO: switch to using cluster DNS.
Host: "http://" + net.JoinHostPort(host, port),
QPS: kialiConfig.Get().KubernetesConfig.QPS,
Burst: kialiConfig.Get().KubernetesConfig.Burst,
}, nil
}
// NewClientFromConfig creates a new client to the Kubernetes and Istio APIs.
// It assumes that Istio is deployed into the cluster.
// It hides the access to Kubernetes/OpenShift credentials.
// It hides the low-level use of the Kubernetes and Istio APIs; these should be considered an implementation detail.
// It returns an error on any problem.
func NewClientFromConfig(config *rest.Config) (*IstioClient, error) {
client := IstioClient{
token: config.BearerToken,
}
log.Debugf("Rest perf config QPS: %f Burst: %d", config.QPS, config.Burst)
k8s, err := kube.NewForConfig(config)
if err != nil {
return nil, err
}
client.k8s = k8s
// Istio is a CRD extension of Kubernetes API, so any custom type should be registered here.
// KnownTypes registers the Istio objects we use, as soon as we get more info we will increase the number of types.
types := runtime.NewScheme()
schemeBuilder := runtime.NewSchemeBuilder(
func(scheme *runtime.Scheme) error {
// Register networking types
for _, nt := range networkingTypes {
scheme.AddKnownTypeWithName(NetworkingGroupVersion.WithKind(nt.objectKind), &GenericIstioObject{})
scheme.AddKnownTypeWithName(NetworkingGroupVersion.WithKind(nt.collectionKind), &GenericIstioObjectList{})
}
// Register config types
for _, cf := range configTypes {
scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(cf.objectKind), &GenericIstioObject{})
scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(cf.collectionKind), &GenericIstioObjectList{})
}
// Register adapter types
for _, ad := range adapterTypes {
scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(ad.objectKind), &GenericIstioObject{})
scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(ad.collectionKind), &GenericIstioObjectList{})
}
// Register template types
for _, tp := range templateTypes {
scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(tp.objectKind), &GenericIstioObject{})
scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(tp.collectionKind), &GenericIstioObjectList{})
}
// Register authentication types
for _, at := range authenticationTypes {
scheme.AddKnownTypeWithName(AuthenticationGroupVersion.WithKind(at.objectKind), &GenericIstioObject{})
scheme.AddKnownTypeWithName(AuthenticationGroupVersion.WithKind(at.collectionKind), &GenericIstioObjectList{})
}
for _, at := range maistraAuthenticationTypes {
scheme.AddKnownTypeWithName(MaistraAuthenticationGroupVersion.WithKind(at.objectKind), &GenericIstioObject{})
scheme.AddKnownTypeWithName(MaistraAuthenticationGroupVersion.WithKind(at.collectionKind), &GenericIstioObjectList{})
}
// Register rbac types
for _, rt := range rbacTypes {
scheme.AddKnownTypeWithName(RbacGroupVersion.WithKind(rt.objectKind), &GenericIstioObject{})
scheme.AddKnownTypeWithName(RbacGroupVersion.WithKind(rt.collectionKind), &GenericIstioObjectList{})
}
for _, rt := range maistraRbacTypes {
scheme.AddKnownTypeWithName(MaistraRbacGroupVersion.WithKind(rt.objectKind), &GenericIstioObject{})
scheme.AddKnownTypeWithName(MaistraRbacGroupVersion.WithKind(rt.collectionKind), &GenericIstioObjectList{})
}
meta_v1.AddToGroupVersion(scheme, ConfigGroupVersion)
meta_v1.AddToGroupVersion(scheme, NetworkingGroupVersion)
meta_v1.AddToGroupVersion(scheme, AuthenticationGroupVersion)
meta_v1.AddToGroupVersion(scheme, RbacGroupVersion)
meta_v1.AddToGroupVersion(scheme, MaistraAuthenticationGroupVersion)
meta_v1.AddToGroupVersion(scheme, MaistraRbacGroupVersion)
return nil
})
err = schemeBuilder.AddToScheme(types)
if err != nil {
return nil, err
}
// Istio needs another type as it queries a different K8S API.
istioConfigAPI, err := newClientForAPI(config, ConfigGroupVersion, types)
if err != nil {
return nil, err
}
istioNetworkingAPI, err := newClientForAPI(config, NetworkingGroupVersion, types)
if err != nil {
return nil, err
}
istioAuthenticationAPI, err := newClientForAPI(config, AuthenticationGroupVersion, types)
if err != nil {
return nil, err
}
istioRbacApi, err := newClientForAPI(config, RbacGroupVersion, types)
if err != nil {
return nil, err
}
maistraAuthenticationAPI, err := newClientForAPI(config, MaistraAuthenticationGroupVersion, types)
if err != nil {
return nil, err
}
maistraRbacApi, err := newClientForAPI(config, MaistraRbacGroupVersion, types)
if err != nil {
return nil, err
}
client.istioConfigApi = istioConfigAPI
client.istioNetworkingApi = istioNetworkingAPI
client.istioAuthenticationApi = istioAuthenticationAPI
client.istioRbacApi = istioRbacApi
client.maistraAuthenticationApi = maistraAuthenticationAPI
client.maistraRbacApi = maistraRbacApi
return &client, nil
}
func newClientForAPI(fromCfg *rest.Config, groupVersion schema.GroupVersion, scheme *runtime.Scheme) (*rest.RESTClient, error) {
cfg := rest.Config{
Host: fromCfg.Host,
APIPath: "/apis",
ContentConfig: rest.ContentConfig{
GroupVersion: &groupVersion,
NegotiatedSerializer: serializer.DirectCodecFactory{CodecFactory: serializer.NewCodecFactory(scheme)},
ContentType: runtime.ContentTypeJSON,
},
BearerToken: fromCfg.BearerToken,
TLSClientConfig: fromCfg.TLSClientConfig,
QPS: fromCfg.QPS,
Burst: fromCfg.Burst,
}
return rest.RESTClientFor(&cfg)
}
| [
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\""
]
| []
| [
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT"
]
| [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"] | go | 2 | 0 | |
uwb/ep_recent.py | #!/usr/bin/env python
# -*- coding: utf8 -*-
#
# Copyright (c) 2014 unfoldingWord
# http://creativecommons.org/licenses/MIT/
# See LICENSE file for details.
#
# Contributors:
# Jesse Griffin <[email protected]>
"""
Generates HTML page of recently edited pads.
"""
import os
import sys
import time
import codecs
from etherpad_lite import EtherpadLiteClient
from etherpad_lite import EtherpadException
path = '/var/www/vhosts/pad.door43.org/httpdocs/recent.html'
link = '''<tr>
<td><a href="https://pad.door43.org/p/{0}">{0}</a></td>
<td>{1}</td>
</tr>'''
page_template = '''<!DOCTYPE html>
<html lang="en">
<head>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css">
</head>
<body>
<div class="container">
<h1>unfoldingWord Recently Edited Pads</h1>
<div class="row"><div class="col-md-4">
<table class="table table-striped">
<th>Pad Name</th><th>Modified Time (EST)</th>
{0}
</table>
</div></div>
</div>
</body>
</html>'''
def writeFile(f, content):
out = codecs.open(f, encoding='utf-8', mode='w')
out.write(content)
out.close()
if __name__ == '__main__':
try:
pw = open('/root/.ep_api_key', 'r').read().strip()
ep = EtherpadLiteClient(base_params={'apikey': pw},
api_version='1.2.10')
except:
e = sys.exc_info()[0]
print 'Problem logging into Etherpad via API: {0}'.format(e)
sys.exit(1)
os.environ['TZ'] = 'US/Eastern'
pads = ep.listAllPads()
recent = []
for p in pads['padIDs']:
if not p:
continue
recent.append((p, ep.getLastEdited(padID=p)['lastEdited']))
recent_sorted = sorted(recent, key=lambda p: p[1], reverse=True)
recent_html = []
for i in recent_sorted:
t = time.strftime('%Y-%m-%d %H:%M:%S',
time.localtime(int(str(i[1])[0:10])))
recent_html.append(link.format(i[0], t))
writeFile(path, page_template.format('\n'.join(recent_html)))
| []
| []
| [
"TZ"
]
| [] | ["TZ"] | python | 1 | 0 | |
net/http/server/main.go | package main
import (
"flag"
"fmt"
"log"
"net/http"
"os"
"strconv"
"time"
"github.com/go-zoo/bone"
)
/*
WARNING:
The default http server has NO TIMEOUTS
*/
func main() {
port := "8080"
pport := flag.String("p", "", "port on which the server will listen")
flag.Parse()
if *pport == "" {
if p := os.Getenv("SERVER_PORT"); p != "" {
//fmt.Printf("using SERVER_PORT environment variable\n")
port = p
} else {
//fmt.Printf("using default PORT\n")
}
} else {
//fmt.Printf("using PORT from -p flag\n")
port = *pport
}
p := 0
var err error
if p, err = strconv.Atoi(port); err != nil {
panic(fmt.Sprintf("invalid port '%s': %s", port, err.Error()))
}
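// Explicit timeouts address the warning above: a zero-value http.Server would otherwise hold
// connections open indefinitely.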
server := http.Server{
Addr: fmt.Sprintf(":%d", p),
ReadTimeout: time.Second * 3,
WriteTimeout: time.Second * 3,
ReadHeaderTimeout: time.Second * 3,
IdleTimeout: time.Second * 3,
Handler: getMux2(),
}
log.Printf("Listening on port %d", p)
if err := server.ListenAndServe(); err != nil {
panic(err)
}
}
func getMux() http.Handler {
mux := http.DefaultServeMux
mux.HandleFunc("/", hello)
return mux
}
func getMux2() http.Handler {
mux := bone.New()
mux.GetFunc("/hello", hello)
mux.GetFunc("/hello/:name", helloName)
return mux
}
func hello(w http.ResponseWriter, r *http.Request) {
r.ParseForm()
name := r.URL.Query().Get("name")
str := ""
if name == "" {
str = "hello\n"
} else {
str = fmt.Sprintf("hello '%s'\n", name)
}
fmt.Fprint(w, str)
}
func helloName(w http.ResponseWriter, r *http.Request) {
name := bone.GetValue(r, "name")
fmt.Fprintf(w, "hello '%s'\n", name)
}
| [
"\"SERVER_PORT\""
]
| []
| [
"SERVER_PORT"
]
| [] | ["SERVER_PORT"] | go | 1 | 0 | |
pyreball/__main__.py | import sys
from pathlib import Path
import argparse
import os
import re
import json
from typing import Dict, Optional, Union, Tuple, cast
import xml
from xml.dom.minidom import parseString
import pkg_resources
from pyreball.constants import (
PATH_TO_CONFIG_LOCATION,
DEFAULT_PATH_TO_CONFIG,
STYLES_TEMPLATE_FILENAME,
CONFIG_INI_FILENAME,
)
from pyreball.utils.logger import get_logger
from pyreball.utils.utils import (
get_file_config,
check_and_fix_parameters,
merge_parameter_dictionaries,
ChoiceParameter,
IntegerParameter,
carefully_remove_directory_if_exists,
Substitutor,
)
from pyreball.utils.template_utils import get_css, get_html_begin, get_html_end
logger = get_logger()
# keep the indentation in the following snippets!!!
JAVASCRIPT_CHANGE_EXPAND = """
function change_expand(button, table_id){
var table = document.getElementById(table_id);
if (table.classList.contains("expanded")) {
// collapse the table
table.style.maxHeight = "390px";
button.innerHTML = "⟱";
} else {
// expand the table
table.style.maxHeight = "none";
button.innerHTML = "⟰";
}
table.classList.toggle("expanded");
}
"""
JAVASCRIPT_ON_LOAD = """
window.onload = function() {
//dom not only ready, but everything is loaded
scrollers = document.getElementsByClassName("table-scroller");
for (i = 0; i < scrollers.length; i++) {
if (scrollers[i].scrollHeight == scrollers[i].clientHeight) {
// hide the expand button
expander_id = scrollers[i].id.replace('scroller', 'expander');
expander = document.getElementById(expander_id);
expander.style.display = "none";
}
}
};
"""
JAVASCRIPT_ROLLING_PLOTS = """
function next(div_id, button_next_id, button_prev_id) {
var qElems = document.querySelectorAll(div_id + '>div');
for (var i = 0; i < qElems.length; i++) {
if (qElems[i].style.display != 'none') {
qElems[i].style.display = 'none';
qElems[i + 1].style.display = 'block';
if (i == qElems.length - 2) {
document.getElementById(button_next_id).disabled = true;
}
document.getElementById(button_prev_id).disabled = false;
break;
}
}
}
function previous(div_id, button_next_id, button_prev_id) {
var qElems = document.querySelectorAll(div_id + '>div');
for (var i = 0; i < qElems.length; i++) {
if (qElems[i].style.display != 'none') {
qElems[i].style.display = 'none';
qElems[i - 1].style.display = 'block';
if (i == 1) {
document.getElementById(button_prev_id).disabled = true;
}
document.getElementById(button_next_id).disabled = false;
break;
}
}
}
"""
JAVASCRIPT_SORTABLE_TABLE = """
$(document).ready(function () {
$('.sortable_table').DataTable({
"paging": false,
"searching": false,
"info": false,
});
});
"""
def replace_ids(filename: Path) -> None:
# collect all ids in form of table-N-M
all_table_and_img_ids = set()
with open(filename, "r") as f:
for line in f:
# note that we need to replace not only "table" ids but also "img" ids etc.
results = re.findall(r'table-id[\d]+-[\d]+', line)
if results:
all_table_and_img_ids.update(results)
results = re.findall(r'img-id[\d]+-[\d]+', line)
if results:
all_table_and_img_ids.update(results)
replacements = []
for element_id in all_table_and_img_ids:
re_results = re.search(r'(.+)-(id\d+)-(\d+)', element_id)
if re_results:
# this must be first
replacements.append(("ref-" + re_results.group(2), re_results.group(1) + "-" + re_results.group(3)))
# this must be second (because it would catch the first case as well)
replacements.append((re_results.group(2) + '(-' + re_results.group(3) + ')?', re_results.group(3)))
# replace all table-N-M with table-M and Table N with Table M
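# e.g. for an element id "table-id4-7": "#ref-id4" links become "#table-7" and the id itself becomes "table-7"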
substitutor = Substitutor(replacements=replacements)
modified_lines = []
with open(filename, "r") as f:
for line in f:
modified_lines.append(substitutor.sub(line))
with open(filename, "w") as f:
f.writelines(modified_lines)
def _get_node_text(node: xml.dom.minidom.Element) -> str:
result = []
for child in node.childNodes:
if child.nodeType in (xml.dom.Node.TEXT_NODE, xml.dom.Node.CDATA_SECTION_NODE):
result.append(child.data)
else:
result.extend(_get_node_text(child))
return ''.join(result)
def _parse_heading_info(line: str) -> Optional[Tuple[int, str, str]]:
heading_pattern = r'<h(\d).+</h(\d)>'
m = re.search(heading_pattern, line)
if m:
heading_level = m.group(1)
doc = parseString(m.group(0))
heading = doc.getElementsByTagName("h" + heading_level)[0]
heading_id = heading.getAttribute('id')
content = _get_node_text(heading).replace('¶', '')
return int(heading_level), heading_id, content
else:
return None
def insert_heading_title_and_toc(filename: Path, include_toc: bool = True):
# fetch all lines
with open(filename, "r") as f:
lines = f.readlines()
# try to extract the title from <title> element:
report_title = None
for line in lines:
m = re.match(r'^<title class="custom">([^<]*)</title>$', line)
if m:
report_title = m.group(1)
break
# get all headings in the report
container_start_index = 0
headings = []
for i, line in enumerate(lines):
if '<div class="main_container">' in line:
container_start_index = i
if include_toc:
heading_info = _parse_heading_info(line)
if heading_info:
headings.append(heading_info)
if len(headings) > 0 and report_title is None:
# only when headings were collected (i.e. include_toc=True) and no title was set manually
report_title = "Table of Contents"
lines_index = container_start_index + 1
# prepare new HTML lines with TOC
if report_title is not None:
lines.insert(
lines_index,
f'<h1 id="toc_generated_0">{report_title}<a class="anchor-link" href="#toc_generated_0">¶</a></h1>\n'
)
lines_index += 1
current_level = 1
for h in headings:
# do we need to add also <ul> ?
while h[0] > current_level:
lines.insert(lines_index, '<ul style="list-style-type:none; margin:0px">\n')
lines_index += 1
current_level += 1
# do we need to add also </ul> ?
while h[0] < current_level:
lines.insert(lines_index, '</ul>\n')
lines_index += 1
current_level -= 1
# prepare the line:
if h[0] == 1:
current_line = '<a href="#' + h[1] + '">' + h[2] + '</a><br/>\n'
else:
current_line = '<li><a href="#' + h[1] + '">' + h[2] + '</a></li>\n'
lines.insert(lines_index, current_line)
lines_index += 1
# at the end, get back to level 1 if necessary
while 1 < current_level:
lines.insert(lines_index, '</ul>\n')
lines_index += 1
current_level -= 1
with open(filename, "w") as f:
f.writelines(lines)
parameter_specifications = [
ChoiceParameter('--toc', choices=['yes', 'no'], default='no', help='Include table of contents.'),
ChoiceParameter('--align-tables', choices=['left', 'center', 'right'], default='center',
help='Alignment of tables.'),
ChoiceParameter('--numbered-tables', choices=['yes', 'no'], default='no', help='Number the tables.'),
ChoiceParameter('--sortable-tables', choices=['yes', 'no'], default='no', help='Make the tables sortable.'),
ChoiceParameter('--full-tables', choices=['yes', 'no'], default='no', help='Force all tables to be expanded.'),
ChoiceParameter('--align-plots', choices=['left', 'center', 'right'], default='center', help='Alignment of plots.'),
ChoiceParameter('--numbered-plots', choices=['yes', 'no'], default='no', help='Number the plots.'),
ChoiceParameter('--matplotlib-format', choices=['png', 'svg'], default='svg', help='Format of matplotlib plots.'),
ChoiceParameter('--matplotlib-embedded', choices=['yes', 'no'], default='no',
help='Whether to embedded matplotlib images directly into HTML. Only for svg format.'),
ChoiceParameter('--numbered-headings', choices=['yes', 'no'], default='no', help='Number the headings.'),
IntegerParameter('--page-width', boundaries=(40, 100), default=80,
help='Width of the page in percentage. An integer in the range 40..100.'),
ChoiceParameter('--keep-stdout', choices=['yes', 'no'], default='no', help='Print the output to stdout too.'),
]
def parse_arguments() -> Dict[str, Optional[Union[str, int]]]:
parser = argparse.ArgumentParser(description='Generate Python report.')
for input_param in parameter_specifications:
input_param.add_argument_to_parser(parser)
parser.add_argument('--output-path', help='Output path. It must contain also the name of the output file with '
'suffix .html. If not provided, the output file name is derived from '
'the name of the input script and the same directory is used.')
parser.add_argument('filename', help='Input file path.')
parser.add_argument('script_args', nargs=argparse.REMAINDER)
args = parser.parse_args()
return vars(args)
def get_config_directory() -> Path:
"""Get the location of the config files.
If the configs were generated by pyreball-generate-config command, they should be found.
If they were not generated or some of them no longer exist, the default package config will be used.
"""
config_location_file_path = Path(PATH_TO_CONFIG_LOCATION)
if config_location_file_path.exists():
# the config was generated, let's find out its directory
config_directory = Path(Path(PATH_TO_CONFIG_LOCATION).read_text())
if not (config_directory / CONFIG_INI_FILENAME).exists() \
or not (config_directory / STYLES_TEMPLATE_FILENAME).exists():
logger.warning(f'{CONFIG_INI_FILENAME} or {STYLES_TEMPLATE_FILENAME} was not found in {config_directory}. '
f'Try re-generating the configs by pyreball-generate-config command. For now, we will '
f'use the default package configs.')
config_directory = DEFAULT_PATH_TO_CONFIG
else:
config_directory = DEFAULT_PATH_TO_CONFIG
return config_directory
def main() -> None:
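# Flow: parse CLI args, merge them with the config file, expose the merged parameters and output path to the
# user script via the _TMP_PYREBALL_GENERATOR_PARAMETERS environment variable, write the HTML header, run the
# script, append the HTML footer, then post-process ids, headings and the optional table of contents.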
args_dict = parse_arguments()
script_args_string = ' '.join(args_dict.pop('script_args'))
input_filename = Path(args_dict.pop('filename')) # type: ignore
output_path = cast(Optional[str], args_dict.pop('output_path'))
if output_path and not output_path.endswith(".html"):
raise ValueError("Value of output path parameter must end with .html suffix.")
cli_parameters = check_and_fix_parameters(parameters=args_dict, parameter_specifications=parameter_specifications,
none_allowed=True)
config_directory = get_config_directory()
file_config_parameters = get_file_config(filename=CONFIG_INI_FILENAME, directory=config_directory,
parameter_specifications=parameter_specifications)
parameters = merge_parameter_dictionaries(primary_parameters=cli_parameters,
secondary_parameters=file_config_parameters,
parameter_specifications=parameter_specifications)
if not input_filename.is_file():
raise ValueError(f"File {input_filename} does not exist.")
if not output_path:
# use the directory of the input file
output_dir_path = input_filename.resolve().parents[0]
title = input_filename.stem
else:
output_path = Path(output_path).resolve()
title = output_path.stem
output_dir_path = output_path.parents[0]
output_dir_path.mkdir(parents=True, exist_ok=True)
path_str = str(output_dir_path / title)
os.environ["_TMP_PYREBALL_GENERATOR_PARAMETERS"] = json.dumps({**parameters, 'html_dir_path': path_str})
html_path = Path(path_str + ".html")
# remove the directory with images if it exists:
carefully_remove_directory_if_exists(directory=Path(path_str))
script_definitions = (JAVASCRIPT_CHANGE_EXPAND + JAVASCRIPT_ON_LOAD + JAVASCRIPT_SORTABLE_TABLE
+ JAVASCRIPT_ROLLING_PLOTS)
css_definitions = get_css(filename=STYLES_TEMPLATE_FILENAME, directory=config_directory,
page_width=cast(int, parameters['page_width']))
html_begin = get_html_begin(template_path=Path(pkg_resources.resource_filename('pyreball',
'cfg/html_begin.template')),
title=title, script_definitions=script_definitions, css_definitions=css_definitions)
html_end = get_html_end(template_path=Path(pkg_resources.resource_filename('pyreball', 'cfg/html_end.template')))
with open(html_path, 'w') as f:
f.write(html_begin)
try:
# Use {sys.executable} instead of just "python" command as it may not work correctly as a PyCharm external tool
os.system(f"{sys.executable} {input_filename} {script_args_string}")
finally:
with open(html_path, 'a') as f:
f.write(html_end)
replace_ids(html_path)
insert_heading_title_and_toc(filename=html_path, include_toc=parameters['toc'] == 'yes')
if __name__ == '__main__':
main()
| []
| []
| [
"_TMP_PYREBALL_GENERATOR_PARAMETERS"
]
| [] | ["_TMP_PYREBALL_GENERATOR_PARAMETERS"] | python | 1 | 0 | |
pkg/utils/kubelessutil.go | /*
Copyright (c) 2016-2017 Bitnami
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"os"
"path"
"strconv"
"strings"
"time"
monitoringv1alpha1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
kubelessApi "github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1"
"github.com/kubeless/kubeless/pkg/langruntime"
"github.com/sirupsen/logrus"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
clientsetAPIExtensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
)
func appendToCommand(orig string, command ...string) string {
if len(orig) > 0 {
return fmt.Sprintf("%s && %s", orig, strings.Join(command, " && "))
}
return strings.Join(command, " && ")
}
func getProvisionContainer(function, checksum, fileName, handler, contentType, runtime, prepareImage string, runtimeVolume, depsVolume v1.VolumeMount, lr *langruntime.Langruntimes) (v1.Container, error) {
prepareCommand := ""
originFile := path.Join(depsVolume.MountPath, fileName)
// Prepare Function file and dependencies
if strings.Contains(contentType, "base64") {
// File is encoded in base64
decodedFile := "/tmp/func.decoded"
prepareCommand = appendToCommand(prepareCommand, fmt.Sprintf("base64 -d < %s > %s", originFile, decodedFile))
originFile = decodedFile
} else if strings.Contains(contentType, "url") {
fromURLFile := "/tmp/func.fromurl"
prepareCommand = appendToCommand(prepareCommand, fmt.Sprintf("curl %s -L --silent --output %s", function, fromURLFile))
originFile = fromURLFile
} else if strings.Contains(contentType, "text") || contentType == "" {
// Assuming that the function is plain text,
// so we don't need to preprocess it
} else {
return v1.Container{}, fmt.Errorf("Unable to prepare function of type %s: Unknown format", contentType)
}
// Validate checksum
if checksum == "" {
// DEPRECATED: Checksum may be empty
} else {
checksumInfo := strings.Split(checksum, ":")
switch checksumInfo[0] {
case "sha256":
shaFile := "/tmp/func.sha256"
prepareCommand = appendToCommand(prepareCommand,
fmt.Sprintf("echo '%s %s' > %s", checksumInfo[1], originFile, shaFile),
fmt.Sprintf("sha256sum -c %s", shaFile),
)
break
default:
return v1.Container{}, fmt.Errorf("Unable to verify checksum %s: Unknown format", checksum)
}
}
// Extract content in case it is a Zip file
if strings.Contains(contentType, "zip") {
prepareCommand = appendToCommand(prepareCommand,
fmt.Sprintf("unzip -o %s -d %s", originFile, runtimeVolume.MountPath),
)
} else {
// Copy the target as a single file
destFileName, err := getFileName(handler, contentType, runtime, lr)
if err != nil {
return v1.Container{}, err
}
dest := path.Join(runtimeVolume.MountPath, destFileName)
prepareCommand = appendToCommand(prepareCommand,
fmt.Sprintf("cp %s %s", originFile, dest),
)
}
// Copy deps file to the installation path
runtimeInf, err := lr.GetRuntimeInfo(runtime)
if err == nil && runtimeInf.DepName != "" {
depsFile := path.Join(depsVolume.MountPath, runtimeInf.DepName)
prepareCommand = appendToCommand(prepareCommand,
fmt.Sprintf("cp %s %s", depsFile, runtimeVolume.MountPath),
)
}
return v1.Container{
Name: "prepare",
Image: prepareImage,
Command: []string{"sh", "-c"},
Args: []string{prepareCommand},
VolumeMounts: []v1.VolumeMount{runtimeVolume, depsVolume},
ImagePullPolicy: v1.PullIfNotPresent,
}, nil
}
// CreateIngress creates ingress rule for a specific function
func CreateIngress(client kubernetes.Interface, httpTriggerObj *kubelessApi.HTTPTrigger, or []metav1.OwnerReference) error {
funcSvc, err := client.CoreV1().Services(httpTriggerObj.ObjectMeta.Namespace).Get(httpTriggerObj.Spec.FunctionName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("Unable to find the function internal service: %v", funcSvc)
}
ingress := &v1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: httpTriggerObj.Name,
Namespace: httpTriggerObj.Namespace,
OwnerReferences: or,
Labels: httpTriggerObj.ObjectMeta.Labels,
},
Spec: v1beta1.IngressSpec{
Rules: []v1beta1.IngressRule{
{
Host: httpTriggerObj.Spec.HostName,
IngressRuleValue: v1beta1.IngressRuleValue{
HTTP: &v1beta1.HTTPIngressRuleValue{
Paths: []v1beta1.HTTPIngressPath{
{
Path: "/" + httpTriggerObj.Spec.Path,
Backend: v1beta1.IngressBackend{
ServiceName: funcSvc.Name,
ServicePort: funcSvc.Spec.Ports[0].TargetPort,
},
},
},
},
},
},
},
},
}
ingressAnnotations := make(map[string]string)
// If the URL exposed by the backend service differs from the path specified in the Ingress rule,
// any request would return 404 without a rewrite. Set the annotation
// nginx.ingress.kubernetes.io/rewrite-target to the path expected by the service.
ingressAnnotations["nginx.ingress.kubernetes.io/rewrite-target"] = "/"
if len(httpTriggerObj.Spec.BasicAuthSecret) > 0 {
switch gateway := httpTriggerObj.Spec.Gateway; gateway {
case "nginx":
ingressAnnotations["kubernetes.io/ingress.class"] = "nginx"
ingressAnnotations["nginx.ingress.kubernetes.io/auth-secret"] = httpTriggerObj.Spec.BasicAuthSecret
ingressAnnotations["nginx.ingress.kubernetes.io/auth-type"] = "basic"
break
case "traefik":
ingressAnnotations["kubernetes.io/ingress.class"] = "traefik"
ingressAnnotations["ingress.kubernetes.io/auth-secret"] = httpTriggerObj.Spec.BasicAuthSecret
ingressAnnotations["ingress.kubernetes.io/auth-type"] = "basic"
break
case "kong":
return fmt.Errorf("Setting basic authentication with Kong is not yet supported")
}
}
if len(httpTriggerObj.Spec.TLSSecret) > 0 && httpTriggerObj.Spec.TLSAcme {
return fmt.Errorf("Can not create ingress object from HTTP trigger spec with both TLSSecret and IngressTLS specified")
}
// secure an Ingress by specified secret that contains a TLS private key and certificate
if len(httpTriggerObj.Spec.TLSSecret) > 0 {
ingress.Spec.TLS = []v1beta1.IngressTLS{
{
SecretName: httpTriggerObj.Spec.TLSSecret,
Hosts: []string{httpTriggerObj.Spec.HostName},
},
}
}
// add annotations and TLS configuration for kube-lego
if httpTriggerObj.Spec.TLSAcme {
ingressAnnotations["kubernetes.io/tls-acme"] = "true"
ingressAnnotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "true"
ingress.Spec.TLS = []v1beta1.IngressTLS{
{
Hosts: []string{httpTriggerObj.Spec.HostName},
SecretName: httpTriggerObj.Name + "-tls",
},
}
}
ingress.ObjectMeta.Annotations = ingressAnnotations
_, err = client.ExtensionsV1beta1().Ingresses(httpTriggerObj.Namespace).Create(ingress)
if err != nil && k8sErrors.IsAlreadyExists(err) {
var newIngress *v1beta1.Ingress
newIngress, err = client.ExtensionsV1beta1().Ingresses(httpTriggerObj.Namespace).Get(ingress.Name, metav1.GetOptions{})
if err != nil {
return err
}
if len(ingress.ObjectMeta.Labels) > 0 {
newIngress.ObjectMeta.Labels = ingress.ObjectMeta.Labels
}
newIngress.ObjectMeta.OwnerReferences = or
newIngress.Spec = ingress.Spec
_, err = client.ExtensionsV1beta1().Ingresses(httpTriggerObj.Namespace).Update(newIngress)
if err != nil && k8sErrors.IsAlreadyExists(err) {
// The configmap may already exist and there is nothing to update
return nil
}
}
return err
}
func splitHandler(handler string) (string, string, error) {
str := strings.Split(handler, ".")
if len(str) != 2 {
return "", "", fmt.Errorf("failed: incorrect handler format. It should be module_name.handler_name")
}
return str[0], str[1], nil
}
// getFileName returns a file name based on a handler identifier
func getFileName(handler, funcContentType, runtime string, lr *langruntime.Langruntimes) (string, error) {
modName, _, err := splitHandler(handler)
if err != nil {
return "", err
}
filename := modName
if funcContentType == "text" || funcContentType == "" || funcContentType == "url" {
// We can only guess the extension if the function is specified as plain text
runtimeInf, err := lr.GetRuntimeInfo(runtime)
if err == nil {
filename = modName + runtimeInf.FileNameSuffix
}
}
return filename, nil
}
// EnsureFuncConfigMap creates/updates a config map with a function specification
func EnsureFuncConfigMap(client kubernetes.Interface, funcObj *kubelessApi.Function, or []metav1.OwnerReference, lr *langruntime.Langruntimes) error {
configMapData := map[string]string{}
var err error
if funcObj.Spec.Handler != "" {
fileName, err := getFileName(funcObj.Spec.Handler, funcObj.Spec.FunctionContentType, funcObj.Spec.Runtime, lr)
if err != nil {
return err
}
configMapData = map[string]string{
"handler": funcObj.Spec.Handler,
fileName: funcObj.Spec.Function,
}
runtimeInfo, err := lr.GetRuntimeInfo(funcObj.Spec.Runtime)
if err == nil && runtimeInfo.DepName != "" {
configMapData[runtimeInfo.DepName] = funcObj.Spec.Deps
}
}
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: funcObj.ObjectMeta.Name,
Labels: funcObj.ObjectMeta.Labels,
OwnerReferences: or,
},
Data: configMapData,
}
_, err = client.Core().ConfigMaps(funcObj.ObjectMeta.Namespace).Create(configMap)
if err != nil && k8sErrors.IsAlreadyExists(err) {
// In case the ConfigMap already exists we should update
// just certain fields (to avoid race conditions)
var newConfigMap *v1.ConfigMap
newConfigMap, err = client.Core().ConfigMaps(funcObj.ObjectMeta.Namespace).Get(funcObj.ObjectMeta.Name, metav1.GetOptions{})
if err != nil {
return err
}
newConfigMap.ObjectMeta.Labels = funcObj.ObjectMeta.Labels
newConfigMap.ObjectMeta.OwnerReferences = or
newConfigMap.Data = configMap.Data
_, err = client.Core().ConfigMaps(funcObj.ObjectMeta.Namespace).Update(newConfigMap)
if err != nil && k8sErrors.IsAlreadyExists(err) {
// The configmap may already exist and there is nothing to update
return nil
}
}
return err
}
// serviceSpec resolves backward incompatibility with older clients that do not include a ServiceSpec in the
// function spec: if the ServiceSpec is empty, a default spec exposing port 8080 is used.
func serviceSpec(funcObj *kubelessApi.Function) v1.ServiceSpec {
if len(funcObj.Spec.ServiceSpec.Ports) == 0 {
return v1.ServiceSpec{
Ports: []v1.ServicePort{
{
// Note: Prefix: "http-" is added to adapt to Istio so that it can discover the function services
Name: "http-function-port",
Protocol: v1.ProtocolTCP,
Port: 8080,
TargetPort: intstr.FromInt(8080),
},
},
Selector: funcObj.ObjectMeta.Labels,
Type: v1.ServiceTypeClusterIP,
}
}
return funcObj.Spec.ServiceSpec
}
// EnsureFuncService creates/updates a function service
func EnsureFuncService(client kubernetes.Interface, funcObj *kubelessApi.Function, or []metav1.OwnerReference) error {
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: funcObj.ObjectMeta.Name,
Labels: funcObj.ObjectMeta.Labels,
OwnerReferences: or,
},
Spec: serviceSpec(funcObj),
}
_, err := client.Core().Services(funcObj.ObjectMeta.Namespace).Create(svc)
if err != nil && k8sErrors.IsAlreadyExists(err) {
// In case the SVC already exists we should update
// just certain fields (to avoid race conditions)
var newSvc *v1.Service
newSvc, err = client.Core().Services(funcObj.ObjectMeta.Namespace).Get(funcObj.ObjectMeta.Name, metav1.GetOptions{})
if err != nil {
return err
}
newSvc.ObjectMeta.Labels = funcObj.ObjectMeta.Labels
newSvc.ObjectMeta.OwnerReferences = or
newSvc.Spec.Ports = svc.Spec.Ports
_, err = client.Core().Services(funcObj.ObjectMeta.Namespace).Update(newSvc)
if err != nil && k8sErrors.IsAlreadyExists(err) {
// The service may already exist and there is nothing to update
return nil
}
}
return err
}
func getRuntimeVolumeMount(name string) v1.VolumeMount {
return v1.VolumeMount{
Name: name,
MountPath: "/kubeless",
}
}
func getChecksum(content string) (string, error) {
h := sha256.New()
_, err := h.Write([]byte(content))
if err != nil {
return "", nil
}
return hex.EncodeToString(h.Sum(nil)), nil
}
// populatePodSpec populates a basic Pod Spec that uses init containers to populate
// the runtime container with the function content and its dependencies.
// The caller should define the runtime container(s).
// It accepts a prepopulated podSpec with default information and volume that the
// runtime container should mount
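// In summary, up to three init containers are appended: a "prepare" container when Spec.Function is set,
// a dependency-install container when Spec.Deps is set, and a compilation container for runtimes that
// require compilation.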
func populatePodSpec(funcObj *kubelessApi.Function, lr *langruntime.Langruntimes, podSpec *v1.PodSpec, runtimeVolumeMount v1.VolumeMount, provisionImage string, imagePullSecrets []v1.LocalObjectReference) error {
depsVolumeName := funcObj.ObjectMeta.Name + "-deps"
result := podSpec
if len(imagePullSecrets) > 0 {
result.ImagePullSecrets = imagePullSecrets
}
result.Volumes = append(podSpec.Volumes,
v1.Volume{
Name: runtimeVolumeMount.Name,
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
v1.Volume{
Name: depsVolumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: funcObj.ObjectMeta.Name,
},
},
},
},
)
// prepare init-containers if some function is specified
if funcObj.Spec.Function != "" {
fileName, err := getFileName(funcObj.Spec.Handler, funcObj.Spec.FunctionContentType, funcObj.Spec.Runtime, lr)
if err != nil {
return err
}
srcVolumeMount := v1.VolumeMount{
Name: depsVolumeName,
MountPath: "/src",
}
provisionContainer, err := getProvisionContainer(
funcObj.Spec.Function,
funcObj.Spec.Checksum,
fileName,
funcObj.Spec.Handler,
funcObj.Spec.FunctionContentType,
funcObj.Spec.Runtime,
provisionImage,
runtimeVolumeMount,
srcVolumeMount,
lr,
)
if err != nil {
return err
}
result.InitContainers = []v1.Container{provisionContainer}
}
// Add the imagesecrets if present to pull images from private docker registry
if funcObj.Spec.Runtime != "" {
imageSecrets, err := lr.GetImageSecrets(funcObj.Spec.Runtime)
if err != nil {
return fmt.Errorf("Unable to fetch ImagePullSecrets, %v", err)
}
result.ImagePullSecrets = append(result.ImagePullSecrets, imageSecrets...)
}
// ensure that the runtime is supported for installing dependencies
_, err := lr.GetRuntimeInfo(funcObj.Spec.Runtime)
if funcObj.Spec.Deps != "" && err != nil {
return fmt.Errorf("Unable to install dependencies for the runtime %s", funcObj.Spec.Runtime)
} else if funcObj.Spec.Deps != "" {
envVars := []v1.EnvVar{}
if len(result.Containers) > 0 {
envVars = result.Containers[0].Env
}
depsChecksum, err := getChecksum(funcObj.Spec.Deps)
if err != nil {
return fmt.Errorf("Unable to obtain dependencies checksum: %v", err)
}
depsInstallContainer, err := lr.GetBuildContainer(funcObj.Spec.Runtime, depsChecksum, envVars, runtimeVolumeMount)
if err != nil {
return err
}
if depsInstallContainer.Name != "" {
result.InitContainers = append(
result.InitContainers,
depsInstallContainer,
)
}
}
// add compilation init container if needed
if lr.RequiresCompilation(funcObj.Spec.Runtime) {
		_, funcName, err := splitHandler(funcObj.Spec.Handler)
		if err != nil {
			return err
		}
compContainer, err := lr.GetCompilationContainer(funcObj.Spec.Runtime, funcName, runtimeVolumeMount)
if err != nil {
return err
}
result.InitContainers = append(
result.InitContainers,
compContainer,
)
}
return nil
}
// EnsureFuncImage creates a Job to build a function image
func EnsureFuncImage(client kubernetes.Interface, funcObj *kubelessApi.Function, lr *langruntime.Langruntimes, or []metav1.OwnerReference, imageName, tag, builderImage, registryHost, dockerSecretName, provisionImage string, registryTLSEnabled bool, imagePullSecrets []v1.LocalObjectReference) error {
if len(tag) < 64 {
return fmt.Errorf("Expecting sha256 as image tag")
}
jobName := fmt.Sprintf("build-%s-%s", funcObj.ObjectMeta.Name, tag[0:10])
_, err := client.BatchV1().Jobs(funcObj.ObjectMeta.Namespace).Get(jobName, metav1.GetOptions{})
if err == nil {
// The job already exists
logrus.Infof("Found a previous job for building %s:%s", imageName, tag)
return nil
}
podSpec := v1.PodSpec{
RestartPolicy: v1.RestartPolicyOnFailure,
}
runtimeVolumeMount := getRuntimeVolumeMount(funcObj.ObjectMeta.Name)
err = populatePodSpec(funcObj, lr, &podSpec, runtimeVolumeMount, provisionImage, imagePullSecrets)
if err != nil {
return err
}
// Add a final initContainer to create the function bundle.tar
prepareContainer := v1.Container{}
for _, c := range podSpec.InitContainers {
if c.Name == "prepare" {
prepareContainer = c
}
}
podSpec.InitContainers = append(podSpec.InitContainers, v1.Container{
Name: "bundle",
Command: []string{"sh", "-c"},
Args: []string{fmt.Sprintf("tar cvf %s/bundle.tar %s/*", runtimeVolumeMount.MountPath, runtimeVolumeMount.MountPath)},
VolumeMounts: prepareContainer.VolumeMounts,
Image: provisionImage,
})
buildJob := batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: jobName,
Namespace: funcObj.ObjectMeta.Namespace,
OwnerReferences: or,
Labels: map[string]string{
"created-by": "kubeless",
"function": funcObj.ObjectMeta.Name,
},
},
Spec: batchv1.JobSpec{
Template: v1.PodTemplateSpec{
Spec: podSpec,
},
},
}
baseImage, err := lr.GetFunctionImage(funcObj.Spec.Runtime)
if err != nil {
return err
}
// Registry volume
dockerCredsVol := dockerSecretName
dockerCredsVolMountPath := "/docker"
registryCredsVolume := v1.Volume{
Name: dockerCredsVol,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: dockerSecretName,
},
},
}
buildJob.Spec.Template.Spec.Volumes = append(buildJob.Spec.Template.Spec.Volumes, registryCredsVolume)
args := []string{
"/imbuilder",
"add-layer",
}
if !registryTLSEnabled {
args = append(args, "--insecure")
}
args = append(args,
"--src", fmt.Sprintf("docker://%s", baseImage),
"--dst", fmt.Sprintf("docker://%s/%s:%s", registryHost, imageName, tag),
fmt.Sprintf("%s/bundle.tar", podSpec.InitContainers[0].VolumeMounts[0].MountPath),
)
// Add main container
buildJob.Spec.Template.Spec.Containers = []v1.Container{
{
Name: "build",
Image: builderImage,
VolumeMounts: append(prepareContainer.VolumeMounts,
v1.VolumeMount{
Name: dockerCredsVol,
MountPath: dockerCredsVolMountPath,
},
),
Env: []v1.EnvVar{
{
Name: "DOCKER_CONFIG_FOLDER",
Value: dockerCredsVolMountPath,
},
},
Args: args,
},
}
	// Create the job if it doesn't exist yet
_, err = client.BatchV1().Jobs(funcObj.ObjectMeta.Namespace).Create(&buildJob)
if err == nil {
logrus.Infof("Started function build job %s", jobName)
}
return err
}
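// svcPort returns the port exposed by the function service, defaulting to 8080
// when the function spec does not define any ports.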
func svcPort(funcObj *kubelessApi.Function) int32 {
if len(funcObj.Spec.ServiceSpec.Ports) == 0 {
return int32(8080)
}
return funcObj.Spec.ServiceSpec.Ports[0].Port
}
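// mergeMap copies every entry of src into dst (allocating dst if it is empty)
// and returns the merged map.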
func mergeMap(dst, src map[string]string) map[string]string {
if len(dst) == 0 {
dst = make(map[string]string)
}
for k, v := range src {
dst[k] = v
}
return dst
}
// EnsureFuncDeployment creates/updates a function deployment
func EnsureFuncDeployment(client kubernetes.Interface, funcObj *kubelessApi.Function, or []metav1.OwnerReference, lr *langruntime.Langruntimes, prebuiltRuntimeImage, provisionImage string, imagePullSecrets []v1.LocalObjectReference) error {
var err error
podAnnotations := map[string]string{
// Attempt to attract the attention of prometheus.
// For runtimes that don't support /metrics,
// prometheus will get a 404 and mostly silently
// ignore the pod (still displayed in the list of
// "targets")
"prometheus.io/scrape": "true",
"prometheus.io/path": "/metrics",
"prometheus.io/port": strconv.Itoa(int(svcPort(funcObj))),
}
maxUnavailable := intstr.FromInt(0)
	// Copy the function's Spec.Deployment as the base for the new deployment
dpm := funcObj.Spec.Deployment.DeepCopy()
dpm.OwnerReferences = or
dpm.ObjectMeta.Name = funcObj.ObjectMeta.Name
dpm.Spec.Selector = &metav1.LabelSelector{
MatchLabels: funcObj.ObjectMeta.Labels,
}
dpm.Spec.Strategy = v1beta1.DeploymentStrategy{
RollingUpdate: &v1beta1.RollingUpdateDeployment{
MaxUnavailable: &maxUnavailable,
},
}
	// Merge the function's labels and annotations into the deployment and its pod template
dpm.Labels = mergeMap(dpm.Labels, funcObj.Labels)
dpm.Spec.Template.Labels = mergeMap(dpm.Spec.Template.Labels, funcObj.Labels)
dpm.Annotations = mergeMap(dpm.Annotations, funcObj.Annotations)
dpm.Spec.Template.Annotations = mergeMap(dpm.Spec.Template.Annotations, funcObj.Annotations)
dpm.Spec.Template.Annotations = mergeMap(dpm.Spec.Template.Annotations, podAnnotations)
if len(dpm.Spec.Template.Spec.Containers) == 0 {
dpm.Spec.Template.Spec.Containers = append(dpm.Spec.Template.Spec.Containers, v1.Container{})
}
runtimeVolumeMount := getRuntimeVolumeMount(funcObj.ObjectMeta.Name)
if funcObj.Spec.Handler != "" && funcObj.Spec.Function != "" {
modName, handlerName, err := splitHandler(funcObj.Spec.Handler)
if err != nil {
return err
}
//only resolve the image name and build the function if it has not been built already
if dpm.Spec.Template.Spec.Containers[0].Image == "" && prebuiltRuntimeImage == "" {
err := populatePodSpec(funcObj, lr, &dpm.Spec.Template.Spec, runtimeVolumeMount, provisionImage, imagePullSecrets)
if err != nil {
return err
}
imageName, err := lr.GetFunctionImage(funcObj.Spec.Runtime)
if err != nil {
return err
}
dpm.Spec.Template.Spec.Containers[0].Image = imageName
dpm.Spec.Template.Spec.Containers[0].VolumeMounts = append(dpm.Spec.Template.Spec.Containers[0].VolumeMounts, runtimeVolumeMount)
} else {
if dpm.Spec.Template.Spec.Containers[0].Image == "" {
dpm.Spec.Template.Spec.Containers[0].Image = prebuiltRuntimeImage
}
dpm.Spec.Template.Spec.ImagePullSecrets = imagePullSecrets
}
timeout := funcObj.Spec.Timeout
if timeout == "" {
// Set default timeout to 180 seconds
timeout = defaultTimeout
}
dpm.Spec.Template.Spec.Containers[0].Env = append(dpm.Spec.Template.Spec.Containers[0].Env,
v1.EnvVar{
Name: "FUNC_HANDLER",
Value: handlerName,
},
v1.EnvVar{
Name: "MOD_NAME",
Value: modName,
},
v1.EnvVar{
Name: "FUNC_TIMEOUT",
Value: timeout,
},
v1.EnvVar{
Name: "FUNC_RUNTIME",
Value: funcObj.Spec.Runtime,
},
v1.EnvVar{
Name: "FUNC_MEMORY_LIMIT",
Value: dpm.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(),
},
)
}
dpm.Spec.Template.Spec.Containers[0].Env = append(dpm.Spec.Template.Spec.Containers[0].Env,
v1.EnvVar{
Name: "FUNC_PORT",
Value: strconv.Itoa(int(svcPort(funcObj))),
},
)
dpm.Spec.Template.Spec.Containers[0].Name = funcObj.ObjectMeta.Name
dpm.Spec.Template.Spec.Containers[0].Ports = append(dpm.Spec.Template.Spec.Containers[0].Ports, v1.ContainerPort{
ContainerPort: svcPort(funcObj),
})
// update deployment for loading dependencies
lr.UpdateDeployment(dpm, runtimeVolumeMount.MountPath, funcObj.Spec.Runtime)
livenessProbe := &v1.Probe{
InitialDelaySeconds: int32(3),
PeriodSeconds: int32(30),
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(int(svcPort(funcObj))),
},
},
}
dpm.Spec.Template.Spec.Containers[0].LivenessProbe = livenessProbe
// Add security context
runtimeUser := int64(1000)
if dpm.Spec.Template.Spec.SecurityContext == nil {
dpm.Spec.Template.Spec.SecurityContext = &v1.PodSecurityContext{
RunAsUser: &runtimeUser,
FSGroup: &runtimeUser,
}
}
_, err = client.ExtensionsV1beta1().Deployments(funcObj.ObjectMeta.Namespace).Create(dpm)
if err != nil && k8sErrors.IsAlreadyExists(err) {
// In case the Deployment already exists we should update
// just certain fields (to avoid race conditions)
var newDpm *v1beta1.Deployment
		newDpm, err = client.ExtensionsV1beta1().Deployments(funcObj.ObjectMeta.Namespace).Get(funcObj.ObjectMeta.Name, metav1.GetOptions{})
		if err != nil {
			return err
		}
newDpm.ObjectMeta.Labels = funcObj.ObjectMeta.Labels
newDpm.ObjectMeta.Annotations = funcObj.Spec.Deployment.ObjectMeta.Annotations
newDpm.ObjectMeta.OwnerReferences = or
// We should maintain previous selector to avoid duplicated ReplicaSets
selector := newDpm.Spec.Selector
newDpm.Spec = dpm.Spec
newDpm.Spec.Selector = selector
data, err := json.Marshal(newDpm)
if err != nil {
return err
}
// Use `Patch` to do a rolling update
_, err = client.ExtensionsV1beta1().Deployments(funcObj.ObjectMeta.Namespace).Patch(newDpm.Name, types.MergePatchType, data)
if err != nil {
return err
}
}
return err
}
// EnsureCronJob creates/updates a function cron job
func EnsureCronJob(client kubernetes.Interface, funcObj *kubelessApi.Function, schedule, reqImage string, or []metav1.OwnerReference, reqImagePullSecret []v1.LocalObjectReference) error {
	var maxSuccessfulHist, maxFailedHist int32
	maxSuccessfulHist = 3
	maxFailedHist = 1
var timeout int
if funcObj.Spec.Timeout != "" {
var err error
timeout, err = strconv.Atoi(funcObj.Spec.Timeout)
if err != nil {
return fmt.Errorf("Unable convert %s to a valid timeout", funcObj.Spec.Timeout)
}
} else {
timeout, _ = strconv.Atoi(defaultTimeout)
}
activeDeadlineSeconds := int64(timeout)
jobName := fmt.Sprintf("trigger-%s", funcObj.ObjectMeta.Name)
var headersString = ""
timestamp := time.Now().UTC()
eventID, err := GetRandString(11)
if err != nil {
return fmt.Errorf("Failed to create a event-ID %v", err)
}
headersString = headersString + " -H \"event-id: " + eventID + "\""
headersString = headersString + " -H \"event-time: " + timestamp.String() + "\""
headersString = headersString + " -H \"event-type: application/json\""
headersString = headersString + " -H \"event-namespace: cronjobtrigger.kubeless.io\""
job := &batchv1beta1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: jobName,
Namespace: funcObj.ObjectMeta.Namespace,
Labels: funcObj.ObjectMeta.Labels,
OwnerReferences: or,
},
Spec: batchv1beta1.CronJobSpec{
Schedule: schedule,
			SuccessfulJobsHistoryLimit: &maxSuccessfulHist,
FailedJobsHistoryLimit: &maxFailedHist,
JobTemplate: batchv1beta1.JobTemplateSpec{
Spec: batchv1.JobSpec{
ActiveDeadlineSeconds: &activeDeadlineSeconds,
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
ImagePullSecrets: reqImagePullSecret,
Containers: []v1.Container{
{
Image: reqImage,
Name: "trigger",
Args: []string{"curl", "-Lv", headersString, fmt.Sprintf("http://%s.%s.svc.cluster.local:8080", funcObj.ObjectMeta.Name, funcObj.ObjectMeta.Namespace)},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
},
},
},
},
}
_, err = client.BatchV1beta1().CronJobs(funcObj.ObjectMeta.Namespace).Create(job)
if err != nil && k8sErrors.IsAlreadyExists(err) {
newCronJob := &batchv1beta1.CronJob{}
newCronJob, err = client.BatchV1beta1().CronJobs(funcObj.ObjectMeta.Namespace).Get(jobName, metav1.GetOptions{})
if err != nil {
return err
}
newCronJob.ObjectMeta.Labels = funcObj.ObjectMeta.Labels
newCronJob.ObjectMeta.OwnerReferences = or
newCronJob.Spec = job.Spec
_, err = client.BatchV1beta1().CronJobs(funcObj.ObjectMeta.Namespace).Update(newCronJob)
}
return err
}
// CreateServiceMonitor creates a Service Monitor for the given function
func CreateServiceMonitor(smclient monitoringv1alpha1.MonitoringV1alpha1Client, funcObj *kubelessApi.Function, ns string, or []metav1.OwnerReference) error {
_, err := smclient.ServiceMonitors(ns).Get(funcObj.ObjectMeta.Name, metav1.GetOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
s := &monitoringv1alpha1.ServiceMonitor{
ObjectMeta: metav1.ObjectMeta{
Name: funcObj.ObjectMeta.Name,
Namespace: ns,
Labels: map[string]string{
"service-monitor": "function",
},
OwnerReferences: or,
},
Spec: monitoringv1alpha1.ServiceMonitorSpec{
Selector: metav1.LabelSelector{
MatchLabels: map[string]string{
"function": funcObj.ObjectMeta.Name,
},
},
Endpoints: []monitoringv1alpha1.Endpoint{
{
Port: "http-function-port",
},
},
},
}
_, err = smclient.ServiceMonitors(ns).Create(s)
if err != nil {
return err
}
}
return nil
}
return fmt.Errorf("service monitor has already existed")
}
// GetOwnerReference returns an ownerRef for appending to an object's metadata
func GetOwnerReference(kind, apiVersion, name string, uid types.UID) ([]metav1.OwnerReference, error) {
if name == "" {
return []metav1.OwnerReference{}, fmt.Errorf("name can't be empty")
}
if uid == "" {
return []metav1.OwnerReference{}, fmt.Errorf("uid can't be empty")
}
return []metav1.OwnerReference{
{
Kind: kind,
APIVersion: apiVersion,
Name: name,
UID: uid,
},
}, nil
}
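// getConfigLocation resolves the namespace and name of the Kubeless ConfigMap,
// preferring the KUBELESS_NAMESPACE/KUBELESS_CONFIG environment variables, then
// the annotations on the functions.kubeless.io CRD, then the defaults
// "kubeless" and "kubeless-config".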
func getConfigLocation(apiExtensionsClientset clientsetAPIExtensions.Interface) (ConfigLocation, error) {
configLocation := ConfigLocation{}
controllerNamespace := os.Getenv("KUBELESS_NAMESPACE")
kubelessConfig := os.Getenv("KUBELESS_CONFIG")
annotationsCRD, err := GetAnnotationsFromCRD(apiExtensionsClientset, "functions.kubeless.io")
if err != nil {
return configLocation, err
}
if len(controllerNamespace) == 0 {
if ns, ok := annotationsCRD["kubeless.io/namespace"]; ok {
controllerNamespace = ns
} else {
controllerNamespace = "kubeless"
}
}
configLocation.Namespace = controllerNamespace
if len(kubelessConfig) == 0 {
if config, ok := annotationsCRD["kubeless.io/config"]; ok {
kubelessConfig = config
} else {
kubelessConfig = "kubeless-config"
}
}
configLocation.Name = kubelessConfig
return configLocation, nil
}
// GetKubelessConfig returns the Kubeless ConfigMap
func GetKubelessConfig(cli kubernetes.Interface, cliAPIExtensions clientsetAPIExtensions.Interface) (*v1.ConfigMap, error) {
configLocation, err := getConfigLocation(cliAPIExtensions)
if err != nil {
return nil, fmt.Errorf("Error while fetching config location: %v", err)
}
controllerNamespace := configLocation.Namespace
kubelessConfig := configLocation.Name
config, err := cli.CoreV1().ConfigMaps(controllerNamespace).Get(kubelessConfig, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("Unable to read the configmap: %s", err)
}
return config, nil
}
| [
"\"KUBELESS_NAMESPACE\"",
"\"KUBELESS_CONFIG\""
]
| []
| [
"KUBELESS_CONFIG",
"KUBELESS_NAMESPACE"
]
| [] | ["KUBELESS_CONFIG", "KUBELESS_NAMESPACE"] | go | 2 | 0 | |
example/main.go | package main
import (
"fmt"
"log"
"os"
"github.com/DiFronzo/blockchair"
)
var clientID string
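// init loads the Blockchair API key from the API_KEY environment variable.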
func init() {
clientID = os.Getenv("API_KEY")
}
func main() {
c := blockchair.New()
c.APIKey = clientID
resp, err := c.GetAddressEthAdv("ethereum", "0x3282791d6fd713f1e94f4bfd565eaa78b3a0599d", map[string]string{"limit": "1", "offset": "0"})
if err != nil {
log.Fatalln(err)
}
for i := range resp.Data {
fmt.Printf("Type: %v\n", resp.Data[i].Address.Type)
fmt.Printf("Spent in USD: %v\n", resp.Data[i].Address.SpentUsd)
fmt.Printf("Number of transactions: %v\n", resp.Data[i].Address.TransactionCount)
for j := range resp.Data[i].Calls {
fmt.Printf("\nTransaction number %v:\n", j+1)
fmt.Printf("ID: %v\n", resp.Data[i].Calls[j].BlockID)
fmt.Printf("Value in USD: %v\n", resp.Data[i].Calls[j].ValueUsd)
}
}
}
| [
"\"API_KEY\""
]
| []
| [
"API_KEY"
]
| [] | ["API_KEY"] | go | 1 | 0 | |
vendor/github.com/influx6/gobuild/srcpath/srcpath.go | package srcpath
import (
"os"
"path/filepath"
)
var (
goPath = os.Getenv("GOPATH")
goRoot = os.Getenv("GOROOT")
goSrcPath = filepath.Join(goPath, "src")
goRootSrcPath = filepath.Join(goRoot, "src")
)
// RootPath returns current go src path.
func RootPath() string {
return goRoot
}
// SrcPath returns current go src path.
func SrcPath() string {
return goSrcPath
}
// FromRootPath returns the giving path as absolute from the GOROOT path
// where the internal packages are stored.
func FromRootPath(pr string) string {
return filepath.Join(goRootSrcPath, pr)
}
// FromSrcPath returns the giving path as absolute from the gosrc path.
func FromSrcPath(pr string) string {
return filepath.Join(goSrcPath, pr)
}
// RelativeToRoot returns a path that is relative to GOROOT/src path.
// Where GOROOT, is where the go runtime src is located.
func RelativeToRoot(path string) (string, error) {
return filepath.Rel(goRootSrcPath, path)
}
// RelativeToSrc returns a path that is relative to the go src path.
func RelativeToSrc(path string) (string, error) {
return filepath.Rel(goSrcPath, path)
}
| [
"\"GOPATH\"",
"\"GOROOT\""
]
| []
| [
"GOPATH",
"GOROOT"
]
| [] | ["GOPATH", "GOROOT"] | go | 2 | 0 | |
istio-1.9.0/samples/bookinfo/src/reviews/reviews-application/src/main/java/application/rest/LibertyRestEndpoint.java | /*******************************************************************************
* Copyright (c) 2017 Istio Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package application.rest;
import javax.json.Json;
import javax.json.JsonObject;
import javax.json.JsonObjectBuilder;
import javax.json.JsonValue;
import javax.json.JsonArrayBuilder;
import javax.json.JsonArray;
import javax.json.JsonReader;
import javax.json.*;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.ProcessingException;
import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Invocation;
import javax.ws.rs.client.WebTarget;
import javax.ws.rs.core.Application;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.StringReader;
import java.io.StringWriter;
import java.io.Writer;
import java.util.UUID;
import java.util.Map;
import java.time.Instant;
@Path("/")
public class LibertyRestEndpoint extends Application {
private final static Boolean ratings_enabled = true; //Boolean.valueOf(System.getenv("ENABLE_RATINGS"));
private final static String star_color = System.getenv("STAR_COLOR") == null ? "black" : System.getenv("STAR_COLOR");
private final static String services_domain = System.getenv("SERVICES_DOMAIN") == null ? "" : ("." + System.getenv("SERVICES_DOMAIN"));
private final static String ratings_hostname = System.getenv("RATINGS_HOSTNAME") == null ? "ratings" : System.getenv("RATINGS_HOSTNAME");
//private final static String ratings_service = "http://" + ratings_hostname + services_domain + ":9080/ratings";
private final static String ratings_service = "http://host.docker.internal:8899/ratings";
private final static String service_uuid = "reviewsservice-" + UUID.randomUUID().toString();
private static String ratings_response = "";
private static String ratings_request = "";
// HTTP headers to propagate for distributed tracing are documented at
// https://istio.io/docs/tasks/telemetry/distributed-tracing/overview/#trace-context-propagation
private final static String[] headers_to_propagate = {
"fi-trace",
// All applications should propagate x-request-id. This header is
// included in access log statements and is used for consistent trace
// sampling and log sampling decisions in Istio.
"x-request-id",
// Lightstep tracing header. Propagate this if you use lightstep tracing
// in Istio (see
// https://istio.io/latest/docs/tasks/observability/distributed-tracing/lightstep/)
// Note: this should probably be changed to use B3 or W3C TRACE_CONTEXT.
// Lightstep recommends using B3 or TRACE_CONTEXT and most application
// libraries from lightstep do not support x-ot-span-context.
"x-ot-span-context",
// Datadog tracing header. Propagate these headers if you use Datadog
// tracing.
"x-datadog-trace-id",
"x-datadog-parent-id",
"x-datadog-sampling-priority",
// W3C Trace Context. Compatible with OpenCensusAgent and Stackdriver Istio
// configurations.
"traceparent",
"tracestate",
// Cloud trace context. Compatible with OpenCensusAgent and Stackdriver Istio
// configurations.
"x-cloud-trace-context",
    // Grpc binary trace context. Compatible with OpenCensusAgent and
    // Stackdriver Istio configurations.
"grpc-trace-bin",
// b3 trace headers. Compatible with Zipkin, OpenCensusAgent, and
// Stackdriver Istio configurations. Commented out since they are
// propagated by the OpenTracing tracer above.
"x-b3-traceid",
"x-b3-spanid",
"x-b3-parentspanid",
"x-b3-sampled",
"x-b3-flags",
// Application-specific headers to forward.
"end-user",
"user-agent",
};
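    // Copies an immutable JsonObject into a fresh builder so that additional
    // fields can be appended before rebuilding it.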
private JsonObjectBuilder jsonObjectToBuilder(JsonObject obj) {
JsonObjectBuilder job = Json.createObjectBuilder();
for (Map.Entry<String, JsonValue> entry : obj.entrySet()) {
job.add(entry.getKey(), entry.getValue());
}
return job;
}
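    // Builds the reviews JSON payload for the given product, embedding each
    // reviewer's star rating (or an error entry) when ratings are enabled.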
private String getJsonResponse (String productId, int starsReviewer1, int starsReviewer2) {
String result = "{";
result += "\"id\": \"" + productId + "\",";
result += "\"reviews\": [";
// reviewer 1:
result += "{";
result += " \"reviewer\": \"Reviewer1\",";
result += " \"text\": \"An extremely entertaining play by Shakespeare. The slapstick humour is refreshing!\"";
if (ratings_enabled) {
if (starsReviewer1 != -1) {
result += ", \"rating\": {\"stars\": " + starsReviewer1 + ", \"color\": \"" + star_color + "\"}";
}
else {
result += ", \"rating\": {\"error\": \"Ratings service is currently unavailable\"}";
}
}
result += "},";
// reviewer 2:
result += "{";
result += " \"reviewer\": \"Reviewer2\",";
result += " \"text\": \"Absolutely fun and entertaining. The play lacks thematic depth when compared to other plays by Shakespeare.\"";
if (ratings_enabled) {
if (starsReviewer2 != -1) {
result += ", \"rating\": {\"stars\": " + starsReviewer2 + ", \"color\": \"" + star_color + "\"}";
}
else {
result += ", \"rating\": {\"error\": \"Ratings service is currently unavailable\"}";
}
}
result += "}";
result += "]";
result += "}";
return result;
}
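    // Calls the ratings service for the given product, forwarding the trace
    // propagation headers (with fi-trace replaced by the locally built trace),
    // and returns the parsed JSON body, or null if the call fails.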
private JsonObject getRatings(String productId, HttpHeaders requestHeaders) {
// Builder for Client HTTP request to Node js app for ratings
ClientBuilder cb = ClientBuilder.newBuilder();
Integer timeout = star_color.equals("black") ? 10000 : 2500;
cb.property("com.ibm.ws.jaxrs.client.connection.timeout", timeout);
cb.property("com.ibm.ws.jaxrs.client.receive.timeout", timeout);
// Client Connection
Client client = cb.build();
// Target for ratings application
// "http://" + ratings_hostname + services_domain + ":9080/ratings" / productId
WebTarget ratingsTarget = client.target(ratings_service + "/" + productId);
// Request for Json response
Invocation.Builder builder = ratingsTarget.request(MediaType.APPLICATION_JSON);
for (String header : headers_to_propagate) {
String value = requestHeaders.getHeaderString(header);
if (header.equals("fi-trace")) {
builder.header(header, ratings_request);
continue;
}
if (value != null) {
builder.header(header,value);
}
}
try {
Response r = builder.get();
ratings_response = r.getHeaderString("fi-trace");
int statusCode = r.getStatusInfo().getStatusCode();
if (statusCode == Response.Status.OK.getStatusCode()) {
try (StringReader stringReader = new StringReader(r.readEntity(String.class));
JsonReader jsonReader = Json.createReader(stringReader)) {
return jsonReader.readObject();
}
} else {
System.out.println("Error: unable to contact " + ratings_service + " got status of " + statusCode);
return null;
}
} catch (ProcessingException e) {
System.err.println("Error: unable to contact " + ratings_service + " got exception " + e);
return null;
}
}
@GET
@Path("/health")
public Response health() {
return Response.ok().type(MediaType.APPLICATION_JSON).entity("{\"status\": \"Reviews is healthy\"}").build();
}
@GET
@Path("/reviews/{productId}")
public Response bookReviewsById(@PathParam("productId") int productId, @Context HttpHeaders requestHeaders) {
int starsReviewer1 = -1;
int starsReviewer2 = -1;
/*
1. Handle record of Request
*/
String traceMeta = requestHeaders.getHeaderString("fi-trace");
JsonReader reader = Json.createReader(new StringReader(traceMeta));
JsonObject jsonObject = reader.readObject();
// New object builder
JsonObjectBuilder job = Json.createObjectBuilder();
JsonArrayBuilder recordBuilder = Json.createArrayBuilder();
String last_uuid = "";
String original_uuid = "";
// Iterate through incoming records
for (String key : jsonObject.keySet()) {
// If records we need to add a new record for the request
if (key.equals("records")) {
JsonArray jsonArray = jsonObject.getJsonArray(key);
for (JsonValue jsonVal: jsonArray) {
recordBuilder.add(jsonVal);
JsonObject recObject = (JsonObject) jsonVal;
last_uuid = recObject.getString("uuid");
}
JsonObject requestRecord = Json.createObjectBuilder()
.add("message_name", "Products Reviews Request")
.add("service", service_uuid)
.add("timestamp", System.currentTimeMillis())
.add("type", 2)
.add("uuid", last_uuid)
.build();
recordBuilder.add(requestRecord);
} else {
job.add(key, jsonObject.get(key));
}
}
original_uuid = last_uuid;
JsonArray newArray;
Writer writer = new StringWriter();
String jsonString = "";
if (ratings_enabled) {
// Call to Node JS Application
JsonObject ratingsRequestRecord = Json.createObjectBuilder()
.add("message_name", "Products Ratings Request")
.add("service", service_uuid)
.add("timestamp", System.currentTimeMillis())
.add("type", 1)
.add("uuid", UUID.randomUUID().toString())
.build();
recordBuilder.add(ratingsRequestRecord);
newArray = recordBuilder.build();
job.add("records", newArray);
// Newly created Object with new record
jsonObject = job.build();
writer = new StringWriter();
Json.createWriter(writer).write(jsonObject);
/*
Finish adding record or Response
*/
jsonString = writer.toString();
ratings_request = jsonString;
JsonObject ratingsResponse = getRatings(Integer.toString(productId), requestHeaders);
if (ratingsResponse != null) {
if (ratingsResponse.containsKey("ratings")) {
JsonObject ratings = ratingsResponse.getJsonObject("ratings");
if (ratings.containsKey("Reviewer1")){
starsReviewer1 = ratings.getInt("Reviewer1");
}
if (ratings.containsKey("Reviewer2")){
starsReviewer2 = ratings.getInt("Reviewer2");
}
}
}
}
/*
Get fi-trace response from ratings service
*/
String jsonResStr = getJsonResponse(Integer.toString(productId), starsReviewer1, starsReviewer2);
reader = Json.createReader(new StringReader(ratings_response));
jsonObject = reader.readObject();
// New object builder
job = Json.createObjectBuilder();
recordBuilder = Json.createArrayBuilder();
last_uuid = "";
// Iterate through incoming records
for (String key : jsonObject.keySet()) {
// If records we need to add a new record for the request
if (key.equals("records")) {
JsonArray jsonArray = jsonObject.getJsonArray(key);
for (JsonValue jsonVal: jsonArray) {
recordBuilder.add(jsonVal);
JsonObject recObject = (JsonObject) jsonVal;
last_uuid = recObject.getString("uuid");
}
JsonObject ratingsResponse = Json.createObjectBuilder()
.add("message_name", "Products Ratings Response")
.add("service", service_uuid)
.add("timestamp", System.currentTimeMillis())
.add("type", 2)
.add("uuid", last_uuid)
.build();
recordBuilder.add(ratingsResponse);
} else {
job.add(key, jsonObject.get(key));
}
}
JsonObject productpageResponse = Json.createObjectBuilder()
.add("message_name", "Products Review Response")
.add("service", service_uuid)
.add("timestamp", System.currentTimeMillis())
.add("type", 1)
.add("uuid", original_uuid)
.build();
recordBuilder.add(productpageResponse);
newArray = recordBuilder.build();
job.add("records", newArray);
// Newly created Object with new record
jsonObject = job.build();
writer = new StringWriter();
Json.createWriter(writer).write(jsonObject);
jsonString = writer.toString();
return Response.ok().type(MediaType.APPLICATION_JSON).entity(jsonResStr).header("fi-trace", jsonString).build();
}
}
| [
"\"ENABLE_RATINGS\"",
"\"STAR_COLOR\"",
"\"STAR_COLOR\"",
"\"SERVICES_DOMAIN\"",
"\"SERVICES_DOMAIN\"",
"\"RATINGS_HOSTNAME\"",
"\"RATINGS_HOSTNAME\""
]
| []
| [
"SERVICES_DOMAIN",
"ENABLE_RATINGS",
"STAR_COLOR",
"RATINGS_HOSTNAME"
]
| [] | ["SERVICES_DOMAIN", "ENABLE_RATINGS", "STAR_COLOR", "RATINGS_HOSTNAME"] | java | 4 | 0 | |
python/scannerpy/table.py |
import struct
from timeit import default_timer as now
from scannerpy.common import *
from scannerpy.column import Column
class Table:
"""
A table in a Database.
Can be part of many Collection objects.
"""
def __init__(self, db, name, id):
self._db = db
# We pass name and id to avoid having to read the descriptor
self._name = name
self._id = id
self._descriptor = None
self._video_descriptors = None
def id(self):
return self._id
def name(self):
return self._name
def _need_descriptor(self):
if self._descriptor is None:
self._descriptor = self._db._load_table_metadata([self._name])[0]
def _load_column(self, name):
if not self.committed():
raise ScannerException('Table has not committed yet.')
self._need_descriptor()
if self._video_descriptors is None:
self._video_descriptors = []
for c in self._descriptor.columns:
video_descriptor = None
if c.type == self._db.protobufs.Video:
video_descriptor = self._db._load_descriptor(
self._db.protobufs.VideoDescriptor,
'tables/{:d}/{:d}_0_video_metadata.bin'.format(
self._id, c.id))
self._video_descriptors.append(video_descriptor)
for i, c in enumerate(self._descriptor.columns):
if c.name == name:
return c, self._video_descriptors[i]
raise ScannerException('Column {} not found in Table {}'.format(
name, self._name))
def _load_job(self):
self._need_descriptor()
if self._descriptor.job_id != -1:
self._job = self._db._load_descriptor(
self._db.protobufs.JobDescriptor,
'jobs/{}/descriptor.bin'.format(self._descriptor.job_id))
self._task = None
for task in self._job.tasks:
if task.output_table_name == self._name:
self._task = task
if self._task is None:
raise ScannerException('Table {} not found in job {}'.format(
self._name, self._descriptor.job_id))
else:
self._job = None
# HACK(wcrichto): reading from TableDescriptor to avoid loading VideoDescriptors
def column_names(self):
self._need_descriptor()
return [c.name for c in self._descriptor.columns]
def column(self, name):
return Column(self, name)
def num_rows(self):
self._need_descriptor()
if len(self._descriptor.end_rows) > 0:
return self._descriptor.end_rows[-1]
else:
return 0
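    # Decode a row index stored as a native-order unsigned 64-bit integer.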
def _parse_index(self, bufs, db):
return struct.unpack("=Q", bufs[0])[0]
def committed(self):
return self._db._table_committed[self._id]
def parent_rows(self):
self._need_descriptor()
if self._descriptor.job_id == -1:
raise ScannerException('Table {} has no parent'.format(
self.name()))
return [i for _, i in self.load(['index'], fn=self._parse_index)]
def profiler(self):
if not self.committed():
raise ScannerException('Table has not committed yet.')
self._need_descriptor()
if self._descriptor.job_id != -1:
return self._db.profiler(self._descriptor.job_id)
else:
raise ScannerException('Ingested videos do not have profile data')
def load(self, columns, fn=None, rows=None):
if not self.committed():
raise ScannerException('Table has not committed yet.')
cols = [self.column(c).load(rows=rows) for c in columns]
for tup in zip(*cols):
if fn is not None:
yield fn(tup, self._db)
else:
yield tup
| []
| []
| []
| [] | [] | python | null | null | null |
pretrained-model/stt/hubert/conformer-tiny-ctc.py | import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import pyroomacoustics as pra
import numpy as np
from pydub import AudioSegment
from sklearn.utils import shuffle
from glob import glob
import random
import json
from malaya_speech.train.model.conformer.model import Model as ConformerModel
from malaya_speech.train.model import hubert, ctc
import malaya_speech.train as train
import malaya_speech.config
import malaya_speech.augmentation.waveform as augmentation
import malaya_speech
import tensorflow as tf
import string
sr = 16000
maxlen = 18
minlen_text = 1
prob_aug = 0.95
unique_vocab = [''] + list(string.ascii_lowercase + string.digits) + [' ']
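# Simulate room reverberation: place the signal and a microphone in a
# pyroomacoustics shoebox room and return the simulated recording.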
def augment_room(y, scale=1.0):
corners = np.array(
[[0, 0], [0, 5 * scale], [3 * scale, 5 * scale], [3 * scale, 0]]
).T
room = pra.Room.from_corners(
corners,
fs=sr,
materials=pra.Material(0.2, 0.15),
ray_tracing=True,
air_absorption=True,
)
room.extrude(3.5, materials=pra.Material(0.2, 0.15))
room.set_ray_tracing(
receiver_radius=0.5, n_rays=1000, energy_thres=1e-5
)
room.add_source([1.5 * scale, 4 * scale, 0.5], signal=y)
R = np.array([[1.5 * scale], [0.5 * scale], [0.5]])
room.add_microphone(R)
room.simulate()
return room.mic_array.signals[0]
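# Randomly amplify samples whose magnitude exceeds the threshold, then clip back to [-1, 1].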
def random_amplitude_threshold(sample, low=1, high=2, threshold=0.4):
y_aug = sample.copy()
dyn_change = np.random.uniform(low=low, high=high)
y_aug[np.abs(y_aug) >= threshold] = (
y_aug[np.abs(y_aug) >= threshold] * dyn_change
)
return np.clip(y_aug, -1, 1)
def add_uniform_noise(
sample, power=0.01, return_noise=False, scale=False
):
y_noise = sample.copy()
noise_amp = power * np.random.uniform() * np.amax(y_noise)
noise = noise_amp * np.random.normal(size=y_noise.shape[0])
y_noise = y_noise + noise
if scale:
y_noise = y_noise / (np.max(np.abs(y_noise)) + 1e-9)
if return_noise:
if scale:
noise = noise / (np.max(np.abs(y_noise)) + 1e-9)
return y_noise, noise
else:
return y_noise
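# Apply one randomly chosen waveform augmentation (SoX bass/reverb effects,
# low/high/band-pass filters, room simulation or amplitude scaling) plus
# optional uniform noise.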
def calc(signal, add_uniform=True):
choice = random.randint(0, 10)
print('choice', choice)
if choice == 0:
x = augmentation.sox_augment_high(
signal,
min_bass_gain=random.randint(25, 50),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 50),
negate=1,
)
if choice == 1:
x = augmentation.sox_augment_high(
signal,
min_bass_gain=random.randint(25, 70),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 50),
negate=0,
)
if choice == 2:
x = augmentation.sox_augment_low(
signal,
min_bass_gain=random.randint(5, 30),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 50),
negate=random.randint(0, 1),
)
if choice == 3:
x = augmentation.sox_augment_combine(
signal,
min_bass_gain_high=random.randint(25, 70),
min_bass_gain_low=random.randint(5, 30),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 90),
)
if choice == 4:
x = augmentation.sox_reverb(
signal,
reverberance=random.randint(10, 80),
hf_damping=10,
room_scale=random.randint(10, 90),
)
if choice == 5:
x = random_amplitude_threshold(
signal, threshold=random.uniform(0.35, 0.8)
)
if choice == 6:
x = augmentation.lowpass_filter(
signal, sr=sr, cutoff=random.randint(200, 551)
)
if choice == 7:
x = augmentation.highpass_filter(
signal, sr=sr, cutoff=random.randint(551, 1653)
)
if choice == 8:
x = augmentation.bandpass_filter(
signal,
sr=sr,
cutoff_low=random.randint(200, 551),
cutoff_high=random.randint(551, 1653),
)
if choice == 9:
x = augment_room(signal)
if choice == 10:
x = signal
if choice not in [5] and random.gauss(0.5, 0.14) > 0.6:
x = random_amplitude_threshold(
x, low=1.0, high=2.0, threshold=random.uniform(0.6, 0.9)
)
if random.gauss(0.5, 0.14) > 0.6 and add_uniform:
x = add_uniform_noise(x, power=random.uniform(0.005, 0.015))
return x
def mp3_to_wav(file, sr=sr):
audio = AudioSegment.from_file(file)
audio = audio.set_frame_rate(sr).set_channels(1)
sample = np.array(audio.get_array_of_samples())
return malaya_speech.astype.int_to_float(sample), sr
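# Yield waveform/target training examples from a JSON manifest, converting mp3
# inputs and skipping clips longer than `maxlen` seconds or transcripts shorter
# than `minlen_text`.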
def generate(file):
with open(file) as fopen:
dataset = json.load(fopen)
audios, cleaned_texts = dataset['X'], dataset['Y']
while True:
audios, cleaned_texts = shuffle(audios, cleaned_texts)
for i in range(len(audios)):
try:
if audios[i].endswith('.mp3'):
# print('found mp3', audios[i])
wav_data, _ = mp3_to_wav(audios[i])
else:
wav_data, _ = malaya_speech.load(audios[i], sr=sr)
if len(cleaned_texts[i]) < minlen_text:
# print(f'skipped text too short {audios[i]}')
continue
if (len(wav_data) / sr) > maxlen:
continue
t = [unique_vocab.index(c) for c in cleaned_texts[i]]
yield {
'waveforms': wav_data,
'waveforms_length': [len(wav_data)],
'targets': t,
'targets_length': [len(t)],
}
except Exception as e:
print(e)
def get_dataset(
file,
batch_size=12,
shuffle_size=20,
thread_count=24,
maxlen_feature=1800,
):
def get():
dataset = tf.data.Dataset.from_generator(
generate,
{
'waveforms': tf.float32,
'waveforms_length': tf.int32,
'targets': tf.int32,
'targets_length': tf.int32,
},
output_shapes={
'waveforms': tf.TensorShape([None]),
'waveforms_length': tf.TensorShape([None]),
'targets': tf.TensorShape([None]),
'targets_length': tf.TensorShape([None]),
},
args=(file,),
)
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
dataset = dataset.padded_batch(
batch_size,
padded_shapes={
'waveforms': tf.TensorShape([None]),
'waveforms_length': tf.TensorShape([None]),
'targets': tf.TensorShape([None]),
'targets_length': tf.TensorShape([None]),
},
padding_values={
'waveforms': tf.constant(0, dtype=tf.float32),
'waveforms_length': tf.constant(0, dtype=tf.int32),
'targets': tf.constant(0, dtype=tf.int32),
'targets_length': tf.constant(0, dtype=tf.int32),
},
)
return dataset
return get
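# Thin wrapper that gives the Conformer encoder the (x, input_mask, training)
# call signature expected by the HuBERT model.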
class Encoder:
def __init__(self, config):
self.config = config
self.encoder = ConformerModel(**self.config)
def __call__(self, x, input_mask, training=True):
return self.encoder(x, training=training)
total_steps = 2000000
def model_fn(features, labels, mode, params):
config_conformer = malaya_speech.config.conformer_tiny_encoder_config
config_conformer['subsampling']['type'] = 'none'
config_conformer['dropout'] = 0.0
encoder = Encoder(config_conformer)
cfg = hubert.HuBERTConfig(
extractor_mode='layer_norm',
dropout=0.0,
attention_dropout=0.0,
encoder_layerdrop=0.0,
dropout_input=0.0,
dropout_features=0.0,
final_dim=128,
)
model = hubert.Model(cfg, encoder, ['pad', 'eos', 'unk'] + [str(i) for i in range(100)])
X = features['waveforms']
X_len = features['waveforms_length'][:, 0]
targets = features['targets']
targets_int32 = tf.cast(targets, tf.int32)
targets_length = features['targets_length'][:, 0]
r = model(X, padding_mask=X_len, features_only=True, mask=False)
logits = tf.layers.dense(r['x'], len(unique_vocab) + 1)
seq_lens = tf.reduce_sum(
tf.cast(tf.logical_not(r['padding_mask']), tf.int32), axis=1
)
mean_error, sum_error, sum_weight = ctc.loss.ctc_loss(
logits, seq_lens, targets_int32, targets_length
)
loss = mean_error
accuracy = ctc.metrics.ctc_sequence_accuracy(
logits, seq_lens, targets_int32, targets_length,
)
tf.identity(loss, 'train_loss')
tf.identity(accuracy, name='train_accuracy')
tf.summary.scalar('train_accuracy', accuracy)
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
init_checkpoint = 'hubert-conformer-tiny/model.ckpt-1000000'
assignment_map, initialized_variable_names = train.get_assignment_map_from_checkpoint(
variables, init_checkpoint
)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = train.optimizer.adamw.create_optimizer(
loss,
init_lr=5e-5,
num_train_steps=total_steps,
num_warmup_steps=100000,
end_learning_rate=0.0,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
clip_norm=1.0,
)
estimator_spec = tf.estimator.EstimatorSpec(
mode=mode, loss=loss, train_op=train_op
)
elif mode == tf.estimator.ModeKeys.EVAL:
estimator_spec = tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss,
eval_metric_ops={
'accuracy': ctc.metrics.ctc_sequence_accuracy_estimator(
logits, seq_lens, targets_int32, targets_length
)
},
)
return estimator_spec
train_hooks = [
tf.train.LoggingTensorHook(
['train_accuracy', 'train_loss'], every_n_iter=1
)
]
train_dataset = get_dataset('bahasa-asr-train-combined.json')
dev_dataset = get_dataset('bahasa-asr-test.json')
train.run_training(
train_fn=train_dataset,
model_fn=model_fn,
model_dir='hubert-conformer-tiny-ctc-char',
num_gpus=1,
log_step=1,
save_checkpoint_step=20000,
max_steps=total_steps,
eval_fn=dev_dataset,
train_hooks=train_hooks,
)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
oscar/lib/python3.6/site-packages/_pytest/config/__init__.py | """ command line options, ini-file and conftest.py processing. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import copy
import inspect
import os
import shlex
import sys
import types
import warnings
import py
import six
from pkg_resources import parse_version
from pluggy import HookimplMarker
from pluggy import HookspecMarker
from pluggy import PluginManager
import _pytest._code
import _pytest.assertion
import _pytest.hookspec # the extension point definitions
from .exceptions import PrintHelp
from .exceptions import UsageError
from .findpaths import determine_setup
from .findpaths import exists
from _pytest._code import ExceptionInfo
from _pytest._code import filter_traceback
from _pytest.compat import lru_cache
from _pytest.compat import safe_str
from _pytest.outcomes import Skipped
hookimpl = HookimplMarker("pytest")
hookspec = HookspecMarker("pytest")
class ConftestImportFailure(Exception):
def __init__(self, path, excinfo):
Exception.__init__(self, path, excinfo)
self.path = path
self.excinfo = excinfo
def main(args=None, plugins=None):
""" return exit code, after performing an in-process test run.
:arg args: list of command line arguments.
:arg plugins: list of plugin objects to be auto-registered during
initialization.
"""
from _pytest.main import EXIT_USAGEERROR
try:
try:
config = _prepareconfig(args, plugins)
except ConftestImportFailure as e:
exc_info = ExceptionInfo(e.excinfo)
tw = py.io.TerminalWriter(sys.stderr)
tw.line(
"ImportError while loading conftest '{e.path}'.".format(e=e), red=True
)
exc_info.traceback = exc_info.traceback.filter(filter_traceback)
exc_repr = (
exc_info.getrepr(style="short", chain=False)
if exc_info.traceback
else exc_info.exconly()
)
formatted_tb = safe_str(exc_repr)
for line in formatted_tb.splitlines():
tw.line(line.rstrip(), red=True)
return 4
else:
try:
return config.hook.pytest_cmdline_main(config=config)
finally:
config._ensure_unconfigure()
except UsageError as e:
tw = py.io.TerminalWriter(sys.stderr)
for msg in e.args:
tw.line("ERROR: {}\n".format(msg), red=True)
return EXIT_USAGEERROR
class cmdline(object): # compatibility namespace
main = staticmethod(main)
def filename_arg(path, optname):
""" Argparse type validator for filename arguments.
:path: path of filename
:optname: name of the option
"""
if os.path.isdir(path):
raise UsageError("{} must be a filename, given: {}".format(optname, path))
return path
def directory_arg(path, optname):
"""Argparse type validator for directory arguments.
:path: path of directory
:optname: name of the option
"""
if not os.path.isdir(path):
raise UsageError("{} must be a directory, given: {}".format(optname, path))
return path
default_plugins = (
"mark",
"main",
"terminal",
"runner",
"python",
"fixtures",
"debugging",
"unittest",
"capture",
"skipping",
"tmpdir",
"monkeypatch",
"recwarn",
"pastebin",
"helpconfig",
"nose",
"assertion",
"junitxml",
"resultlog",
"doctest",
"cacheprovider",
"freeze_support",
"setuponly",
"setupplan",
"stepwise",
"warnings",
"logging",
)
builtin_plugins = set(default_plugins)
builtin_plugins.add("pytester")
def get_config():
# subsequent calls to main will create a fresh instance
pluginmanager = PytestPluginManager()
config = Config(pluginmanager)
for spec in default_plugins:
pluginmanager.import_plugin(spec)
return config
def get_plugin_manager():
"""
Obtain a new instance of the
:py:class:`_pytest.config.PytestPluginManager`, with default plugins
already loaded.
This function can be used by integration with other tools, like hooking
into pytest to run tests into an IDE.
"""
return get_config().pluginmanager
def _prepareconfig(args=None, plugins=None):
warning = None
if args is None:
args = sys.argv[1:]
elif isinstance(args, py.path.local):
args = [str(args)]
elif not isinstance(args, (tuple, list)):
if not isinstance(args, str):
raise ValueError("not a string or argument list: %r" % (args,))
args = shlex.split(args, posix=sys.platform != "win32")
from _pytest import deprecated
warning = deprecated.MAIN_STR_ARGS
config = get_config()
pluginmanager = config.pluginmanager
try:
if plugins:
for plugin in plugins:
if isinstance(plugin, six.string_types):
pluginmanager.consider_pluginarg(plugin)
else:
pluginmanager.register(plugin)
if warning:
from _pytest.warnings import _issue_config_warning
_issue_config_warning(warning, config=config, stacklevel=4)
return pluginmanager.hook.pytest_cmdline_parse(
pluginmanager=pluginmanager, args=args
)
except BaseException:
config._ensure_unconfigure()
raise
class PytestPluginManager(PluginManager):
"""
Overwrites :py:class:`pluggy.PluginManager <pluggy.PluginManager>` to add pytest-specific
functionality:
* loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and
``pytest_plugins`` global variables found in plugins being loaded;
* ``conftest.py`` loading during start-up;
"""
def __init__(self):
super(PytestPluginManager, self).__init__("pytest")
self._conftest_plugins = set()
# state related to local conftest plugins
self._dirpath2confmods = {}
self._conftestpath2mod = {}
self._confcutdir = None
self._noconftest = False
self._duplicatepaths = set()
self.add_hookspecs(_pytest.hookspec)
self.register(self)
if os.environ.get("PYTEST_DEBUG"):
err = sys.stderr
encoding = getattr(err, "encoding", "utf8")
try:
err = py.io.dupfile(err, encoding=encoding)
except Exception:
pass
self.trace.root.setwriter(err.write)
self.enable_tracing()
# Config._consider_importhook will set a real object if required.
self.rewrite_hook = _pytest.assertion.DummyRewriteHook()
# Used to know when we are importing conftests after the pytest_configure stage
self._configured = False
def addhooks(self, module_or_class):
"""
.. deprecated:: 2.8
Use :py:meth:`pluggy.PluginManager.add_hookspecs <PluginManager.add_hookspecs>`
instead.
"""
warning = dict(
code="I2",
fslocation=_pytest._code.getfslineno(sys._getframe(1)),
nodeid=None,
message="use pluginmanager.add_hookspecs instead of "
"deprecated addhooks() method.",
)
self._warn(warning)
return self.add_hookspecs(module_or_class)
def parse_hookimpl_opts(self, plugin, name):
# pytest hooks are always prefixed with pytest_
# so we avoid accessing possibly non-readable attributes
# (see issue #1073)
if not name.startswith("pytest_"):
return
# ignore some historic special names which can not be hooks anyway
if name == "pytest_plugins" or name.startswith("pytest_funcarg__"):
return
method = getattr(plugin, name)
opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name)
# consider only actual functions for hooks (#3775)
if not inspect.isroutine(method):
return
# collect unmarked hooks as long as they have the `pytest_' prefix
if opts is None and name.startswith("pytest_"):
opts = {}
if opts is not None:
for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"):
opts.setdefault(name, hasattr(method, name))
return opts
def parse_hookspec_opts(self, module_or_class, name):
opts = super(PytestPluginManager, self).parse_hookspec_opts(
module_or_class, name
)
if opts is None:
method = getattr(module_or_class, name)
if name.startswith("pytest_"):
opts = {
"firstresult": hasattr(method, "firstresult"),
"historic": hasattr(method, "historic"),
}
return opts
def register(self, plugin, name=None):
if name in ["pytest_catchlog", "pytest_capturelog"]:
self._warn(
"{} plugin has been merged into the core, "
"please remove it from your requirements.".format(
name.replace("_", "-")
)
)
return
ret = super(PytestPluginManager, self).register(plugin, name)
if ret:
self.hook.pytest_plugin_registered.call_historic(
kwargs=dict(plugin=plugin, manager=self)
)
if isinstance(plugin, types.ModuleType):
self.consider_module(plugin)
return ret
def getplugin(self, name):
# support deprecated naming because plugins (xdist e.g.) use it
return self.get_plugin(name)
def hasplugin(self, name):
"""Return True if the plugin with the given name is registered."""
return bool(self.get_plugin(name))
def pytest_configure(self, config):
# XXX now that the pluginmanager exposes hookimpl(tryfirst...)
# we should remove tryfirst/trylast as markers
config.addinivalue_line(
"markers",
"tryfirst: mark a hook implementation function such that the "
"plugin machinery will try to call it first/as early as possible.",
)
config.addinivalue_line(
"markers",
"trylast: mark a hook implementation function such that the "
"plugin machinery will try to call it last/as late as possible.",
)
self._configured = True
def _warn(self, message):
kwargs = (
message
if isinstance(message, dict)
else {"code": "I1", "message": message, "fslocation": None, "nodeid": None}
)
self.hook.pytest_logwarning.call_historic(kwargs=kwargs)
#
# internal API for local conftest plugin handling
#
def _set_initial_conftests(self, namespace):
""" load initial conftest files given a preparsed "namespace".
As conftest files may add their own command line options
which have arguments ('--my-opt somepath') we might get some
false positives. All builtin and 3rd party plugins will have
been loaded, however, so common options will not confuse our logic
here.
"""
current = py.path.local()
self._confcutdir = (
current.join(namespace.confcutdir, abs=True)
if namespace.confcutdir
else None
)
self._noconftest = namespace.noconftest
self._using_pyargs = namespace.pyargs
testpaths = namespace.file_or_dir
foundanchor = False
for path in testpaths:
path = str(path)
# remove node-id syntax
i = path.find("::")
if i != -1:
path = path[:i]
anchor = current.join(path, abs=1)
if exists(anchor): # we found some file object
self._try_load_conftest(anchor)
foundanchor = True
if not foundanchor:
self._try_load_conftest(current)
def _try_load_conftest(self, anchor):
self._getconftestmodules(anchor)
# let's also consider test* subdirs
if anchor.check(dir=1):
for x in anchor.listdir("test*"):
if x.check(dir=1):
self._getconftestmodules(x)
@lru_cache(maxsize=128)
def _getconftestmodules(self, path):
if self._noconftest:
return []
if path.isfile():
directory = path.dirpath()
else:
directory = path
if six.PY2: # py2 is not using lru_cache.
try:
return self._dirpath2confmods[directory]
except KeyError:
pass
# XXX these days we may rather want to use config.rootdir
# and allow users to opt into looking into the rootdir parent
# directories instead of requiring to specify confcutdir
clist = []
for parent in directory.realpath().parts():
if self._confcutdir and self._confcutdir.relto(parent):
continue
conftestpath = parent.join("conftest.py")
if conftestpath.isfile():
mod = self._importconftest(conftestpath)
clist.append(mod)
self._dirpath2confmods[directory] = clist
return clist
def _rget_with_confmod(self, name, path):
modules = self._getconftestmodules(path)
for mod in reversed(modules):
try:
return mod, getattr(mod, name)
except AttributeError:
continue
raise KeyError(name)
def _importconftest(self, conftestpath):
try:
return self._conftestpath2mod[conftestpath]
except KeyError:
pkgpath = conftestpath.pypkgpath()
if pkgpath is None:
_ensure_removed_sysmodule(conftestpath.purebasename)
try:
mod = conftestpath.pyimport()
if (
hasattr(mod, "pytest_plugins")
and self._configured
and not self._using_pyargs
):
from _pytest.deprecated import (
PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
)
warnings.warn_explicit(
PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST,
category=None,
filename=str(conftestpath),
lineno=0,
)
except Exception:
raise ConftestImportFailure(conftestpath, sys.exc_info())
self._conftest_plugins.add(mod)
self._conftestpath2mod[conftestpath] = mod
dirpath = conftestpath.dirpath()
if dirpath in self._dirpath2confmods:
for path, mods in self._dirpath2confmods.items():
if path and path.relto(dirpath) or path == dirpath:
assert mod not in mods
mods.append(mod)
self.trace("loaded conftestmodule %r" % (mod))
self.consider_conftest(mod)
return mod
#
# API for bootstrapping plugin loading
#
#
def consider_preparse(self, args):
for opt1, opt2 in zip(args, args[1:]):
if opt1 == "-p":
self.consider_pluginarg(opt2)
def consider_pluginarg(self, arg):
if arg.startswith("no:"):
name = arg[3:]
# PR #4304 : remove stepwise if cacheprovider is blocked
if name == "cacheprovider":
self.set_blocked("stepwise")
self.set_blocked("pytest_stepwise")
self.set_blocked(name)
if not name.startswith("pytest_"):
self.set_blocked("pytest_" + name)
else:
self.import_plugin(arg)
def consider_conftest(self, conftestmodule):
self.register(conftestmodule, name=conftestmodule.__file__)
def consider_env(self):
self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS"))
def consider_module(self, mod):
self._import_plugin_specs(getattr(mod, "pytest_plugins", []))
def _import_plugin_specs(self, spec):
plugins = _get_plugin_specs_as_list(spec)
for import_spec in plugins:
self.import_plugin(import_spec)
def import_plugin(self, modname):
# most often modname refers to builtin modules, e.g. "pytester",
# "terminal" or "capture". Those plugins are registered under their
# basename for historic purposes but must be imported with the
# _pytest prefix.
assert isinstance(modname, (six.text_type, str)), (
"module name as text required, got %r" % modname
)
modname = str(modname)
if self.is_blocked(modname) or self.get_plugin(modname) is not None:
return
if modname in builtin_plugins:
importspec = "_pytest." + modname
else:
importspec = modname
self.rewrite_hook.mark_rewrite(importspec)
try:
__import__(importspec)
except ImportError as e:
new_exc_type = ImportError
new_exc_message = 'Error importing plugin "%s": %s' % (
modname,
safe_str(e.args[0]),
)
new_exc = new_exc_type(new_exc_message)
six.reraise(new_exc_type, new_exc, sys.exc_info()[2])
except Skipped as e:
self._warn("skipped plugin %r: %s" % ((modname, e.msg)))
else:
mod = sys.modules[importspec]
self.register(mod, modname)
def _get_plugin_specs_as_list(specs):
"""
Parses a list of "plugin specs" and returns a list of plugin names.
Plugin specs can be given as a list of strings separated by "," or already as a list/tuple in
which case it is returned as a list. Specs can also be `None` in which case an
empty list is returned.
"""
if specs is not None:
if isinstance(specs, str):
specs = specs.split(",") if specs else []
if not isinstance(specs, (list, tuple)):
raise UsageError(
"Plugin specs must be a ','-separated string or a "
"list/tuple of strings for plugin names. Given: %r" % specs
)
return list(specs)
return []
def _ensure_removed_sysmodule(modname):
try:
del sys.modules[modname]
except KeyError:
pass
class Notset(object):
def __repr__(self):
return "<NOTSET>"
notset = Notset()
def _iter_rewritable_modules(package_files):
for fn in package_files:
is_simple_module = "/" not in fn and fn.endswith(".py")
is_package = fn.count("/") == 1 and fn.endswith("__init__.py")
if is_simple_module:
module_name, _ = os.path.splitext(fn)
yield module_name
elif is_package:
package_name = os.path.dirname(fn)
yield package_name
class Config(object):
""" access to configuration values, pluginmanager and plugin hooks. """
def __init__(self, pluginmanager):
#: access to command line option as attributes.
#: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead
self.option = argparse.Namespace()
from .argparsing import Parser, FILE_OR_DIR
_a = FILE_OR_DIR
self._parser = Parser(
usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a),
processopt=self._processopt,
)
#: a pluginmanager instance
self.pluginmanager = pluginmanager
self.trace = self.pluginmanager.trace.root.get("config")
self.hook = self.pluginmanager.hook
self._inicache = {}
self._override_ini = ()
self._opt2dest = {}
self._cleanup = []
self._warn = self.pluginmanager._warn
self.pluginmanager.register(self, "pytestconfig")
self._configured = False
def do_setns(dic):
import pytest
setns(pytest, dic)
self.hook.pytest_namespace.call_historic(do_setns, {})
self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser))
def add_cleanup(self, func):
""" Add a function to be called when the config object gets out of
use (usually coinciding with pytest_unconfigure)."""
self._cleanup.append(func)
def _do_configure(self):
assert not self._configured
self._configured = True
self.hook.pytest_configure.call_historic(kwargs=dict(config=self))
def _ensure_unconfigure(self):
if self._configured:
self._configured = False
self.hook.pytest_unconfigure(config=self)
self.hook.pytest_configure._call_history = []
while self._cleanup:
fin = self._cleanup.pop()
fin()
def warn(self, code, message, fslocation=None, nodeid=None):
"""
.. deprecated:: 3.8
Use :py:func:`warnings.warn` or :py:func:`warnings.warn_explicit` directly instead.
Generate a warning for this test session.
"""
from _pytest.warning_types import RemovedInPytest4Warning
if isinstance(fslocation, (tuple, list)) and len(fslocation) > 2:
filename, lineno = fslocation[:2]
else:
filename = "unknown file"
lineno = 0
msg = "config.warn has been deprecated, use warnings.warn instead"
if nodeid:
msg = "{}: {}".format(nodeid, msg)
warnings.warn_explicit(
RemovedInPytest4Warning(msg),
category=None,
filename=filename,
lineno=lineno,
)
self.hook.pytest_logwarning.call_historic(
kwargs=dict(
code=code, message=message, fslocation=fslocation, nodeid=nodeid
)
)
def get_terminal_writer(self):
return self.pluginmanager.get_plugin("terminalreporter")._tw
def pytest_cmdline_parse(self, pluginmanager, args):
# REF1 assert self == pluginmanager.config, (self, pluginmanager.config)
self.parse(args)
return self
def notify_exception(self, excinfo, option=None):
if option and option.fulltrace:
style = "long"
else:
style = "native"
excrepr = excinfo.getrepr(
funcargs=True, showlocals=getattr(option, "showlocals", False), style=style
)
res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo)
if not any(res):
for line in str(excrepr).split("\n"):
sys.stderr.write("INTERNALERROR> %s\n" % line)
sys.stderr.flush()
def cwd_relative_nodeid(self, nodeid):
# nodeid's are relative to the rootpath, compute relative to cwd
if self.invocation_dir != self.rootdir:
fullpath = self.rootdir.join(nodeid)
nodeid = self.invocation_dir.bestrelpath(fullpath)
return nodeid
@classmethod
def fromdictargs(cls, option_dict, args):
""" constructor useable for subprocesses. """
config = get_config()
config.option.__dict__.update(option_dict)
config.parse(args, addopts=False)
for x in config.option.plugins:
config.pluginmanager.consider_pluginarg(x)
return config
def _processopt(self, opt):
for name in opt._short_opts + opt._long_opts:
self._opt2dest[name] = opt.dest
if hasattr(opt, "default") and opt.dest:
if not hasattr(self.option, opt.dest):
setattr(self.option, opt.dest, opt.default)
@hookimpl(trylast=True)
def pytest_load_initial_conftests(self, early_config):
self.pluginmanager._set_initial_conftests(early_config.known_args_namespace)
def _initini(self, args):
ns, unknown_args = self._parser.parse_known_and_unknown_args(
args, namespace=copy.copy(self.option)
)
r = determine_setup(
ns.inifilename,
ns.file_or_dir + unknown_args,
rootdir_cmd_arg=ns.rootdir or None,
config=self,
)
self.rootdir, self.inifile, self.inicfg = r
self._parser.extra_info["rootdir"] = self.rootdir
self._parser.extra_info["inifile"] = self.inifile
self.invocation_dir = py.path.local()
self._parser.addini("addopts", "extra command line options", "args")
self._parser.addini("minversion", "minimally required pytest version")
self._override_ini = ns.override_ini or ()
def _consider_importhook(self, args):
"""Install the PEP 302 import hook if using assertion rewriting.
Needs to parse the --assert=<mode> option from the commandline
and find all the installed plugins to mark them for rewriting
by the importhook.
"""
ns, unknown_args = self._parser.parse_known_and_unknown_args(args)
mode = ns.assertmode
if mode == "rewrite":
try:
hook = _pytest.assertion.install_importhook(self)
except SystemError:
mode = "plain"
else:
self._mark_plugins_for_rewrite(hook)
_warn_about_missing_assertion(mode)
def _mark_plugins_for_rewrite(self, hook):
"""
Given an importhook, mark for rewrite any top-level
modules or packages in the distribution package for
all pytest plugins.
"""
import pkg_resources
self.pluginmanager.rewrite_hook = hook
if os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"):
# We don't autoload from setuptools entry points, no need to continue.
return
# 'RECORD' available for plugins installed normally (pip install)
# 'SOURCES.txt' available for plugins installed in dev mode (pip install -e)
# for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa
# so it shouldn't be an issue
metadata_files = "RECORD", "SOURCES.txt"
package_files = (
entry.split(",")[0]
for entrypoint in pkg_resources.iter_entry_points("pytest11")
for metadata in metadata_files
for entry in entrypoint.dist._get_metadata(metadata)
)
for name in _iter_rewritable_modules(package_files):
hook.mark_rewrite(name)
def _validate_args(self, args):
"""Validate known args."""
self._parser.parse_known_and_unknown_args(
args, namespace=copy.copy(self.option)
)
return args
def _preparse(self, args, addopts=True):
if addopts:
env_addopts = os.environ.get("PYTEST_ADDOPTS", "")
if len(env_addopts):
args[:] = self._validate_args(shlex.split(env_addopts)) + args
self._initini(args)
if addopts:
args[:] = self._validate_args(self.getini("addopts")) + args
self._checkversion()
self._consider_importhook(args)
self.pluginmanager.consider_preparse(args)
if not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"):
# Don't autoload from setuptools entry point. Only explicitly specified
# plugins are going to be loaded.
self.pluginmanager.load_setuptools_entrypoints("pytest11")
self.pluginmanager.consider_env()
self.known_args_namespace = ns = self._parser.parse_known_args(
args, namespace=copy.copy(self.option)
)
if self.known_args_namespace.confcutdir is None and self.inifile:
confcutdir = py.path.local(self.inifile).dirname
self.known_args_namespace.confcutdir = confcutdir
try:
self.hook.pytest_load_initial_conftests(
early_config=self, args=args, parser=self._parser
)
except ConftestImportFailure:
e = sys.exc_info()[1]
if ns.help or ns.version:
# we don't want to prevent --help/--version to work
# so just let is pass and print a warning at the end
self._warn("could not load initial conftests (%s)\n" % e.path)
else:
raise
def _checkversion(self):
import pytest
minver = self.inicfg.get("minversion", None)
if minver:
if parse_version(minver) > parse_version(pytest.__version__):
raise pytest.UsageError(
"%s:%d: requires pytest-%s, actual pytest-%s'"
% (
self.inicfg.config.path,
self.inicfg.lineof("minversion"),
minver,
pytest.__version__,
)
)
def parse(self, args, addopts=True):
# parse given cmdline arguments into this config object.
assert not hasattr(
self, "args"
), "can only parse cmdline args at most once per Config object"
self._origargs = args
self.hook.pytest_addhooks.call_historic(
kwargs=dict(pluginmanager=self.pluginmanager)
)
self._preparse(args, addopts=addopts)
# XXX deprecated hook:
self.hook.pytest_cmdline_preparse(config=self, args=args)
self._parser.after_preparse = True
try:
args = self._parser.parse_setoption(
args, self.option, namespace=self.option
)
if not args:
if self.invocation_dir == self.rootdir:
args = self.getini("testpaths")
if not args:
args = [str(self.invocation_dir)]
self.args = args
except PrintHelp:
pass
def addinivalue_line(self, name, line):
""" add a line to an ini-file option. The option must have been
declared but might not yet be set in which case the line becomes
the first line in its value. """
x = self.getini(name)
assert isinstance(x, list)
x.append(line) # modifies the cached list inline
def getini(self, name):
""" return configuration value from an :ref:`ini file <inifiles>`. If the
specified name hasn't been registered through a prior
:py:func:`parser.addini <_pytest.config.Parser.addini>`
call (usually from a plugin), a ValueError is raised. """
try:
return self._inicache[name]
except KeyError:
self._inicache[name] = val = self._getini(name)
return val
def _getini(self, name):
try:
description, type, default = self._parser._inidict[name]
except KeyError:
raise ValueError("unknown configuration value: %r" % (name,))
value = self._get_override_ini_value(name)
if value is None:
try:
value = self.inicfg[name]
except KeyError:
if default is not None:
return default
if type is None:
return ""
return []
if type == "pathlist":
dp = py.path.local(self.inicfg.config.path).dirpath()
values = []
for relpath in shlex.split(value):
values.append(dp.join(relpath, abs=True))
return values
elif type == "args":
return shlex.split(value)
elif type == "linelist":
return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
elif type == "bool":
return bool(_strtobool(value.strip()))
else:
assert type is None
return value
def _getconftest_pathlist(self, name, path):
try:
mod, relroots = self.pluginmanager._rget_with_confmod(name, path)
except KeyError:
return None
modpath = py.path.local(mod.__file__).dirpath()
values = []
for relroot in relroots:
if not isinstance(relroot, py.path.local):
relroot = relroot.replace("/", py.path.local.sep)
relroot = modpath.join(relroot, abs=True)
values.append(relroot)
return values
def _get_override_ini_value(self, name):
value = None
# override_ini is a list of "ini=value" options
# always use the last item if multiple values are set for same ini-name,
# e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2
for ini_config in self._override_ini:
try:
key, user_ini_value = ini_config.split("=", 1)
except ValueError:
raise UsageError("-o/--override-ini expects option=value style.")
else:
if key == name:
value = user_ini_value
return value
def getoption(self, name, default=notset, skip=False):
""" return command line option value.
:arg name: name of the option. You may also specify
the literal ``--OPT`` option instead of the "dest" option name.
:arg default: default value if no option of that name exists.
:arg skip: if True, raise pytest.skip if the option does not exist
or has a None value.
"""
name = self._opt2dest.get(name, name)
try:
val = getattr(self.option, name)
if val is None and skip:
raise AttributeError(name)
return val
except AttributeError:
if default is not notset:
return default
if skip:
import pytest
pytest.skip("no %r option found" % (name,))
raise ValueError("no option named %r" % (name,))
def getvalue(self, name, path=None):
""" (deprecated, use getoption()) """
return self.getoption(name)
def getvalueorskip(self, name, path=None):
""" (deprecated, use getoption(skip=True)) """
return self.getoption(name, skip=True)
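# Illustrative sketch (not part of the original module): how the ini value
# types handled in Config._getini behave on hypothetical ini strings.
def _example_ini_type_coercions():
    import shlex
    # "args": shell-style splitting of the raw ini string
    assert shlex.split("-ra -q --tb=short") == ["-ra", "-q", "--tb=short"]
    # "linelist": one entry per non-empty stripped line
    raw = "slow: marks slow tests\n\nserial\n"
    assert [t for t in map(lambda x: x.strip(), raw.split("\n")) if t] == [
        "slow: marks slow tests",
        "serial",
    ]
    # "bool": normalized through _strtobool ("y"/"on"/"1" and friends)
    assert bool(_strtobool("On")) is True and bool(_strtobool("0")) is False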
def _assertion_supported():
try:
assert False
except AssertionError:
return True
else:
return False
def _warn_about_missing_assertion(mode):
if not _assertion_supported():
if mode == "plain":
sys.stderr.write(
"WARNING: ASSERTIONS ARE NOT EXECUTED"
" and FAILING TESTS WILL PASS. Are you"
" using python -O?"
)
else:
sys.stderr.write(
"WARNING: assertions not in test modules or"
" plugins will be ignored"
" because assert statements are not executed "
"by the underlying Python interpreter "
"(are you using python -O?)\n"
)
def setns(obj, dic):
import pytest
for name, value in dic.items():
if isinstance(value, dict):
mod = getattr(obj, name, None)
if mod is None:
modname = "pytest.%s" % name
mod = types.ModuleType(modname)
sys.modules[modname] = mod
mod.__all__ = []
setattr(obj, name, mod)
obj.__all__.append(name)
setns(mod, value)
else:
setattr(obj, name, value)
obj.__all__.append(name)
# if obj != pytest:
# pytest.__all__.append(name)
setattr(pytest, name, value)
def create_terminal_writer(config, *args, **kwargs):
"""Create a TerminalWriter instance configured according to the options
in the config object. Every piece of code that requires a TerminalWriter object
and has access to a config object should use this function.
"""
tw = py.io.TerminalWriter(*args, **kwargs)
if config.option.color == "yes":
tw.hasmarkup = True
if config.option.color == "no":
tw.hasmarkup = False
return tw
def _strtobool(val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
.. note:: copied from distutils.util
"""
val = val.lower()
if val in ("y", "yes", "t", "true", "on", "1"):
return 1
elif val in ("n", "no", "f", "false", "off", "0"):
return 0
else:
raise ValueError("invalid truth value %r" % (val,))
| []
| []
| [
"PYTEST_ADDOPTS",
"PYTEST_DEBUG",
"PYTEST_DISABLE_PLUGIN_AUTOLOAD",
"PYTEST_PLUGINS"
]
| [] | ["PYTEST_ADDOPTS", "PYTEST_DEBUG", "PYTEST_DISABLE_PLUGIN_AUTOLOAD", "PYTEST_PLUGINS"] | python | 4 | 0 | |
tasks.py | import os
import sys
import datetime
import json
import re
import time
import zipfile
import threading
import hashlib
import shutil
import subprocess
import pprint
from invoke import task
import boto3
import botocore.exceptions
import multiprocessing
import io
import ai2thor.build
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s [%(process)d] %(funcName)s - %(levelname)s - %(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
def add_files(zipf, start_dir):
for root, dirs, files in os.walk(start_dir):
for f in files:
fn = os.path.join(root, f)
arcname = os.path.relpath(fn, start_dir)
# print("adding %s" % arcname)
zipf.write(fn, arcname)
def push_build(build_archive_name, zip_data, include_private_scenes):
import boto3
# subprocess.run("ls %s" % build_archive_name, shell=True)
# subprocess.run("gsha256sum %s" % build_archive_name)
s3 = boto3.resource("s3")
acl = "public-read"
bucket = ai2thor.build.PUBLIC_S3_BUCKET
if include_private_scenes:
bucket = ai2thor.build.PRIVATE_S3_BUCKET
acl = "private"
archive_base = os.path.basename(build_archive_name)
key = "builds/%s" % (archive_base,)
sha256_key = "builds/%s.sha256" % (os.path.splitext(archive_base)[0],)
s3.Object(bucket, key).put(Body=zip_data, ACL=acl)
s3.Object(bucket, sha256_key).put(
Body=hashlib.sha256(zip_data).hexdigest(), ACL=acl, ContentType="text/plain"
)
logger.info("pushed build %s to %s" % (bucket, build_archive_name))
def _webgl_local_build_path(prefix, source_dir="builds"):
return os.path.join(
os.getcwd(), "unity/{}/thor-{}-WebGL/".format(source_dir, prefix)
)
def _unity_version():
import yaml
with open("unity/ProjectSettings/ProjectVersion.txt") as pf:
project_version = yaml.load(pf.read(), Loader=yaml.FullLoader)
return project_version["m_EditorVersion"]
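# Illustrative sketch (not part of the original file): ProjectVersion.txt is a
# small YAML document and _unity_version() returns its m_EditorVersion field.
# The version string below is hypothetical.
def _example_project_version_parse():
    import yaml
    sample = "m_EditorVersion: 2019.4.20f1\nm_EditorVersionWithRevision: 2019.4.20f1 (6dd1c08eedfa)\n"
    assert yaml.safe_load(sample)["m_EditorVersion"] == "2019.4.20f1"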
def _unity_path():
unity_version = _unity_version()
standalone_path = None
if sys.platform.startswith("darwin"):
unity_hub_path = (
"/Applications/Unity/Hub/Editor/{}/Unity.app/Contents/MacOS/Unity".format(
unity_version
)
)
standalone_path = (
"/Applications/Unity-{}/Unity.app/Contents/MacOS/Unity".format(
unity_version
)
)
elif "win" in sys.platform:
unity_hub_path = "C:/PROGRA~1/Unity/Hub/Editor/{}/Editor/Unity.exe".format(
unity_version
)
# TODO: Verify windows unity standalone path
standalone_path = "C:/PROGRA~1/{}/Editor/Unity.exe".format(unity_version)
elif sys.platform.startswith("linux"):
unity_hub_path = "{}/Unity/Hub/Editor/{}/Editor/Unity".format(
os.environ["HOME"], unity_version
)
if standalone_path and os.path.exists(standalone_path):
unity_path = standalone_path
else:
unity_path = unity_hub_path
return unity_path
def _build(unity_path, arch, build_dir, build_name, env={}):
import yaml
project_path = os.path.join(os.getcwd(), unity_path)
command = (
"%s -quit -batchmode -logFile %s.log -projectpath %s -executeMethod Build.%s"
% (_unity_path(), build_name, project_path, arch)
)
target_path = os.path.join(build_dir, build_name)
full_env = os.environ.copy()
full_env.update(env)
full_env["UNITY_BUILD_NAME"] = target_path
result_code = subprocess.check_call(command, shell=True, env=full_env)
print("Exited with code {}".format(result_code))
success = result_code == 0
if success:
generate_build_metadata(os.path.join(project_path, build_dir, "metadata.json"))
return success
def generate_build_metadata(metadata_path):
# this server_types metadata is maintained
# to allow future versions of the Python API
# to launch older versions of the Unity build
# and know whether the Fifo server is available
server_types = ["WSGI"]
try:
import ai2thor.fifo_server
server_types.append("FIFO")
except Exception as e:
pass
with open(os.path.join(metadata_path), "w") as f:
f.write(json.dumps(dict(server_types=server_types)))
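# Illustrative sketch (not part of the original file): shape of the
# metadata.json written above; whether "FIFO" appears depends on whether
# ai2thor.fifo_server is importable at build time.
def _example_build_metadata_shape():
    sample = '{"server_types": ["WSGI", "FIFO"]}'
    assert json.loads(sample)["server_types"][0] == "WSGI"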
def class_dataset_images_for_scene(scene_name):
import ai2thor.controller
from itertools import product
from collections import defaultdict
import numpy as np
import cv2
env = ai2thor.controller.Controller(quality="Low")
player_size = 300
zoom_size = 1000
target_size = 256
rotations = [0, 90, 180, 270]
horizons = [330, 0, 30]
buffer = 15
# object must be at least 40% in view
min_size = ((target_size * 0.4) / zoom_size) * player_size
env.start(width=player_size, height=player_size)
env.reset(scene_name)
event = env.step(
dict(
action="Initialize",
gridSize=0.25,
renderInstanceSegmentation=True,
renderSemanticSegmentation=False,
renderImage=False,
)
)
for o in event.metadata["objects"]:
if o["receptacle"] and o["receptacleObjectIds"] and o["openable"]:
print("opening %s" % o["objectId"])
env.step(
dict(action="OpenObject", objectId=o["objectId"], forceAction=True)
)
event = env.step(dict(action="GetReachablePositions", gridSize=0.25))
visible_object_locations = []
for point in event.metadata["actionReturn"]:
for rot, hor in product(rotations, horizons):
exclude_colors = set(
map(tuple, np.unique(event.instance_segmentation_frame[0], axis=0))
)
exclude_colors.update(
set(
map(
tuple,
np.unique(event.instance_segmentation_frame[:, -1, :], axis=0),
)
)
)
exclude_colors.update(
set(
map(tuple, np.unique(event.instance_segmentation_frame[-1], axis=0))
)
)
exclude_colors.update(
set(
map(
tuple,
np.unique(event.instance_segmentation_frame[:, 0, :], axis=0),
)
)
)
event = env.step(
dict(
action="TeleportFull",
x=point["x"],
y=point["y"],
z=point["z"],
rotation=rot,
horizon=hor,
forceAction=True,
),
raise_for_failure=True,
)
visible_objects = []
for o in event.metadata["objects"]:
if o["visible"] and o["objectId"] and o["pickupable"]:
color = event.object_id_to_color[o["objectId"]]
mask = (
(event.instance_segmentation_frame[:, :, 0] == color[0])
& (event.instance_segmentation_frame[:, :, 1] == color[1])
& (event.instance_segmentation_frame[:, :, 2] == color[2])
)
points = np.argwhere(mask)
if len(points) > 0:
min_y = int(np.min(points[:, 0]))
max_y = int(np.max(points[:, 0]))
min_x = int(np.min(points[:, 1]))
max_x = int(np.max(points[:, 1]))
max_dim = max((max_y - min_y), (max_x - min_x))
if (
max_dim > min_size
and min_y > buffer
and min_x > buffer
and max_x < (player_size - buffer)
and max_y < (player_size - buffer)
):
visible_objects.append(
dict(
objectId=o["objectId"],
min_x=min_x,
min_y=min_y,
max_x=max_x,
max_y=max_y,
)
)
print(
"[%s] including object id %s %s"
% (scene_name, o["objectId"], max_dim)
)
if visible_objects:
visible_object_locations.append(
dict(point=point, rot=rot, hor=hor, visible_objects=visible_objects)
)
env.stop()
env = ai2thor.controller.Controller()
env.start(width=zoom_size, height=zoom_size)
env.reset(scene_name)
event = env.step(dict(action="Initialize", gridSize=0.25))
for o in event.metadata["objects"]:
if o["receptacle"] and o["receptacleObjectIds"] and o["openable"]:
print("opening %s" % o["objectId"])
env.step(
dict(action="OpenObject", objectId=o["objectId"], forceAction=True)
)
for vol in visible_object_locations:
point = vol["point"]
event = env.step(
dict(
action="TeleportFull",
x=point["x"],
y=point["y"],
z=point["z"],
rotation=vol["rot"],
horizon=vol["hor"],
forceAction=True,
),
raise_for_failure=True,
)
for v in vol["visible_objects"]:
object_id = v["objectId"]
min_y = int(round(v["min_y"] * (zoom_size / player_size)))
max_y = int(round(v["max_y"] * (zoom_size / player_size)))
max_x = int(round(v["max_x"] * (zoom_size / player_size)))
min_x = int(round(v["min_x"] * (zoom_size / player_size)))
delta_y = max_y - min_y
delta_x = max_x - min_x
scaled_target_size = max(delta_x, delta_y, target_size) + buffer * 2
if min_x > (zoom_size - max_x):
start_x = min_x - (scaled_target_size - delta_x)
end_x = max_x + buffer
else:
end_x = max_x + (scaled_target_size - delta_x)
start_x = min_x - buffer
if min_y > (zoom_size - max_y):
start_y = min_y - (scaled_target_size - delta_y)
end_y = max_y + buffer
else:
end_y = max_y + (scaled_target_size - delta_y)
start_y = min_y - buffer
# print("max x %s max y %s min x %s min y %s" % (max_x, max_y, min_x, min_y))
# print("start x %s start_y %s end_x %s end y %s" % (start_x, start_y, end_x, end_y))
print("storing %s " % object_id)
img = event.cv2img[start_y:end_y, start_x:end_x, :]
dst = cv2.resize(
img, (target_size, target_size), interpolation=cv2.INTER_LANCZOS4
)
object_type = object_id.split("|")[0].lower()
target_dir = os.path.join("images", scene_name, object_type)
h = hashlib.md5()
h.update(json.dumps(point, sort_keys=True).encode("utf8"))
h.update(json.dumps(v, sort_keys=True).encode("utf8"))
os.makedirs(target_dir, exist_ok=True)
cv2.imwrite(os.path.join(target_dir, h.hexdigest() + ".png"), dst)
env.stop()
return scene_name
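# Illustrative sketch (not part of the original file): the "object must be at
# least 40% in view" threshold above, evaluated with the constants used in
# class_dataset_images_for_scene.
def _example_min_size_threshold():
    player_size, zoom_size, target_size = 300, 1000, 256
    min_size = ((target_size * 0.4) / zoom_size) * player_size
    assert round(min_size, 2) == 30.72  # pixels in the 300x300 preview pass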
@task
def build_class_dataset(context):
import concurrent.futures
import ai2thor.controller
multiprocessing.set_start_method("spawn")
controller = ai2thor.controller.Controller()
executor = concurrent.futures.ProcessPoolExecutor(max_workers=4)
futures = []
for scene in controller.scene_names():
print("processing scene %s" % scene)
futures.append(executor.submit(class_dataset_images_for_scene, scene))
for f in concurrent.futures.as_completed(futures):
scene = f.result()
print("scene name complete: %s" % scene)
def local_build_name(prefix, arch):
return "thor-%s-%s" % (prefix, arch)
@task
def local_build_test(context, prefix="local", arch="OSXIntel64"):
from ai2thor.tests.constants import TEST_SCENE
local_build(context, prefix, arch, [TEST_SCENE])
@task(iterable=["scenes"])
def local_build(
context, prefix="local", arch="OSXIntel64", scenes=None, scripts_only=False
):
import ai2thor.controller
build = ai2thor.build.Build(arch, prefix, False)
env = dict()
if os.path.isdir("unity/Assets/Private/Scenes"):
env["INCLUDE_PRIVATE_SCENES"] = "true"
build_dir = os.path.join("builds", build.name)
if scripts_only:
env["BUILD_SCRIPTS_ONLY"] = "true"
if scenes:
env["BUILD_SCENES"] = ",".join(
map(ai2thor.controller.Controller.normalize_scene, scenes)
)
if _build("unity", arch, build_dir, build.name, env=env):
print("Build Successful")
else:
print("Build Failure")
generate_quality_settings(context)
def fix_webgl_unity_loader_regex(unity_loader_path):
# Bug in the UnityLoader.js causes Chrome on Big Sur to fail to load
# https://issuetracker.unity3d.com/issues/unity-webgl-builds-do-not-run-on-macos-big-sur
with open(unity_loader_path) as f:
loader = f.read()
loader = loader.replace("Mac OS X (10[\.\_\d]+)", "Mac OS X (1[\.\_\d][\.\_\d]+)")
with open(unity_loader_path, "w") as f:
f.write(loader)
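# Illustrative sketch (not part of the original file): why the UnityLoader.js
# pattern is widened above. Re-expressed here as Python regexes for
# demonstration, the original pattern only matches "10.x"-style macOS
# versions, so Big Sur ("11.x") user agents fail to match.
def _example_unityloader_version_patterns():
    old = re.compile(r"Mac OS X (10[._\d]+)")
    new = re.compile(r"Mac OS X (1[._\d][._\d]+)")
    assert old.search("Mac OS X 10_15_7") and not old.search("Mac OS X 11_1")
    assert new.search("Mac OS X 10_15_7") and new.search("Mac OS X 11_1")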
@task
def webgl_build(
context,
scenes="",
room_ranges=None,
directory="builds",
prefix="local",
verbose=False,
content_addressable=False,
crowdsource_build=False,
):
"""
Creates a WebGL build
:param context:
:param scenes: String of scenes to include in the build as a comma separated list
:param prefix: Prefix name for the build
:param content_addressable: Whether to change the unityweb build files to be content-addressable,
i.e. to have their content hashes as part of their names.
:return:
"""
from functools import reduce
def file_to_content_addressable(file_path, json_metadata_file_path, json_key):
# name_split = os.path.splitext(file_path)
path_split = os.path.split(file_path)
directory = path_split[0]
file_name = path_split[1]
print("File name {} ".format(file_name))
with open(file_path, "rb") as f:
h = hashlib.md5()
h.update(f.read())
md5_id = h.hexdigest()
new_file_name = "{}_{}".format(md5_id, file_name)
os.rename(file_path, os.path.join(directory, new_file_name))
with open(json_metadata_file_path, "r+") as f:
unity_json = json.load(f)
print("UNITY json {}".format(unity_json))
unity_json[json_key] = new_file_name
print("UNITY L {}".format(unity_json))
f.seek(0)
json.dump(unity_json, f, indent=4)
arch = "WebGL"
build_name = local_build_name(prefix, arch)
if room_ranges is not None:
floor_plans = [
"FloorPlan{}_physics".format(i)
for i in reduce(
lambda x, y: x + y,
map(
lambda x: x + [x[-1] + 1],
[
list(range(*tuple(int(y) for y in x.split("-"))))
for x in room_ranges.split(",")
],
),
)
]
scenes = ",".join(floor_plans)
if verbose:
print(scenes)
env = dict(BUILD_SCENES=scenes)
if crowdsource_build:
env["DEFINES"] = "CROWDSOURCE_TASK"
if _build("unity", arch, directory, build_name, env=env):
print("Build Successful")
else:
print("Build Failure")
build_path = _webgl_local_build_path(prefix, directory)
fix_webgl_unity_loader_regex(os.path.join(build_path, "Build/UnityLoader.js"))
generate_quality_settings(context)
# the remainder of this is only used to generate scene metadata, but it
# is not part of building webgl player
rooms = {
"kitchens": {"name": "Kitchens", "roomRanges": range(1, 31)},
"livingRooms": {"name": "Living Rooms", "roomRanges": range(201, 231)},
"bedrooms": {"name": "Bedrooms", "roomRanges": range(301, 331)},
"bathrooms": {"name": "Bathrooms", "roomRanges": range(401, 431)},
"foyers": {"name": "Foyers", "roomRanges": range(501, 531)},
}
room_type_by_id = {}
for room_type, room_data in rooms.items():
for room_num in room_data["roomRanges"]:
room_id = "FloorPlan{}_physics".format(room_num)
room_type_by_id[room_id] = {"type": room_type, "name": room_data["name"]}
scene_metadata = {}
for scene_name in scenes.split(","):
if scene_name not in room_type_by_id:
# allows for arbitrary scenes to be included dynamically
room_type = {"type": "Other", "name": None}
else:
room_type = room_type_by_id[scene_name]
if room_type["type"] not in scene_metadata:
scene_metadata[room_type["type"]] = {
"scenes": [],
"name": room_type["name"],
}
scene_metadata[room_type["type"]]["scenes"].append(scene_name)
if verbose:
print(scene_metadata)
to_content_addressable = [
("{}.data.unityweb".format(build_name), "dataUrl"),
("{}.wasm.code.unityweb".format(build_name), "wasmCodeUrl"),
("{}.wasm.framework.unityweb".format(build_name), "wasmFrameworkUrl"),
]
for file_name, key in to_content_addressable:
file_to_content_addressable(
os.path.join(build_path, "Build/{}".format(file_name)),
os.path.join(build_path, "Build/{}.json".format(build_name)),
key,
)
with open(os.path.join(build_path, "scenes.json"), "w") as f:
f.write(json.dumps(scene_metadata, sort_keys=False, indent=4))
@task
def generate_quality_settings(ctx):
import yaml
class YamlUnity3dTag(yaml.SafeLoader):
def let_through(self, node):
return self.construct_mapping(node)
YamlUnity3dTag.add_constructor(
"tag:unity3d.com,2011:47", YamlUnity3dTag.let_through
)
qs = yaml.load(
open("unity/ProjectSettings/QualitySettings.asset").read(),
Loader=YamlUnity3dTag,
)
quality_settings = {}
default = "Ultra"
for i, q in enumerate(qs["QualitySettings"]["m_QualitySettings"]):
quality_settings[q["name"]] = i
assert default in quality_settings
with open("ai2thor/_quality_settings.py", "w") as f:
f.write("# GENERATED FILE - DO NOT EDIT\n")
f.write("DEFAULT_QUALITY = '%s'\n" % default)
f.write("QUALITY_SETTINGS = " + pprint.pformat(quality_settings))
def git_commit_comment():
comment = (
subprocess.check_output("git log -n 1 --format=%B", shell=True)
.decode("utf8")
.strip()
)
return comment
def git_commit_id():
commit_id = (
subprocess.check_output("git log -n 1 --format=%H", shell=True)
.decode("ascii")
.strip()
)
return commit_id
@task
def deploy_pip(context):
if "TWINE_PASSWORD" not in os.environ:
raise Exception("Twine token not specified in environment")
subprocess.check_call("twine upload -u __token__ dist/*", shell=True)
@task
def push_pip_commit(context):
import glob
commit_id = git_commit_id()
s3 = boto3.resource("s3")
for g in glob.glob("dist/ai2thor-0+%s*" % commit_id):
acl = "public-read"
pip_name = os.path.basename(g)
logger.info("pushing pip file %s" % g)
with open(g, "rb") as f:
s3.Object(
ai2thor.build.PYPI_S3_BUCKET, os.path.join("ai2thor", pip_name)
).put(Body=f, ACL=acl)
@task
def build_pip_commit(context):
commit_id = git_commit_id()
if os.path.isdir("dist"):
shutil.rmtree("dist")
generate_quality_settings(context)
# must use this form to create valid PEP440 version specifier
version = "0+" + commit_id
with open("ai2thor/_builds.py", "w") as fi:
fi.write("# GENERATED FILE - DO NOT EDIT\n")
fi.write("COMMIT_ID = '%s'\n" % commit_id)
with open("ai2thor/_version.py", "w") as fi:
fi.write("# Copyright Allen Institute for Artificial Intelligence 2021\n")
fi.write("# GENERATED FILE - DO NOT EDIT\n")
fi.write("__version__ = '%s'\n" % (version))
subprocess.check_call("python setup.py clean --all", shell=True)
subprocess.check_call("python setup.py sdist bdist_wheel --universal", shell=True)
@task
def build_pip(context, version):
import xml.etree.ElementTree as ET
import requests
res = requests.get("https://pypi.org/rss/project/ai2thor/releases.xml")
res.raise_for_status()
root = ET.fromstring(res.content)
latest_version = None
for title in root.findall("./channel/item/title"):
latest_version = title.text
break
# make sure that the tag is on this commit
commit_tags = (
subprocess.check_output("git tag --points-at", shell=True)
.decode("ascii")
.strip()
.split("\n")
)
if version not in commit_tags:
raise Exception("tag %s is not on current commit" % version)
commit_id = git_commit_id()
res = requests.get("https://api.github.com/repos/allenai/ai2thor/commits?sha=main")
res.raise_for_status()
if commit_id not in map(lambda c: c["sha"], res.json()):
raise Exception("tag %s is not off the main branch" % version)
if not re.match(r"^[0-9]{1,3}\.+[0-9]{1,3}\.[0-9]{1,3}$", version):
raise Exception("invalid version: %s" % version)
for plat in ai2thor.build.AUTO_BUILD_PLATFORMS:
commit_build = ai2thor.build.Build(plat, commit_id, False)
if not commit_build.exists():
raise Exception("Build does not exist for %s/%s" % (commit_id, plat.name()))
current_maj, current_min, current_sub = list(map(int, latest_version.split(".")))
next_maj, next_min, next_sub = list(map(int, version.split(".")))
if (
(next_maj == current_maj + 1)
or (next_maj == current_maj and next_min == current_min + 1)
or (
next_maj == current_maj
and next_min == current_min
and next_sub >= current_sub + 1
)
):
if os.path.isdir("dist"):
shutil.rmtree("dist")
generate_quality_settings(context)
with open("ai2thor/_builds.py", "w") as fi:
fi.write("# GENERATED FILE - DO NOT EDIT\n")
fi.write("COMMIT_ID = '%s'\n" % commit_id)
with open("ai2thor/_version.py", "w") as fi:
fi.write("# Copyright Allen Institute for Artificial Intelligence 2021\n")
fi.write("# GENERATED FILE - DO NOT EDIT\n")
fi.write("__version__ = '%s'\n" % (version))
subprocess.check_call("python setup.py clean --all", shell=True)
subprocess.check_call(
"python setup.py sdist bdist_wheel --universal", shell=True
)
else:
raise Exception(
"Invalid version increment: new version=%s,current version=%s; must increment the major, minor or patch by only 1"
% (version, latest_version)
)
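# Illustrative sketch (not part of the original file): the version gate in
# build_pip accepts exactly one of major+1, same major with minor+1, or the
# same major.minor with a forward patch bump. Versions below are hypothetical.
def _example_version_increments():
    def ok(current, new):
        cmaj, cmin, csub = map(int, current.split("."))
        nmaj, nmin, nsub = map(int, new.split("."))
        return (
            nmaj == cmaj + 1
            or (nmaj == cmaj and nmin == cmin + 1)
            or (nmaj == cmaj and nmin == cmin and nsub >= csub + 1)
        )
    assert ok("2.7.4", "2.7.5") and ok("2.7.4", "2.8.0") and ok("2.7.4", "3.0.0")
    assert not ok("2.7.4", "2.9.0")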
@task
def fetch_source_textures(context):
import ai2thor.downloader
zip_data = ai2thor.downloader.download(
"http://s3-us-west-2.amazonaws.com/ai2-thor/assets/source-textures.zip",
"source-textures",
"75476d60a05747873f1173ba2e1dbe3686500f63bcde3fc3b010eea45fa58de7",
)
z = zipfile.ZipFile(io.BytesIO(zip_data))
z.extractall(os.getcwd())
def build_log_push(build_info, include_private_scenes):
with open(build_info["log"]) as f:
build_log = f.read() + "\n" + build_info.get("build_exception", "")
build_log_key = "builds/" + build_info["log"]
s3 = boto3.resource("s3")
bucket = ai2thor.build.PUBLIC_S3_BUCKET
acl = "public-read"
if include_private_scenes:
bucket = ai2thor.build.PRIVATE_S3_BUCKET
acl = "private"
s3.Object(bucket, build_log_key).put(
Body=build_log, ACL=acl, ContentType="text/plain"
)
def archive_push(unity_path, build_path, build_dir, build_info, include_private_scenes):
threading.current_thread().success = False
archive_name = os.path.join(unity_path, build_path)
zip_buf = io.BytesIO()
zipf = zipfile.ZipFile(zip_buf, "w", zipfile.ZIP_DEFLATED)
add_files(zipf, os.path.join(unity_path, build_dir))
zipf.close()
zip_buf.seek(0)
zip_data = zip_buf.read()
push_build(archive_name, zip_data, include_private_scenes)
build_log_push(build_info, include_private_scenes)
print("Build successful")
threading.current_thread().success = True
@task
def pre_test(context):
import ai2thor.controller
c = ai2thor.controller.Controller()
os.makedirs("unity/builds/%s" % c.build_name())
shutil.move(
os.path.join("unity", "builds", c.build_name() + ".app"),
"unity/builds/%s" % c.build_name(),
)
def clean():
import scripts.update_private
# a deploy key is used on the build server and a .ssh/config entry has been added
# to point to the deploy key called ai2thor-private-github
scripts.update_private.private_repo_url = (
"git@ai2thor-private-github:allenai/ai2thor-private.git"
)
subprocess.check_call("git reset --hard", shell=True)
subprocess.check_call("git clean -f -d -x", shell=True)
shutil.rmtree("unity/builds", ignore_errors=True)
shutil.rmtree(scripts.update_private.private_dir, ignore_errors=True)
scripts.update_private.checkout_branch()
def ci_prune_cache(cache_dir):
entries = {}
for e in os.scandir(cache_dir):
if os.path.isdir(e.path):
mtime = os.stat(e.path).st_mtime
entries[e.path] = mtime
# keeping the most recent 60 entries (this keeps the cache around 300GB-500GB)
sorted_paths = sorted(entries.keys(), key=lambda x: entries[x])[:-60]
for path in sorted_paths:
if os.path.basename(path) != "main":
logger.info("pruning cache directory: %s" % path)
shutil.rmtree(path)
def link_build_cache(branch):
library_path = os.path.join("unity", "Library")
logger.info("linking build cache for %s" % branch)
if os.path.lexists(library_path):
os.unlink(library_path)
# this takes care of branches with '/' in them
# to avoid implicitly creating directories under the cache dir
encoded_branch = re.sub(r"[^a-zA-Z0-9_\-.]", "_", re.sub("_", "__", branch))
cache_base_dir = os.path.join(os.environ["HOME"], "cache")
ci_prune_cache(cache_base_dir)
main_cache_dir = os.path.join(cache_base_dir, "main")
branch_cache_dir = os.path.join(cache_base_dir, encoded_branch)
# use the main cache as a starting point to avoid
# having to re-import all assets, which can take up to 1 hour
if not os.path.exists(branch_cache_dir) and os.path.exists(main_cache_dir):
logger.info("copying main cache for %s" % encoded_branch)
subprocess.check_call(
"cp -a %s %s" % (main_cache_dir, branch_cache_dir), shell=True
)
logger.info("copying main cache complete for %s" % encoded_branch)
branch_library_cache_dir = os.path.join(branch_cache_dir, "Library")
os.makedirs(branch_library_cache_dir, exist_ok=True)
os.symlink(branch_library_cache_dir, library_path)
# update atime/mtime to simplify cache pruning
os.utime(branch_cache_dir)
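# Illustrative sketch (not part of the original file): how branch names are
# encoded into cache directory names by link_build_cache. Branch names below
# are hypothetical; existing underscores are doubled before '/' and other
# unsafe characters collapse to '_', keeping the mapping unambiguous.
def _example_branch_cache_encoding():
    def encode(branch):
        return re.sub(r"[^a-zA-Z0-9_\-.]", "_", re.sub("_", "__", branch))
    assert encode("feature/my-fix") == "feature_my-fix"
    assert encode("erick_challenge") == "erick__challenge"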
def travis_build(build_id):
import requests
res = requests.get(
"https://api.travis-ci.com/build/%s" % build_id,
headers={
"Accept": "application/json",
"Content-Type": "application/json",
"Travis-API-Version": "3",
},
)
res.raise_for_status()
return res.json()
def pending_travis_build():
import requests
res = requests.get(
"https://api.travis-ci.com/repo/3459357/builds?include=build.id%2Cbuild.commit%2Cbuild.branch%2Cbuild.request%2Cbuild.created_by%2Cbuild.repository&build.state=started&sort_by=started_at:desc",
headers={
"Accept": "application/json",
"Content-Type": "application/json",
"Travis-API-Version": "3",
},
)
for b in res.json()["builds"]:
tag = None
if b["tag"]:
tag = b["tag"]["name"]
return {
"branch": b["branch"]["name"],
"commit_id": b["commit"]["sha"],
"tag": tag,
"id": b["id"],
}
def pytest_s3_object(commit_id):
s3 = boto3.resource("s3")
pytest_key = "builds/pytest-%s.json" % commit_id
return s3.Object(ai2thor.build.PUBLIC_S3_BUCKET, pytest_key)
def ci_pytest(build):
import requests
logger.info("running pytest for %s %s" % (build["branch"], build["commit_id"]))
commit_id = git_commit_id()
s3_obj = pytest_s3_object(commit_id)
s3_pytest_url = "http://s3-us-west-2.amazonaws.com/%s/%s" % (
s3_obj.bucket_name,
s3_obj.key,
)
logger.info("pytest url %s" % s3_pytest_url)
res = requests.get(s3_pytest_url)
if res.status_code == 200 and res.json()["success"]:
# if we already have a successful pytest, skip running
logger.info(
"pytest results already exist for %s %s"
% (build["branch"], build["commit_id"])
)
return
proc = subprocess.run(
"pytest", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
result = dict(
success=proc.returncode == 0,
stdout=proc.stdout.decode("ascii"),
stderr=proc.stderr.decode("ascii"),
)
s3_obj.put(
Body=json.dumps(result), ACL="public-read", ContentType="application/json"
)
logger.info("finished pytest for %s %s" % (build["branch"], build["commit_id"]))
@task
def ci_build(context):
import fcntl
lock_f = open(os.path.join(os.environ["HOME"], ".ci-build.lock"), "w")
try:
fcntl.flock(lock_f, fcntl.LOCK_EX | fcntl.LOCK_NB)
build = pending_travis_build()
blacklist_branches = ["vids", "video"]
if build and build["branch"] not in blacklist_branches:
logger.info(
"pending build for %s %s" % (build["branch"], build["commit_id"])
)
clean()
subprocess.check_call("git fetch", shell=True)
subprocess.check_call("git checkout %s --" % build["branch"], shell=True)
subprocess.check_call(
"git checkout -qf %s" % build["commit_id"], shell=True
)
private_scene_options = [False]
if build["branch"] == "erick/challenge2021":
os.environ["INCLUDE_PRIVATE_SCENES"] = "true"
elif build["branch"] == "erick/challenge2021-eval":
private_scene_options = [False, True]
procs = []
for include_private_scenes in private_scene_options:
for arch in ["OSXIntel64", "Linux64"]:
logger.info(
"starting build for %s %s %s"
% (arch, build["branch"], build["commit_id"])
)
rdir = os.path.normpath(
os.path.dirname(os.path.realpath(__file__)) + "/unity/builds"
)
commit_build = ai2thor.build.Build(
arch,
build["commit_id"],
include_private_scenes=include_private_scenes,
releases_dir=rdir,
)
if commit_build.exists():
logger.info(
"found build for commit %s %s" % (build["commit_id"], arch)
)
# download the build so that we can run the tests
if arch == "OSXIntel64":
commit_build.download()
else:
# this is done here so that when a tag build request arrives and the commit_id has already
# been built, we avoid bootstrapping the cache since we short circuited on the line above
link_build_cache(build["branch"])
p = ci_build_arch(arch, include_private_scenes)
logger.info(
"finished build for %s %s %s"
% (arch, build["branch"], build["commit_id"])
)
procs.append(p)
# don't run tests for a tag since results should exist
# for the branch commit
if build["tag"] is None:
# it's possible that the cache doesn't get linked if the builds
# succeeded during an earlier run
link_build_cache(build["branch"])
ci_test_utf(context, build)
pytest_proc = multiprocessing.Process(target=ci_pytest, args=(build,))
pytest_proc.start()
procs.append(pytest_proc)
# give the travis poller time to see the result
for i in range(6):
b = travis_build(build["id"])
logger.info("build state for %s: %s" % (build["id"], b["state"]))
if b["state"] != "started":
break
time.sleep(10)
# allow webgl to be force deployed with #webgl-deploy in the commit comment
if (
build["branch"] in ["main", "demo-updates"]
and "#webgl-deploy" in git_commit_comment()
):
ci_build_webgl(context, build["commit_id"])
for p in procs:
if p:
logger.info(
"joining proc %s for %s %s"
% (p.pid, build["branch"], build["commit_id"])
)
p.join()
build_pip_commit(context)
push_pip_commit(context)
generate_pypi_index(context)
logger.info("build complete %s %s" % (build["branch"], build["commit_id"]))
# if we are in off hours, allow the nightly webgl build to be performed
# elif datetime.datetime.now().hour == 2:
# clean()
# subprocess.check_call("git checkout main", shell=True)
# subprocess.check_call("git pull origin main", shell=True)
# if current_webgl_autodeploy_commit_id() != git_commit_id():
# ci_build_webgl(context, git_commit_id())
fcntl.flock(lock_f, fcntl.LOCK_UN)
except io.BlockingIOError as e:
pass
lock_f.close()
@task
def ci_build_webgl(context, commit_id):
branch = "main"
logger.info("starting auto-build webgl build deploy %s %s" % (branch, commit_id))
# linking here in the event we didn't link above since the builds had
# already completed. Omitting this will cause the webgl build
# to import all assets from scratch into a new unity/Library
link_build_cache(branch)
webgl_build_deploy_demo(context, verbose=True, content_addressable=True, force=True)
logger.info("finished webgl build deploy %s %s" % (branch, commit_id))
update_webgl_autodeploy_commit_id(commit_id)
def ci_build_arch(arch, include_private_scenes=False):
commit_id = git_commit_id()
unity_path = "unity"
build_name = ai2thor.build.build_name(arch, commit_id, include_private_scenes)
build_dir = os.path.join("builds", build_name)
build_path = build_dir + ".zip"
build_info = {}
proc = None
try:
build_info["log"] = "%s.log" % (build_name,)
env = {}
if include_private_scenes:
env["INCLUDE_PRIVATE_SCENES"] = "true"
_build(unity_path, arch, build_dir, build_name, env)
print("pushing archive")
proc = multiprocessing.Process(
target=archive_push,
args=(
unity_path,
build_path,
build_dir,
build_info,
include_private_scenes,
),
)
proc.start()
except Exception as e:
print("Caught exception %s" % e)
build_info["build_exception"] = "Exception building: %s" % e
build_log_push(build_info, include_private_scenes)
return proc
@task
def poll_ci_build(context):
import requests.exceptions
import requests
commit_id = git_commit_id()
last_emit_time = 0
for i in range(360):
missing = False
# must emit something at least once every 10 minutes
# otherwise travis will time out the build
if (time.time() - last_emit_time) > 540:
print(".", end="")
last_emit_time = time.time()
for plat in ai2thor.build.AUTO_BUILD_PLATFORMS:
commit_build = ai2thor.build.Build(plat, commit_id, False)
try:
if not commit_build.log_exists():
missing = True
# we observe errors when polling AWS periodically - we don't want these to stop
# the build
except requests.exceptions.ConnectionError as e:
print("Caught exception %s" % e)
if not missing:
break
sys.stdout.flush()
time.sleep(10)
for plat in ai2thor.build.AUTO_BUILD_PLATFORMS:
commit_build = ai2thor.build.Build(plat, commit_id, False)
if not commit_build.exists():
print("Build log url: %s" % commit_build.log_url)
raise Exception("Failed to build %s for commit: %s " % (arch, commit_id))
pytest_missing = True
for i in range(30):
s3_obj = pytest_s3_object(commit_id)
s3_pytest_url = "http://s3-us-west-2.amazonaws.com/%s/%s" % (
s3_obj.bucket_name,
s3_obj.key,
)
print("pytest url %s" % s3_pytest_url)
res = requests.get(s3_pytest_url)
if res.status_code == 200:
pytest_missing = False
pytest_result = res.json()
print(pytest_result["stdout"]) # print so that it appears in travis log
print(pytest_result["stderr"])
if not pytest_result["success"]:
raise Exception("pytest failure")
break
time.sleep(10)
if pytest_missing:
raise Exception("Missing pytest output")
@task
def build(context, local=False):
version = datetime.datetime.now().strftime("%Y%m%d%H%M")
builds = {"Docker": {"tag": version}}
threads = []
for include_private_scenes in (True, False):
for plat in ai2thor.build.AUTO_BUILD_PLATFORMS:
env = {}
if include_private_scenes:
env["INCLUDE_PRIVATE_SCENES"] = "true"
unity_path = "unity"
build_name = ai2thor.build.build_name(plat.name(), version, include_private_scenes)
build_dir = os.path.join("builds", build_name)
build_path = build_dir + ".zip"
build_info = builds[plat.name()] = {}
build_info["log"] = "%s.log" % (build_name,)
_build(unity_path, plat.name(), build_dir, build_name, env=env)
t = threading.Thread(
target=archive_push,
args=(
unity_path,
build_path,
build_dir,
build_info,
include_private_scenes,
),
)
t.start()
threads.append(t)
# dp.join()
# if dp.exitcode != 0:
# raise Exception("Exception with docker build")
for t in threads:
t.join()
if not t.success:
raise Exception("Error with thread")
generate_quality_settings(context)
@task
def interact(
ctx,
scene,
editor_mode=False,
local_build=False,
image=False,
depth_image=False,
class_image=False,
object_image=False,
metadata=False,
robot=False,
port=8200,
host="127.0.0.1",
image_directory=".",
width=300,
height=300,
include_private_scenes=False,
noise=False,
):
import ai2thor.controller
import ai2thor.robot_controller
if image_directory != ".":
if os.path.exists(image_directory):
shutil.rmtree(image_directory)
os.makedirs(image_directory)
if not robot:
env = ai2thor.controller.Controller(
host=host,
port=port,
width=width,
height=height,
local_build=local_build,
image_dir=image_directory,
start_unity=False if editor_mode else True,
save_image_per_frame=True,
include_private_scenes=include_private_scenes,
add_depth_noise=noise,
scene=scene,
)
else:
env = ai2thor.robot_controller.Controller(
host=host,
port=port,
width=width,
height=height,
image_dir=image_directory,
save_image_per_frame=True,
)
env.reset(scene)
initialize_event = env.step(
dict(
action="Initialize",
gridSize=0.25,
renderInstanceSegmentation=object_image,
renderSemanticSegmentation=class_image,
renderDepthImage=depth_image,
)
)
from ai2thor.interact import InteractiveControllerPrompt
InteractiveControllerPrompt.write_image(
initialize_event,
image_directory,
"_init",
image_per_frame=True,
semantic_segmentation_frame=class_image,
instance_segmentation_frame=object_image,
color_frame=image,
depth_frame=depth_image,
metadata=metadata,
)
env.interact(
semantic_segmentation_frame=class_image,
instance_segmentation_frame=object_image,
depth_frame=depth_image,
color_frame=image,
metadata=metadata,
)
env.stop()
@task
def get_depth(
ctx,
scene=None,
image=False,
depth_image=False,
class_image=False,
object_image=False,
metadata=False,
port=8200,
host="127.0.0.1",
image_directory=".",
number=1,
local_build=False,
teleport=None,
rotation=0,
):
import ai2thor.controller
import ai2thor.robot_controller
if image_directory != ".":
if os.path.exists(image_directory):
shutil.rmtree(image_directory)
os.makedirs(image_directory)
if scene is None:
env = ai2thor.robot_controller.Controller(
host=host,
port=port,
width=600,
height=600,
image_dir=image_directory,
save_image_per_frame=True,
)
else:
env = ai2thor.controller.Controller(
width=600, height=600, local_build=local_build
)
if scene is not None:
env.reset(scene)
initialize_event = env.step(
dict(
action="Initialize",
gridSize=0.25,
renderInstanceSegmentation=object_image,
renderSemanticSegmentation=class_image,
renderDepthImage=depth_image,
agentMode="locobot",
fieldOfView=59,
continuous=True,
snapToGrid=False,
)
)
from ai2thor.interact import InteractiveControllerPrompt
if scene is not None:
teleport_arg = dict(
action="TeleportFull", y=0.9010001, rotation=dict(x=0, y=rotation, z=0)
)
if teleport is not None:
teleport = [float(pos) for pos in teleport.split(",")]
t_size = len(teleport)
if 1 <= t_size:
teleport_arg["x"] = teleport[0]
if 2 <= t_size:
teleport_arg["z"] = teleport[1]
if 3 <= t_size:
teleport_arg["y"] = teleport[2]
evt = env.step(teleport_arg)
InteractiveControllerPrompt.write_image(
evt,
image_directory,
"_{}".format("teleport"),
image_per_frame=True,
semantic_segmentation_frame=class_image,
instance_segmentation_frame=object_image,
color_frame=image,
depth_frame=depth_image,
metadata=metadata,
)
InteractiveControllerPrompt.write_image(
initialize_event,
image_directory,
"_init",
image_per_frame=True,
semantic_segmentation_frame=class_image,
instance_segmentation_frame=object_image,
color_frame=image,
depth_frame=depth_image,
metadata=metadata,
)
for i in range(number):
event = env.step(action="MoveAhead", moveMagnitude=0.0)
InteractiveControllerPrompt.write_image(
event,
image_directory,
"_{}".format(i),
image_per_frame=True,
semantic_segmentation_frame=class_image,
instance_segmentation_frame=object_image,
color_frame=image,
depth_frame=depth_image,
metadata=metadata,
)
env.stop()
@task
def inspect_depth(
ctx, directory, all=False, indices=None, jet=False, under_score=False
):
import numpy as np
import cv2
import glob
under_prefix = "_" if under_score else ""
regex_str = "depth{}(.*)\.png".format(under_prefix)
def sort_key_function(name):
split_name = name.split("/")
x = re.search(regex_str, split_name[len(split_name) - 1]).group(1)
try:
val = int(x)
return val
except ValueError:
return -1
if indices is None or all:
images = sorted(
glob.glob("{}/depth{}*.png".format(directory, under_prefix)),
key=sort_key_function,
)
print(images)
else:
images = ["depth{}{}.png".format(under_prefix, i) for i in indices.split(",")]
for depth_filename in images:
# depth_filename = os.path.join(directory, "depth_{}.png".format(index))
split_fn = depth_filename.split("/")
index = re.search(regex_str, split_fn[len(split_fn) - 1]).group(1)
print("index {}".format(index))
print("Inspecting: '{}'".format(depth_filename))
depth_raw_filename = os.path.join(
directory, "depth_raw{}{}.npy".format("_" if under_score else "", index)
)
raw_depth = np.load(depth_raw_filename)
if jet:
mn = np.min(raw_depth)
mx = np.max(raw_depth)
print("min depth value: {}, max depth: {}".format(mn, mx))
norm = (((raw_depth - mn).astype(np.float32) / (mx - mn)) * 255.0).astype(
np.uint8
)
img = cv2.applyColorMap(norm, cv2.COLORMAP_JET)
else:
grayscale = (
255.0 / raw_depth.max() * (raw_depth - raw_depth.min())
).astype(np.uint8)
print("max {} min {}".format(raw_depth.max(), raw_depth.min()))
img = grayscale
print(raw_depth.shape)
def inspect_pixel(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
print("Pixel at x: {}, y: {} ".format(y, x))
print(raw_depth[y][x])
cv2.namedWindow("image")
cv2.setMouseCallback("image", inspect_pixel)
cv2.imshow("image", img)
cv2.waitKey(0)
@task
def real_2_sim(
ctx, source_dir, index, scene, output_dir, rotation=0, local_build=False, jet=False
):
import numpy as np
import cv2
from ai2thor.util.transforms import transform_real_2_sim
depth_metadata_fn = os.path.join(source_dir, "metadata_{}.json".format(index))
color_real_fn = os.path.join(source_dir, "color_{}.png".format(index))
color_sim_fn = os.path.join(output_dir, "color_teleport.png")
with open(depth_metadata_fn, "r") as f:
metadata = json.load(f)
pos = metadata["agent"]["position"]
sim_pos = transform_real_2_sim(pos)
teleport_arg = "{},{},{}".format(sim_pos["x"], sim_pos["z"], sim_pos["y"])
print(sim_pos)
print(teleport_arg)
inspect_depth(ctx, source_dir, indices=index, under_score=True, jet=jet)
get_depth(
ctx,
scene=scene,
image=True,
depth_image=True,
class_image=False,
object_image=False,
metadata=True,
image_directory=output_dir,
number=1,
local_build=local_build,
teleport=teleport_arg,
rotation=rotation,
)
im = cv2.imread(color_real_fn)
cv2.imshow("color_real.png", im)
im2 = cv2.imread(color_sim_fn)
cv2.imshow("color_sim.png", im2)
inspect_depth(ctx, output_dir, indices="teleport", under_score=True, jet=jet)
@task
def noise_depth(ctx, directory, show=False):
import glob
import cv2
import numpy as np
def imshow_components(labels):
# Map component labels to hue val
label_hue = np.uint8(179 * labels / np.max(labels))
blank_ch = 255 * np.ones_like(label_hue)
labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
# cvt to BGR for display
labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)
# set bg label to black
labeled_img[label_hue == 0] = 0
if show:
cv2.imshow("labeled.png", labeled_img)
cv2.waitKey()
images = glob.glob("{}/depth_*.png".format(directory))
indices = []
for image_file in images:
print(image_file)
grayscale_img = cv2.imread(image_file, 0)
img = grayscale_img
img_size = img.shape
img = cv2.threshold(img, 30, 255, cv2.THRESH_BINARY_INV)[1]
ret, labels = cv2.connectedComponents(img)
print("Components: {}".format(ret))
imshow_components(labels)
print(img_size[0])
indices_top_left = np.where(labels == labels[0][0])
indices_top_right = np.where(labels == labels[0][img_size[1] - 1])
indices_bottom_left = np.where(labels == labels[img_size[0] - 1][0])
indices_bottom_right = np.where(
labels == labels[img_size[0] - 1][img_size[1] - 1]
)
indices = [
indices_top_left,
indices_top_right,
indices_bottom_left,
indices_bottom_right,
]
blank_image = np.zeros((300, 300, 1), np.uint8)
blank_image.fill(255)
blank_image[indices_top_left] = 0
blank_image[indices_top_right] = 0
blank_image[indices_bottom_left] = 0
blank_image[indices_bottom_right] = 0
if show:
cv2.imshow("labeled.png", blank_image)
cv2.waitKey()
break
compressed = []
for indices_arr in indices:
unique_e, counts = np.unique(indices_arr[0], return_counts=True)
compressed.append(counts)
np.save("depth_noise", compressed)
@task
def release(ctx):
x = subprocess.check_output("git status --porcelain", shell=True).decode("ASCII")
for line in x.split("\n"):
if line.strip().startswith("??") or len(line.strip()) == 0:
continue
raise Exception(
"Found locally modified changes from 'git status' - please commit and push or revert"
)
import ai2thor._version
tag = "v" + ai2thor._version.__version__
subprocess.check_call('git tag -a %s -m "release %s"' % (tag, tag), shell=True)
subprocess.check_call("git push origin main --tags", shell=True)
subprocess.check_call(
"twine upload -u ai2thor dist/ai2thor-{ver}-* dist/ai2thor-{ver}.*".format(
ver=ai2thor._version.__version__
),
shell=True,
)
@task
def check_visible_objects_closed_receptacles(ctx, start_scene, end_scene):
from itertools import product
import ai2thor.controller
controller = ai2thor.controller.BFSController()
controller.start()
for i in range(int(start_scene), int(end_scene)):
print("working on floorplan %s" % i)
controller.search_all_closed("FloorPlan%s" % i)
visibility_object_id = None
visibility_object_types = ["Mug", "CellPhone", "SoapBar"]
for obj in controller.last_event.metadata["objects"]:
if obj["pickupable"]:
controller.step(
action=dict(
action="PickupObject",
objectId=obj["objectId"],
forceVisible=True,
)
)
if (
visibility_object_id is None
and obj["objectType"] in visibility_object_types
):
visibility_object_id = obj["objectId"]
if visibility_object_id is None:
raise Exception("Couldn't get a visibility_object")
bad_receptacles = set()
for point in controller.grid_points:
controller.step(
dict(action="Teleport", x=point["x"], y=point["y"], z=point["z"]),
raise_for_failure=True,
)
for rot, hor in product(controller.rotations, controller.horizons):
event = controller.step(
dict(action="RotateLook", rotation=rot, horizon=hor),
raise_for_failure=True,
)
for j in event.metadata["objects"]:
if j["receptacle"] and j["visible"] and j["openable"]:
controller.step(
action=dict(
action="Replace",
forceVisible=True,
pivot=0,
receptacleObjectId=j["objectId"],
objectId=visibility_object_id,
)
)
replace_success = controller.last_event.metadata[
"lastActionSuccess"
]
if replace_success:
if (
controller.is_object_visible(visibility_object_id)
and j["objectId"] not in bad_receptacles
):
bad_receptacles.add(j["objectId"])
print("Got bad receptacle: %s" % j["objectId"])
# import cv2
# cv2.imshow('aoeu', controller.last_event.cv2image())
# cv2.waitKey(0)
controller.step(
action=dict(
action="PickupObject",
objectId=visibility_object_id,
forceVisible=True,
)
)
@task
def benchmark(
ctx,
screen_width=600,
screen_height=600,
editor_mode=False,
out="benchmark.json",
verbose=False,
local_build=False,
commit_id=ai2thor.build.COMMIT_ID,
):
import ai2thor.controller
import random
move_actions = ["MoveAhead", "MoveBack", "MoveLeft", "MoveRight"]
rotate_actions = ["RotateRight", "RotateLeft"]
look_actions = ["LookUp", "LookDown"]
all_actions = move_actions + rotate_actions + look_actions
def test_routine(env, test_actions, n=100):
average_frame_time = 0
for i in range(n):
action = random.choice(test_actions)
start = time.time()
env.step(dict(action=action))
end = time.time()
frame_time = end - start
average_frame_time += frame_time
average_frame_time = average_frame_time / float(n)
return average_frame_time
def benchmark_actions(env, action_name, actions, n=100):
if verbose:
print("--- Actions {}".format(actions))
frame_time = test_routine(env, actions)
if verbose:
print("{} average: {}".format(action_name, 1 / frame_time))
return 1 / frame_time
args = {}
if editor_mode:
args["port"] = 8200
args["start_unity"] = False
elif local_build:
args["local_build"] = local_build
else:
args["commit_id"] = commit_id
env = ai2thor.controller.Controller(
width=screen_width, height=screen_height, **args
)
# Kitchens: FloorPlan1 - FloorPlan30
# Living rooms: FloorPlan201 - FloorPlan230
# Bedrooms: FloorPlan301 - FloorPlan330
# Bathrooms: FloorPlan401 - FloorPlan430
room_ranges = [(1, 30), (201, 230), (301, 330), (401, 430)]
benchmark_map = {"scenes": {}}
total_average_ft = 0
scene_count = 0
print("Start loop")
for room_range in room_ranges:
for i in range(room_range[0], room_range[1]):
scene = "FloorPlan{}_physics".format(i)
scene_benchmark = {}
if verbose:
print("Loading scene {}".format(scene))
            env.reset(scene)
env.step(dict(action="Initialize", gridSize=0.25))
if verbose:
print("------ {}".format(scene))
sample_number = 100
action_tuples = [
("move", move_actions, sample_number),
("rotate", rotate_actions, sample_number),
("look", look_actions, sample_number),
("all", all_actions, sample_number),
]
scene_average_fr = 0
for action_name, actions, n in action_tuples:
ft = benchmark_actions(env, action_name, actions, n)
scene_benchmark[action_name] = ft
scene_average_fr += ft
scene_average_fr = scene_average_fr / float(len(action_tuples))
total_average_ft += scene_average_fr
if verbose:
print("Total average frametime: {}".format(scene_average_fr))
benchmark_map["scenes"][scene] = scene_benchmark
scene_count += 1
benchmark_map["average_framerate_seconds"] = total_average_ft / scene_count
with open(out, "w") as f:
f.write(json.dumps(benchmark_map, indent=4, sort_keys=True))
env.stop()
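# --- Hedged example (not part of the original tasks file) ---------------------
# A minimal sketch of how the benchmark.json written by the `benchmark` task
# above could be summarised. The structure assumed here (a "scenes" map of
# per-action framerates plus "average_framerate_seconds") mirrors what the task
# writes; the helper name itself is purely illustrative.
def _example_print_benchmark_summary(path="benchmark.json"):
    import json
    with open(path) as f:
        report = json.load(f)
    for scene, framerates in sorted(report.get("scenes", {}).items()):
        print(scene, {action: round(fps, 2) for action, fps in framerates.items()})
    print("overall average framerate:", report.get("average_framerate_seconds"))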
def list_objects_with_metadata(bucket):
keys = {}
s3c = boto3.client("s3")
continuation_token = None
while True:
if continuation_token:
objects = s3c.list_objects_v2(
Bucket=bucket, ContinuationToken=continuation_token
)
else:
objects = s3c.list_objects_v2(Bucket=bucket)
for i in objects.get("Contents", []):
keys[i["Key"]] = i
if "NextContinuationToken" in objects:
continuation_token = objects["NextContinuationToken"]
else:
break
return keys
def s3_etag_data(data):
h = hashlib.md5()
h.update(data)
return '"' + h.hexdigest() + '"'
cache_seconds = 31536000
@task
def webgl_deploy(
ctx,
bucket=ai2thor.build.PUBLIC_WEBGL_S3_BUCKET,
prefix="local",
source_dir="builds",
target_dir="",
verbose=False,
force=False,
extensions_no_cache="",
):
from pathlib import Path
from os.path import isfile, join, isdir
content_types = {
".js": "application/javascript; charset=utf-8",
".html": "text/html; charset=utf-8",
".ico": "image/x-icon",
".svg": "image/svg+xml; charset=utf-8",
".css": "text/css; charset=utf-8",
".png": "image/png",
".txt": "text/plain",
".jpg": "image/jpeg",
".unityweb": "application/octet-stream",
".json": "application/json",
}
content_encoding = {".unityweb": "gzip"}
bucket_name = bucket
s3 = boto3.resource("s3")
current_objects = list_objects_with_metadata(bucket_name)
no_cache_extensions = {".txt", ".html", ".json", ".js"}
    no_cache_extensions = no_cache_extensions.union(set(extensions_no_cache.split(",")))
def walk_recursive(path, func, parent_dir=""):
for file_name in os.listdir(path):
f_path = join(path, file_name)
relative_path = join(parent_dir, file_name)
if isfile(f_path):
key = Path(join(target_dir, relative_path))
func(f_path, key.as_posix())
elif isdir(f_path):
walk_recursive(f_path, func, relative_path)
def upload_file(f_path, key):
_, ext = os.path.splitext(f_path)
if verbose:
print("'{}'".format(key))
with open(f_path, "rb") as f:
file_data = f.read()
etag = s3_etag_data(file_data)
kwargs = {}
if ext in content_encoding:
kwargs["ContentEncoding"] = content_encoding[ext]
if (
not force
and key in current_objects
and etag == current_objects[key]["ETag"]
):
if verbose:
print("ETag match - skipping %s" % key)
return
if ext in content_types:
cache = (
"no-cache, no-store, must-revalidate"
if ext in no_cache_extensions
else "public, max-age={}".format(cache_seconds)
)
now = datetime.datetime.utcnow()
expires = (
now
if ext == ".html" or ext == ".txt"
else now + datetime.timedelta(seconds=cache_seconds)
)
s3.Object(bucket_name, key).put(
Body=file_data,
ACL="public-read",
ContentType=content_types[ext],
CacheControl=cache,
Expires=expires,
**kwargs,
)
else:
if verbose:
print(
"Warning: Content type for extension '{}' not defined,"
" uploading with no content type".format(ext)
)
                s3.Object(bucket_name, key).put(Body=file_data, ACL="public-read")
if prefix is not None:
build_path = _webgl_local_build_path(prefix, source_dir)
else:
build_path = source_dir
if verbose:
print("Build path: '{}'".format(build_path))
print("Uploading...")
walk_recursive(build_path, upload_file)
@task
def webgl_build_deploy_demo(ctx, verbose=False, force=False, content_addressable=False):
# Main demo
demo_selected_scene_indices = [
1,
3,
7,
29,
30,
204,
209,
221,
224,
227,
301,
302,
308,
326,
330,
401,
403,
411,
422,
430,
]
scenes = ["FloorPlan{}_physics".format(x) for x in demo_selected_scene_indices]
webgl_build(
ctx,
scenes=",".join(scenes),
directory="builds/demo",
content_addressable=content_addressable,
)
webgl_deploy(
ctx, source_dir="builds/demo", target_dir="demo", verbose=verbose, force=force
)
if verbose:
print("Deployed selected scenes to bucket's 'demo' directory")
# Full framework demo
kitchens = [f"FloorPlan{i}_physics" for i in range(1, 31)]
living_rooms = [f"FloorPlan{200 + i}_physics" for i in range(1, 31)]
bedrooms = [f"FloorPlan{300 + i}_physics" for i in range(1, 31)]
bathrooms = [f"FloorPlan{400 + i}_physics" for i in range(1, 31)]
robothor_train = [
f"FloorPlan_Train{i}_{j}" for i in range(1, 13) for j in range(1, 6)
]
robothor_val = [f"FloorPlan_Val{i}_{j}" for i in range(1, 4) for j in range(1, 6)]
scenes = (
kitchens + living_rooms + bedrooms + bathrooms + robothor_train + robothor_val
)
webgl_build(
ctx,
scenes=",".join(scenes),
content_addressable=content_addressable,
)
webgl_deploy(ctx, verbose=verbose, force=force, target_dir="full")
if verbose:
print("Deployed all scenes to bucket's root.")
def current_webgl_autodeploy_commit_id():
s3 = boto3.resource("s3")
try:
res = s3.Object(ai2thor.build.PUBLIC_WEBGL_S3_BUCKET, "autodeploy.json").get()
return json.loads(res["Body"].read())["commit_id"]
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "NoSuchKey":
return None
else:
raise e
def update_webgl_autodeploy_commit_id(commit_id):
s3 = boto3.resource("s3")
s3.Object(ai2thor.build.PUBLIC_WEBGL_S3_BUCKET, "autodeploy.json").put(
Body=json.dumps(dict(timestamp=time.time(), commit_id=commit_id)),
ContentType="application/json",
)
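# --- Hedged example (not part of the original tasks file) ---------------------
# The two helpers above define a small contract: autodeploy.json in the WebGL
# bucket holds {"timestamp": <epoch seconds>, "commit_id": <sha>}. A CI job
# could combine them roughly like this; `build_commit_id` is assumed to come
# from the surrounding build environment.
def _example_should_autodeploy(build_commit_id):
    deployed_commit_id = current_webgl_autodeploy_commit_id()
    return deployed_commit_id is None or deployed_commit_id != build_commit_id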
@task
def webgl_deploy_all(ctx, verbose=False, individual_rooms=False):
rooms = {
"kitchens": (1, 30),
"livingRooms": (201, 230),
"bedrooms": (301, 330),
"bathrooms": (401, 430),
"foyers": (501, 530),
}
for key, room_range in rooms.items():
range_str = "{}-{}".format(room_range[0], room_range[1])
if verbose:
print("Building for rooms: {}".format(range_str))
build_dir = "builds/{}".format(key)
if individual_rooms:
for i in range(room_range[0], room_range[1]):
floorPlanName = "FloorPlan{}_physics".format(i)
target_s3_dir = "{}/{}".format(key, floorPlanName)
build_dir = "builds/{}".format(target_s3_dir)
webgl_build(ctx, scenes=floorPlanName, directory=build_dir)
webgl_deploy(
ctx, source_dir=build_dir, target_dir=target_s3_dir, verbose=verbose
)
else:
webgl_build(ctx, room_ranges=range_str, directory=build_dir)
webgl_deploy(ctx, source_dir=build_dir, target_dir=key, verbose=verbose)
@task
def webgl_s3_deploy(
ctx, bucket, target_dir, scenes="", verbose=False, all=False, deploy_skip=False
):
"""
Builds and deploys a WebGL unity site
:param context:
:param target_dir: Target s3 bucket
:param target_dir: Target directory in bucket
:param scenes: String of scene numbers to include in the build as a comma separated list e.g. "4,6,230"
:param verbose: verbose build
:param all: overrides 'scenes' parameter and builds and deploys all separate rooms
:param deploy_skip: Whether to skip deployment and do build only.
:return:
"""
rooms = {
"kitchens": (1, 30),
"livingRooms": (201, 230),
"bedrooms": (301, 330),
"bathrooms": (401, 430),
}
if all:
flatten = lambda l: [item for sublist in l for item in sublist]
room_numbers = flatten(
[
[i for i in range(room_range[0], room_range[1])]
for key, room_range in rooms.items()
]
)
else:
room_numbers = [s.strip() for s in scenes.split(",")]
if verbose:
print("Rooms in build: '{}'".format(room_numbers))
for i in room_numbers:
floor_plan_name = "FloorPlan{}_physics".format(i)
if verbose:
print("Building room '{}'...".format(floor_plan_name))
target_s3_dir = "{}/{}".format(target_dir, floor_plan_name)
build_dir = "builds/{}".format(target_s3_dir)
webgl_build(
ctx, scenes=floor_plan_name, directory=build_dir, crowdsource_build=True
)
if verbose:
print("Deploying room '{}'...".format(floor_plan_name))
if not deploy_skip:
webgl_deploy(
ctx,
bucket=bucket,
source_dir=build_dir,
target_dir=target_s3_dir,
verbose=verbose,
extensions_no_cache=".css",
)
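# Hedged usage note (not part of the original tasks file): with invoke, the task
# above would typically be driven from the command line, e.g.
#   invoke webgl-s3-deploy my-bucket rooms --scenes "1,2,230" --verbose
# or, to build every room without uploading:
#   invoke webgl-s3-deploy my-bucket rooms --all --deploy-skip
# The bucket and target directory names here are placeholders.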
@task
def webgl_site_deploy(
context,
template_name,
output_dir,
bucket,
unity_build_dir="",
s3_target_dir="",
force=False,
verbose=False,
):
from pathlib import Path
from os.path import isfile, join, isdir
template_dir = Path("unity/Assets/WebGLTemplates/{}".format(template_name))
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
# os.mkdir(output_dir)
ignore_func = lambda d, files: [
f for f in files if isfile(join(d, f)) and f.endswith(".meta")
]
if unity_build_dir != "":
shutil.copytree(unity_build_dir, output_dir, ignore=ignore_func)
# shutil.copytree(os.path.join(unity_build_dir, "Build"), os.path.join(output_dir, "Build"), ignore=ignore_func)
else:
shutil.copytree(template_dir, output_dir, ignore=ignore_func)
webgl_deploy(
context,
bucket=bucket,
prefix=None,
source_dir=output_dir,
target_dir=s3_target_dir,
verbose=verbose,
force=force,
extensions_no_cache=".css",
)
@task
def mock_client_request(context):
import msgpack
import numpy as np
import requests
import cv2
r = requests.post(
"http://127.0.0.1:9200/step", json=dict(action="MoveAhead", sequenceId=1)
)
payload = msgpack.unpackb(r.content, raw=False)
metadata = payload["metadata"]["agents"][0]
image = np.frombuffer(payload["frames"][0], dtype=np.uint8).reshape(
metadata["screenHeight"], metadata["screenWidth"], 3
)
pprint.pprint(metadata)
cv2.imshow("aoeu", image)
cv2.waitKey(1000)
@task
def start_mock_real_server(context):
import ai2thor.mock_real_server
m = ai2thor.mock_real_server.MockServer(height=300, width=300)
print("Started mock server on port: http://" + m.host + ":" + str(m.port))
m.start()
@task
def create_robothor_dataset(
context,
local_build=False,
editor_mode=False,
width=300,
height=300,
output="robothor-dataset.json",
intermediate_directory=".",
visibility_distance=1.0,
objects_filter=None,
scene_filter=None,
filter_file=None,
):
"""
Creates a dataset for the robothor challenge in `intermediate_directory`
named `robothor-dataset.json`
"""
import ai2thor.controller
import ai2thor.util.metrics as metrics
scene = "FloorPlan_Train1_1"
angle = 45
gridSize = 0.25
# Restrict points visibility_multiplier_filter * visibility_distance away from the target object
visibility_multiplier_filter = 2
scene_object_filter = {}
if filter_file is not None:
with open(filter_file, "r") as f:
scene_object_filter = json.load(f)
print("Filter:")
pprint.pprint(scene_object_filter)
print("Visibility distance: {}".format(visibility_distance))
controller = ai2thor.controller.Controller(
width=width,
height=height,
local_build=local_build,
start_unity=False if editor_mode else True,
scene=scene,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=gridSize,
fieldOfView=60,
rotateStepDegrees=angle,
agentMode="bot",
visibilityDistance=visibility_distance,
)
targets = [
"Apple",
"Baseball Bat",
"BasketBall",
"Bowl",
"Garbage Can",
"House Plant",
"Laptop",
"Mug",
"Remote",
"Spray Bottle",
"Vase",
"Alarm Clock",
"Television",
"Pillow",
]
failed_points = []
if objects_filter is not None:
obj_filter = set([o for o in objects_filter.split(",")])
targets = [o for o in targets if o.replace(" ", "") in obj_filter]
desired_points = 30
event = controller.step(
dict(
action="GetScenesInBuild",
)
)
scenes_in_build = event.metadata["actionReturn"]
objects_types_in_scene = set()
def sqr_dist(a, b):
x = a[0] - b[0]
z = a[2] - b[2]
return x * x + z * z
def sqr_dist_dict(a, b):
x = a["x"] - b["x"]
z = a["z"] - b["z"]
return x * x + z * z
    def get_points(controller, object_type, scene):
print("Getting points in scene: '{}'...: ".format(scene))
controller.reset(scene)
event = controller.step(
dict(
action="ObjectTypeToObjectIds", objectType=object_type.replace(" ", "")
)
)
object_ids = event.metadata["actionReturn"]
if object_ids is None or len(object_ids) > 1 or len(object_ids) == 0:
print("Object type '{}' not available in scene.".format(object_type))
return None
objects_types_in_scene.add(object_type)
object_id = object_ids[0]
event_reachable = controller.step(
dict(action="GetReachablePositions", gridSize=0.25)
)
target_position = controller.step(
action="GetObjectPosition", objectId=object_id
).metadata["actionReturn"]
reachable_positions = event_reachable.metadata["actionReturn"]
reachable_pos_set = set(
[
(pos["x"], pos["y"], pos["z"])
for pos in reachable_positions
# if sqr_dist_dict(pos, target_position) >= visibility_distance * visibility_multiplier_filter
]
)
def filter_points(selected_points, point_set, minimum_distance):
result = set()
for selected in selected_points:
if selected in point_set:
result.add(selected)
remove_set = set(
[
p
for p in point_set
if sqr_dist(p, selected)
<= minimum_distance * minimum_distance
]
)
point_set = point_set.difference(remove_set)
return result
import random
        points = random.sample(list(reachable_pos_set), desired_points * 4)
final_point_set = filter_points(points, reachable_pos_set, gridSize * 2)
print("Total number of points: {}".format(len(final_point_set)))
print("Id {}".format(event.metadata["actionReturn"]))
point_objects = []
eps = 0.0001
counter = 0
for (x, y, z) in final_point_set:
possible_orientations = [0, 90, 180, 270]
pos_unity = dict(x=x, y=y, z=z)
try:
path = metrics.get_shortest_path_to_object(
controller, object_id, pos_unity, {"x": 0, "y": 0, "z": 0}
)
minimum_path_length = metrics.path_distance(path)
rotation_allowed = False
while not rotation_allowed:
if len(possible_orientations) == 0:
break
roatation_y = random.choice(possible_orientations)
possible_orientations.remove(roatation_y)
evt = controller.step(
action="TeleportFull",
x=pos_unity["x"],
y=pos_unity["y"],
z=pos_unity["z"],
rotation=dict(x=0, y=roatation_y, z=0),
)
rotation_allowed = evt.metadata["lastActionSuccess"]
if not evt.metadata["lastActionSuccess"]:
print(evt.metadata["errorMessage"])
print(
"--------- Rotation not allowed! for pos {} rot {} ".format(
pos_unity, roatation_y
)
)
if minimum_path_length > eps and rotation_allowed:
m = re.search("FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", scene)
point_id = "{}_{}_{}_{}_{}".format(
m.group(1), m.group(2), m.group(3), object_type, counter
)
point_objects.append(
{
"id": point_id,
"scene": scene,
"object_type": object_type,
"object_id": object_id,
"target_position": target_position,
"initial_position": pos_unity,
"initial_orientation": roatation_y,
"shortest_path": path,
"shortest_path_length": minimum_path_length,
}
)
counter += 1
except ValueError:
print("-----Invalid path discarding point...")
failed_points.append(
{
"scene": scene,
"object_type": object_type,
"object_id": object_id,
"target_position": target_position,
"initial_position": pos_unity,
}
)
sorted_objs = sorted(point_objects, key=lambda m: m["shortest_path_length"])
third = int(len(sorted_objs) / 3.0)
for i, obj in enumerate(sorted_objs):
if i < third:
level = "easy"
elif i < 2 * third:
level = "medium"
else:
level = "hard"
sorted_objs[i]["difficulty"] = level
return sorted_objs
dataset = {}
dataset_flat = []
if intermediate_directory is not None:
if intermediate_directory != ".":
if os.path.exists(intermediate_directory):
shutil.rmtree(intermediate_directory)
os.makedirs(intermediate_directory)
def key_sort_func(scene_name):
m = re.search("FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", scene_name)
return m.group(1), int(m.group(2)), int(m.group(3))
scenes = sorted(
[scene for scene in scenes_in_build if "physics" not in scene],
key=key_sort_func,
)
if scene_filter is not None:
scene_filter_set = set(scene_filter.split(","))
scenes = [s for s in scenes if s in scene_filter_set]
print("Sorted scenes: {}".format(scenes))
for scene in scenes:
dataset[scene] = {}
dataset["object_types"] = targets
objects = []
for objectType in targets:
if filter_file is None or (
objectType in scene_object_filter
and scene in scene_object_filter[objectType]
):
dataset[scene][objectType] = []
obj = get_points(controller, objectType, scene)
if obj is not None:
objects = objects + obj
dataset_flat = dataset_flat + objects
if intermediate_directory != ".":
with open(
os.path.join(intermediate_directory, "{}.json".format(scene)), "w"
) as f:
json.dump(objects, f, indent=4)
with open(os.path.join(intermediate_directory, output), "w") as f:
json.dump(dataset_flat, f, indent=4)
print("Object types in scene union: {}".format(objects_types_in_scene))
print("Total unique objects: {}".format(len(objects_types_in_scene)))
print("Total scenes: {}".format(len(scenes)))
print("Total datapoints: {}".format(len(dataset_flat)))
print(failed_points)
with open(os.path.join(intermediate_directory, "failed.json"), "w") as f:
json.dump(failed_points, f, indent=4)
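# --- Hedged example (not part of the original tasks file) ---------------------
# Shape of a single datapoint emitted by create_robothor_dataset above, shown as
# an illustrative literal (the concrete values are made up):
_EXAMPLE_ROBOTHOR_DATAPOINT = {
    "id": "Train_1_1_Apple_0",
    "scene": "FloorPlan_Train1_1",
    "object_type": "Apple",
    "object_id": "Apple|+01.00|+00.90|-01.50",
    "target_position": {"x": 1.0, "y": 0.9, "z": -1.5},
    "initial_position": {"x": 3.25, "y": 0.91, "z": -2.0},
    "initial_orientation": 90,
    "shortest_path": [{"x": 3.25, "y": 0.91, "z": -2.0}, {"x": 1.25, "y": 0.91, "z": -1.5}],
    "shortest_path_length": 2.06,
    "difficulty": "easy",
}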
@task
def shortest_path_to_object(
context,
scene,
object,
x,
z,
y=0.9103442,
rotation=0,
editor_mode=False,
local_build=False,
visibility_distance=1.0,
grid_size=0.25,
):
p = dict(x=x, y=y, z=z)
import ai2thor.controller
import ai2thor.util.metrics as metrics
angle = 45
gridSize = grid_size
controller = ai2thor.controller.Controller(
width=300,
height=300,
local_build=local_build,
start_unity=False if editor_mode else True,
scene=scene,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=gridSize,
fieldOfView=60,
rotateStepDegrees=angle,
agentMode="bot",
visibilityDistance=visibility_distance,
)
path = metrics.get_shortest_path_to_object_type(
controller, object, p, {"x": 0, "y": 0, "z": 0}
)
minimum_path_length = metrics.path_distance(path)
print("Path: {}".format(path))
print("Path lenght: {}".format(minimum_path_length))
@task
def filter_dataset(ctx, filename, output_filename, ids=False):
"""
Filters objects in dataset that are not reachable in at least one of the scenes (have
zero occurrences in the dataset)
"""
with open(filename, "r") as f:
obj = json.load(f)
targets = [
"Apple",
"Baseball Bat",
"BasketBall",
"Bowl",
"Garbage Can",
"House Plant",
"Laptop",
"Mug",
"Spray Bottle",
"Vase",
"Alarm Clock",
"Television",
"Pillow",
]
counter = {}
for f in obj:
obj_type = f["object_type"]
if f["scene"] not in counter:
counter[f["scene"]] = {target: 0 for target in targets}
scene_counter = counter[f["scene"]]
if obj_type not in scene_counter:
scene_counter[obj_type] = 1
else:
scene_counter[obj_type] += 1
objects_with_zero = set()
objects_with_zero_by_obj = {}
for k, item in counter.items():
# print("Key {} ".format(k))
for obj_type, count in item.items():
# print("obj {} count {}".format(obj_type, count))
if count == 0:
if obj_type not in objects_with_zero_by_obj:
objects_with_zero_by_obj[obj_type] = set()
# print("With zero for obj: {} in scene {}".format(obj_type, k))
objects_with_zero_by_obj[obj_type].add(k)
objects_with_zero.add(obj_type)
print("Objects with zero: {}".format(objects_with_zero))
with open("with_zero.json", "w") as fw:
dict_list = {k: list(v) for k, v in objects_with_zero_by_obj.items()}
json.dump(dict_list, fw, sort_keys=True, indent=4)
pprint.pprint(objects_with_zero_by_obj)
filtered = [o for o in obj if o["object_type"] not in objects_with_zero]
counter = 0
current_scene = ""
current_object_type = ""
for i, o in enumerate(filtered):
if current_scene != o["scene"] or current_object_type != o["object_type"]:
counter = 0
current_scene = o["scene"]
current_object_type = o["object_type"]
m = re.search("FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", o["scene"])
point_id = "{}_{}_{}_{}_{}".format(
m.group(1), m.group(2), m.group(3), o["object_type"], counter
)
counter += 1
o["id"] = point_id
with open(output_filename, "w") as f:
json.dump(filtered, f, indent=4)
@task
def fix_dataset_object_types(
ctx, input_file, output_file, editor_mode=False, local_build=False
):
import ai2thor.controller
with open(input_file, "r") as f:
obj = json.load(f)
scene = "FloorPlan_Train1_1"
angle = 45
gridSize = 0.25
controller = ai2thor.controller.Controller(
width=300,
height=300,
local_build=local_build,
start_unity=False if editor_mode else True,
scene=scene,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=gridSize,
fieldOfView=60,
rotateStepDegrees=angle,
agentMode="bot",
visibilityDistance=1,
)
current_scene = None
object_map = {}
for i, point in enumerate(obj):
if current_scene != point["scene"]:
print("Fixing for scene '{}'...".format(point["scene"]))
controller.reset(point["scene"])
current_scene = point["scene"]
object_map = {
o["objectType"].lower(): {
"id": o["objectId"],
"type": o["objectType"],
}
for o in controller.last_event.metadata["objects"]
}
key = point["object_type"].replace(" ", "").lower()
point["object_id"] = object_map[key]["id"]
point["object_type"] = object_map[key]["type"]
with open(output_file, "w") as fw:
json.dump(obj, fw, indent=True)
@task
def test_dataset(
ctx, filename, scenes=None, objects=None, editor_mode=False, local_build=False
):
import ai2thor.controller
import ai2thor.util.metrics as metrics
scene = "FloorPlan_Train1_1" if scenes is None else scenes.split(",")[0]
controller = ai2thor.controller.Controller(
width=300,
height=300,
local_build=local_build,
start_unity=False if editor_mode else True,
scene=scene,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=0.25,
fieldOfView=60,
rotateStepDegrees=45,
agentMode="bot",
visibilityDistance=1,
)
with open(filename, "r") as f:
dataset = json.load(f)
filtered_dataset = dataset
if scenes is not None:
scene_set = set(scenes.split(","))
print("Filtering {}".format(scene_set))
filtered_dataset = [d for d in dataset if d["scene"] in scene_set]
if objects is not None:
object_set = set(objects.split(","))
print("Filtering {}".format(object_set))
filtered_dataset = [
d for d in filtered_dataset if d["object_type"] in object_set
]
current_scene = None
current_object = None
point_counter = 0
print(len(filtered_dataset))
for point in filtered_dataset:
if current_scene != point["scene"]:
current_scene = point["scene"]
print("Testing for scene '{}'...".format(current_scene))
if current_object != point["object_type"]:
current_object = point["object_type"]
point_counter = 0
print(" Object '{}'...".format(current_object))
try:
path = metrics.get_shortest_path_to_object_type(
controller,
point["object_type"],
point["initial_position"],
{"x": 0, "y": point["initial_orientation"], "z": 0},
)
path_dist = metrics.path_distance(path)
point_counter += 1
print(" Total points: {}".format(point_counter))
print(path_dist)
except ValueError:
print("Cannot find path from point")
@task
def visualize_shortest_paths(
ctx,
dataset_path,
width=600,
height=300,
editor_mode=False,
local_build=False,
scenes=None,
gridSize=0.25,
output_dir=".",
object_types=None,
):
angle = 45
import ai2thor.controller
from PIL import Image
controller = ai2thor.controller.Controller(
width=width,
height=height,
local_build=local_build,
start_unity=False if editor_mode else True,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=gridSize,
fieldOfView=60,
rotateStepDegrees=angle,
agentMode="bot",
visibilityDistance=1,
)
if output_dir != "." and os.path.exists(output_dir):
shutil.rmtree(output_dir)
if output_dir != ".":
os.mkdir(output_dir)
evt = controller.step(
action="AddThirdPartyCamera",
rotation=dict(x=90, y=0, z=0),
position=dict(x=5.40, y=3.25, z=-3.0),
fieldOfView=2.25,
orthographic=True,
)
evt = controller.step(action="SetTopLevelView", topView=True)
evt = controller.step(action="ToggleMapView")
# im = Image.fromarray(evt.third_party_camera_frames[0])
# im.save(os.path.join(output_dir, "top_view.jpg"))
with open(dataset_path, "r") as f:
dataset = json.load(f)
dataset_filtered = dataset
if scenes is not None:
scene_f_set = set(scenes.split(","))
dataset_filtered = [d for d in dataset if d["scene"] in scene_f_set]
if object_types is not None:
object_f_set = set(object_types.split(","))
dataset_filtered = [
d for d in dataset_filtered if d["object_type"] in object_f_set
]
print("Running for {} points...".format(len(dataset_filtered)))
index = 0
print(index)
print(len(dataset_filtered))
datapoint = dataset_filtered[index]
current_scene = datapoint["scene"]
current_object = datapoint["object_type"]
failed = {}
while index < len(dataset_filtered):
previous_index = index
controller.reset(current_scene)
while (
current_scene == datapoint["scene"]
and current_object == datapoint["object_type"]
):
index += 1
if index > len(dataset_filtered) - 1:
break
datapoint = dataset_filtered[index]
current_scene = datapoint["scene"]
current_object = datapoint["object_type"]
key = "{}_{}".format(current_scene, current_object)
failed[key] = []
print(
"Points for '{}' in scene '{}'...".format(current_object, current_scene)
)
evt = controller.step(
action="AddThirdPartyCamera",
rotation=dict(x=90, y=0, z=0),
position=dict(x=5.40, y=3.25, z=-3.0),
fieldOfView=2.25,
orthographic=True,
)
sc = dataset_filtered[previous_index]["scene"]
obj_type = dataset_filtered[previous_index]["object_type"]
positions = [
d["initial_position"] for d in dataset_filtered[previous_index:index]
]
# print("{} : {} : {}".format(sc, obj_type, positions))
evt = controller.step(
action="VisualizeShortestPaths",
objectType=obj_type,
positions=positions,
grid=True,
)
im = Image.fromarray(evt.third_party_camera_frames[0])
im.save(os.path.join(output_dir, "{}-{}.jpg".format(sc, obj_type)))
# print("Retur {}, {} ".format(evt.metadata['actionReturn'], evt.metadata['lastActionSuccess']))
# print(evt.metadata['errorMessage'])
failed[key] = [
positions[i]
for i, success in enumerate(evt.metadata["actionReturn"])
if not success
]
pprint.pprint(failed)
@task
def fill_in_dataset(
ctx,
dataset_dir,
dataset_filename,
filter_filename,
intermediate_dir,
output_filename="filled.json",
local_build=False,
editor_mode=False,
visibility_distance=1.0,
):
import glob
import ai2thor.controller
dataset_path = os.path.join(dataset_dir, dataset_filename)
def key_sort_func(scene_name):
m = re.search("FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", scene_name)
return m.group(1), int(m.group(2)), int(m.group(3))
targets = [
"Apple",
"Baseball Bat",
"Basketball",
"Bowl",
"Garbage Can",
"House Plant",
"Laptop",
"Mug",
"Remote",
"Spray Bottle",
"Vase",
"Alarm Clock",
"Television",
"Pillow",
]
controller = ai2thor.controller.Controller(
width=300,
height=300,
local_build=local_build,
start_unity=False if editor_mode else True,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=0.25,
fieldOfView=60,
rotateStepDegrees=45,
agentMode="bot",
visibilityDistance=1,
)
scenes = sorted(
[scene for scene in controller._scenes_in_build if "physics" not in scene],
key=key_sort_func,
)
missing_datapoints_by_scene = {}
partial_dataset_by_scene = {}
for scene in scenes:
missing_datapoints_by_scene[scene] = []
partial_dataset_by_scene[scene] = []
with open(dataset_path, "r") as f:
        create_robothor_dataset(
ctx,
local_build=local_build,
editor_mode=editor_mode,
output=output_filename,
intermediate_directory=intermediate_dir,
visibility_distance=visibility_distance,
)
for datapoint in filter_dataset:
missing_datapoints_by_scene[datapoint["scene"]].append(datapoint)
partial_dataset_filenames = sorted(
glob.glob("{}/FloorPlan_*.png".format(dataset_dir))
)
print("Datas")
difficulty_order_map = {"easy": 0, "medium": 1, "hard": 2}
for d_filename in partial_dataset_filenames:
with open(d_filename, "r") as fp:
partial_dataset = json.load(fp)
partial_dataset[0]["scene"] = partial_dataset
final_dataset = []
for scene in scenes:
for object_type in targets:
arr = [
p for p in partial_dataset[scene] if p["object_type"] == object_type
] + [
p
for p in missing_datapoints_by_scene[scene]
if p["object_type"] == object_type
]
final_dataset = final_dataset + sorted(
arr,
key=lambda p: (
p["object_type"],
difficulty_order_map[p["difficulty"]],
),
)
@task
def test_teleport(ctx, editor_mode=False, local_build=False):
import ai2thor.controller
import time
controller = ai2thor.controller.Controller(
rotateStepDegrees=30,
visibilityDistance=1.0,
gridSize=0.25,
port=8200,
host="127.0.0.1",
local_build=local_build,
start_unity=False if editor_mode else True,
agentType="stochastic",
continuousMode=True,
continuous=False,
snapToGrid=False,
agentMode="locobot",
scene="FloorPlan_Train1_2",
width=640,
height=480,
continus=True,
)
controller.step(action="GetReachablePositions", gridSize=0.25)
params = {
"x": 8.0,
"y": 0.924999952,
"z": -1.75,
"rotation": {"x": 0.0, "y": 240.0, "z": 0.0},
"horizon": 330.0,
}
evt = controller.step(action="TeleportFull", **params)
print("New pos: {}".format(evt.metadata["agent"]["position"]))
@task
def resort_dataset(ctx, dataset_path, output_path, editor_mode=False, local_build=True):
with open(dataset_path, "r") as f:
dataset = json.load(f)
index = 0
previous_index = 0
datapoint = dataset[index]
current_scene = datapoint["scene"]
current_object = datapoint["object_type"]
# controller.reset(current_scene)
sum_t = 0
new_dataset = []
while index < len(dataset):
previous_index = index
while (
current_scene == datapoint["scene"]
and current_object == datapoint["object_type"]
):
index += 1
if index > len(dataset) - 1:
break
datapoint = dataset[index]
current_scene = datapoint["scene"]
current_object = datapoint["object_type"]
print("Scene '{}'...".format(current_scene))
sorted_datapoints = sorted(
dataset[previous_index:index], key=lambda dp: dp["shortest_path_length"]
)
third = int(len(sorted_datapoints) / 3.0)
for i, obj in enumerate(sorted_datapoints):
if i < third:
level = "easy"
elif i < 2 * third:
level = "medium"
else:
level = "hard"
sorted_datapoints[i]["difficulty"] = level
m = re.search("FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", obj["scene"])
point_id = "{}_{}_{}_{}_{}".format(
m.group(1), m.group(2), m.group(3), obj["object_type"], i
)
sorted_datapoints[i]["id"] = point_id
sorted_datapoints[i]["difficulty"] = level
new_dataset = new_dataset + sorted_datapoints
sum_t += len(sorted_datapoints)
print("original len: {}, new len: {}".format(len(dataset), sum_t))
with open(output_path, "w") as fw:
json.dump(new_dataset, fw, indent=4)
@task
def remove_dataset_spaces(ctx, dataset_dir):
train = os.path.join(dataset_dir, "train.json")
test = os.path.join(dataset_dir, "val.json")
with open(train, "r") as f:
train_data = json.load(f)
with open(test, "r") as f:
test_data = json.load(f)
id_set = set()
for o in train_data:
o["id"] = o["id"].replace(" ", "")
id_set.add(o["id"])
print(sorted(id_set))
id_set = set()
for o in test_data:
o["id"] = o["id"].replace(" ", "")
id_set.add(o["id"])
print(sorted(id_set))
with open("train.json", "w") as fw:
json.dump(train_data, fw, indent=4, sort_keys=True)
with open("val.json", "w") as fw:
json.dump(test_data, fw, indent=4, sort_keys=True)
@task
def shortest_path_to_point(ctx, scene, x0, y0, z0, x1, y1, z1, editor_mode=False, local_build=False):
import ai2thor.util.metrics as metrics
import ai2thor.controller
controller = ai2thor.controller.Controller(
rotateStepDegrees=30,
visibilityDistance=1.0,
gridSize=0.25,
port=8200,
host="127.0.0.1",
local_build=local_build,
start_unity=False if editor_mode else True,
agentType="stochastic",
continuousMode=True,
continuous=False,
snapToGrid=False,
agentMode="locobot",
scene=scene,
width=300,
height=300,
continus=True,
)
evt = metrics.get_shortest_path_to_point(
controller, dict(x=x0, y=y0, z=z0), dict(x=x1, y=y1, z=z1)
)
print(evt.metadata["lastActionSuccess"])
print(evt.metadata["errorMessage"])
@task
def reachable_pos(ctx, scene, editor_mode=False, local_build=False):
import ai2thor.util.metrics as metrics
import ai2thor.controller
gridSize = 0.25
controller = ai2thor.controller.Controller(
rotateStepDegrees=30,
visibilityDistance=1.0,
gridSize=gridSize,
port=8200,
host="127.0.0.1",
local_build=local_build,
start_unity=False if editor_mode else True,
agentType="stochastic",
continuousMode=True,
continuous=False,
snapToGrid=False,
agentMode="locobot",
scene=scene,
width=300,
height=300,
continus=True,
)
print(
"constoller.last_action Agent Pos: {}".format(
controller.last_event.metadata["agent"]["position"]
)
)
evt = controller.step(action="GetReachablePositions", gridSize=gridSize)
print("After GetReachable AgentPos: {}".format(evt.metadata["agent"]["position"]))
print(evt.metadata["lastActionSuccess"])
print(evt.metadata["errorMessage"])
reachable_pos = evt.metadata["actionReturn"]
print(evt.metadata["actionReturn"])
evt = controller.step(
dict(
action="TeleportFull",
x=3.0,
y=reachable_pos[0]["y"],
z=-1.5,
rotation=dict(x=0, y=45.0, z=0),
horizon=0.0,
)
)
print("After teleport: {}".format(evt.metadata["agent"]["position"]))
@task
def get_physics_determinism(
ctx, scene="FloorPlan1_physics", agent_mode="arm", n=100, samples=100
):
import ai2thor.controller
import random
num_trials = n
width = 300
height = 300
fov = 100
def act(controller, actions, n):
for i in range(n):
action = random.choice(actions)
controller.step(dict(action=action))
controller = ai2thor.controller.Controller(
local_executable_path=None,
scene=scene,
gridSize=0.25,
width=width,
height=height,
agentMode=agent_mode,
fieldOfView=fov,
agentControllerType="mid-level",
server_class=ai2thor.fifo_server.FifoServer,
visibilityScheme="Distance",
)
from ai2thor.util.trials import trial_runner, ObjectPositionVarianceAverage
move_actions = ["MoveAhead", "MoveBack", "MoveLeft", "MoveRight"]
rotate_actions = ["RotateRight", "RotateLeft"]
look_actions = ["LookUp", "LookDown"]
all_actions = move_actions + rotate_actions + look_actions
sample_number = samples
action_tuples = [
("move", move_actions, sample_number),
("rotate", rotate_actions, sample_number),
("look", look_actions, sample_number),
("all", all_actions, sample_number),
]
for action_name, actions, n in action_tuples:
for controller, metric in trial_runner(
controller, num_trials, ObjectPositionVarianceAverage()
):
act(controller, actions, n)
print(
" actions: '{}', object_position_variance_average: {} ".format(
action_name, metric
)
)
@task
def generate_msgpack_resolver(context):
    import glob
    # mpc can be downloaded from: https://github.com/neuecc/MessagePack-CSharp/releases/download/v2.1.194/mpc.zip
    # it must be downloaded/unzipped into $HOME/local/bin (see mpc_path below); on macOS, grant the binary Gatekeeper permission
target_dir = "unity/Assets/Scripts/ThorMsgPackResolver"
shutil.rmtree(target_dir, ignore_errors=True)
mpc_path = os.path.join(os.environ["HOME"], "local/bin/mpc")
subprocess.check_call(
"%s -i unity -o %s -m -r ThorIL2CPPGeneratedResolver" % (mpc_path, target_dir),
shell=True,
)
for g in glob.glob(os.path.join(target_dir, "*.cs")):
with open(g) as f:
source_code = f.read()
source_code = "using UnityEngine;\n" + source_code
with open(g, "w") as f:
f.write(source_code)
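# Hedged setup note (not part of the original tasks file): the task above assumes
# the MessagePack-CSharp code generator already exists at $HOME/local/bin/mpc.
# One plausible way to put it there (exact zip layout and version are assumptions):
#   curl -LO https://github.com/neuecc/MessagePack-CSharp/releases/download/v2.1.194/mpc.zip
#   unzip mpc.zip -d "$HOME/local/bin" && chmod +x "$HOME/local/bin/mpc"
# On macOS you may additionally need to clear the Gatekeeper quarantine flag, e.g.
#   xattr -d com.apple.quarantine "$HOME/local/bin/mpc"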
@task
def generate_pypi_index(context):
s3 = boto3.resource("s3")
root_index = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<HTML>
<BODY>
<a href="/ai2thor/index.html">/ai2thor/</a><br>
</BODY>
</HTML>
"""
s3.Object(ai2thor.build.PYPI_S3_BUCKET, "index.html").put(
Body=root_index, ACL="public-read", ContentType="text/html"
)
objects = list_objects_with_metadata(ai2thor.build.PYPI_S3_BUCKET)
links = []
for k, v in objects.items():
if k.split("/")[-1] != "index.html":
links.append('<a href="/%s">/%s</a><br>' % (k, k))
ai2thor_index = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<HTML>
<BODY>
%s
</BODY>
</HTML>
""" % "\n".join(
links
)
s3.Object(ai2thor.build.PYPI_S3_BUCKET, "ai2thor/index.html").put(
Body=ai2thor_index, ACL="public-read", ContentType="text/html"
)
@task
def ci_test_utf(context, build):
s3 = boto3.resource("s3")
logger.info(
"running Unity Test framework testRunner for %s %s"
% (build["branch"], build["commit_id"])
)
results_path, results_logfile = test_utf(context)
for l in [results_path, results_logfile]:
key = "builds/" + os.path.basename(l)
with open(l) as f:
s3.Object(ai2thor.build.PUBLIC_S3_BUCKET, key).put(
Body=f.read(), ContentType="text/plain", ACL="public-read"
)
logger.info(
"finished Unity Test framework runner for %s %s"
% (build["branch"], build["commit_id"])
)
@task
def format(context):
format_py(context)
format_cs(context)
@task
def format_cs(context):
install_dotnet_format(context)
# the following message will get emitted, this can safely be ignored
# "Warnings were encountered while loading the workspace. Set the verbosity option to the 'diagnostic' level to log warnings"
subprocess.check_call(
".dotnet/dotnet tool run dotnet-format unity/AI2-THOR-Base.csproj -w -s",
shell=True,
)
@task
def install_dotnet_format(context, force=False):
install_dotnet(context)
base_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)))
if not os.path.isfile(".config/dotnet-tools.json"):
command = os.path.join(base_dir, ".dotnet/dotnet") + " new tool-manifest"
subprocess.check_call(command, shell=True)
with open(".config/dotnet-tools.json") as f:
tools = json.loads(f.read())
# we may want to specify a version here in the future
if not force and "dotnet-format" in tools.get("tools", {}):
# dotnet-format already installed
return
command = os.path.join(base_dir, ".dotnet/dotnet") + " tool install dotnet-format"
subprocess.check_call(command, shell=True)
@task
def install_dotnet(context, force=False):
import requests
import stat
base_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)))
if not force and os.path.isfile(os.path.join(base_dir, ".dotnet/dotnet")):
# dotnet already installed
return
# https://docs.microsoft.com/en-us/dotnet/core/tools/dotnet-install-script
res = requests.get("https://dot.net/v1/dotnet-install.sh")
res.raise_for_status()
target = os.path.join(base_dir, "dotnet-install.sh")
with open(target, "wb") as f:
f.write(res.content)
os.chmod(target, stat.S_IREAD | stat.S_IEXEC | stat.S_IWRITE)
env = os.environ.copy()
env["DOTNET_INSTALL_DIR"] = os.path.join(base_dir, ".dotnet")
subprocess.check_call(target, shell=True, env=env)
os.unlink(target)
@task
def format_py(context):
try:
import black
except ImportError:
raise Exception("black not installed - run pip install black")
subprocess.check_call(
"black -v -t py38 --exclude unity/ --exclude .git/ .", shell=True
)
@task
def test_utf(context):
"""
Generates a module named ai2thor/tests/test_utf.py with test_XYZ style methods
that include failures (if any) extracted from the xml output
of the Unity Test Runner
"""
project_path = os.path.join(os.getcwd(), "unity")
commit_id = git_commit_id()
test_results_path = os.path.join(project_path, "utf_testResults-%s.xml" % commit_id)
logfile_path = os.path.join(os.getcwd(), "thor-testResults-%s.log" % commit_id)
command = (
"%s -runTests -testResults %s -logFile %s -testPlatform PlayMode -projectpath %s "
% (_unity_path(), test_results_path, logfile_path, project_path)
)
subprocess.call(command, shell=True)
generate_pytest_utf(test_results_path)
return test_results_path, logfile_path
def generate_pytest_utf(test_results_path):
import xml.etree.ElementTree as ET
with open(test_results_path) as f:
root = ET.fromstring(f.read())
from collections import defaultdict
class_tests = defaultdict(list)
for test_case in root.findall(".//test-case"):
# print(test_case.attrib['methodname'])
class_tests[test_case.attrib["classname"]].append(test_case)
class_data = []
class_data.append(
f"""
# GENERATED BY tasks.generate_pytest_utf - DO NOT EDIT/COMMIT
import pytest
import json
import os
def test_testresults_exist():
test_results_path = "{test_results_path}"
assert os.path.isfile("{test_results_path}"), "TestResults at: {test_results_path} do not exist"
"""
)
for class_name, test_cases in class_tests.items():
test_records = []
for test_case in test_cases:
methodname = test_case.attrib["methodname"]
if test_case.attrib["result"] == "Failed":
fail_message = test_case.find("failure/message")
stack_trace = test_case.find("failure/stack-trace")
message = json.dumps(fail_message.text + " " + stack_trace.text)
test_data = f"""
def test_{methodname}(self):
pytest.fail(json.loads(r'{message}'))
"""
else:
test_data = f"""
def test_{methodname}(self):
pass
"""
test_records.append(test_data)
test_record_data = " pass"
if test_records:
test_record_data = "\n".join(test_records)
encoded_class_name = re.sub(
r"[^a-zA-Z0-9_]", "_", re.sub("_", "__", class_name)
)
class_data.append(
f"""
class {encoded_class_name}:
{test_record_data}
"""
)
with open("ai2thor/tests/test_utf.py", "w") as f:
f.write("\n".join(class_data))
| []
| []
| [
"HOME",
"INCLUDE_PRIVATE_SCENES"
]
| [] | ["HOME", "INCLUDE_PRIVATE_SCENES"] | python | 2 | 0 | |
dipy/data/tests/test_fetcher.py | import tempfile
import os.path as op
import sys
import os
import numpy.testing as npt
from nibabel.tmpdirs import TemporaryDirectory
import dipy.data.fetcher as fetcher
from dipy.data import SPHERE_FILES
from threading import Thread
if sys.version_info[0] < 3:
    from SimpleHTTPServer import SimpleHTTPRequestHandler  # Python 2
    from SocketServer import TCPServer as HTTPServer
else:
    from http.server import HTTPServer, SimpleHTTPRequestHandler  # Python 3
    from importlib import reload  # reload() is not a builtin in Python 3
def test_check_md5():
fd, fname = tempfile.mkstemp()
stored_md5 = fetcher._get_file_md5(fname)
# If all is well, this shouldn't return anything:
npt.assert_equal(fetcher.check_md5(fname, stored_md5), None)
# If None is provided as input, it should silently not check either:
npt.assert_equal(fetcher.check_md5(fname, None), None)
# Otherwise, it will raise its exception class:
npt.assert_raises(fetcher.FetcherError, fetcher.check_md5, fname, 'foo')
def test_make_fetcher():
symmetric362 = SPHERE_FILES['symmetric362']
with TemporaryDirectory() as tmpdir:
stored_md5 = fetcher._get_file_md5(symmetric362)
# create local HTTP Server
testfile_url = op.split(symmetric362)[0] + os.sep
test_server_url = "http://127.0.0.1:8000/"
print(testfile_url)
print(symmetric362)
current_dir = os.getcwd()
# change pwd to directory containing testfile.
os.chdir(testfile_url)
server = HTTPServer(('localhost', 8000), SimpleHTTPRequestHandler)
server_thread = Thread(target=server.serve_forever)
        server_thread.daemon = True
server_thread.start()
# test make_fetcher
sphere_fetcher = fetcher._make_fetcher("sphere_fetcher",
tmpdir, test_server_url,
[op.split(symmetric362)[-1]],
["sphere_name"],
md5_list=[stored_md5])
sphere_fetcher()
assert op.isfile(op.join(tmpdir, "sphere_name"))
npt.assert_equal(fetcher._get_file_md5(op.join(tmpdir, "sphere_name")),
stored_md5)
# stop local HTTP Server
server.shutdown()
# change to original working directory
os.chdir(current_dir)
def test_fetch_data():
symmetric362 = SPHERE_FILES['symmetric362']
with TemporaryDirectory() as tmpdir:
md5 = fetcher._get_file_md5(symmetric362)
bad_md5 = '8' * len(md5)
newfile = op.join(tmpdir, "testfile.txt")
# Test that the fetcher can get a file
testfile_url = symmetric362
print(testfile_url)
testfile_dir, testfile_name = op.split(testfile_url)
# create local HTTP Server
test_server_url = "http://127.0.0.1:8001/" + testfile_name
current_dir = os.getcwd()
# change pwd to directory containing testfile.
os.chdir(testfile_dir + os.sep)
# use different port as shutdown() takes time to release socket.
server = HTTPServer(('localhost', 8001), SimpleHTTPRequestHandler)
server_thread = Thread(target=server.serve_forever)
        server_thread.daemon = True
server_thread.start()
files = {"testfile.txt": (test_server_url, md5)}
fetcher.fetch_data(files, tmpdir)
npt.assert_(op.exists(newfile))
# Test that the file is replaced when the md5 doesn't match
with open(newfile, 'a') as f:
f.write("some junk")
fetcher.fetch_data(files, tmpdir)
npt.assert_(op.exists(newfile))
npt.assert_equal(fetcher._get_file_md5(newfile), md5)
# Test that an error is raised when the md5 checksum of the download
# file does not match the expected value
files = {"testfile.txt": (test_server_url, bad_md5)}
npt.assert_raises(fetcher.FetcherError,
fetcher.fetch_data, files, tmpdir)
# stop local HTTP Server
server.shutdown()
# change to original working directory
os.chdir(current_dir)
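# --- Hedged illustration (not part of the original test module) ----------------
# Both tests above follow the same pattern: chdir into the directory holding the
# fixture, serve it from a local HTTPServer in a daemon thread, then shut the
# server down and restore the working directory. A reusable sketch of that
# pattern (the helper name and port handling are arbitrary choices here):
from contextlib import contextmanager
@contextmanager
def _local_http_server(serve_dir, port):
    cwd = os.getcwd()
    os.chdir(serve_dir)
    server = HTTPServer(('localhost', port), SimpleHTTPRequestHandler)
    thread = Thread(target=server.serve_forever)
    thread.daemon = True
    thread.start()
    try:
        yield "http://127.0.0.1:%d/" % port
    finally:
        server.shutdown()
        os.chdir(cwd)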
def test_dipy_home():
test_path = 'TEST_PATH'
if 'DIPY_HOME' in os.environ:
old_home = os.environ['DIPY_HOME']
del os.environ['DIPY_HOME']
else:
old_home = None
reload(fetcher)
npt.assert_string_equal(fetcher.dipy_home,
op.join(os.path.expanduser('~'), '.dipy'))
os.environ['DIPY_HOME'] = test_path
reload(fetcher)
npt.assert_string_equal(fetcher.dipy_home, test_path)
# return to previous state
    if old_home:
        os.environ['DIPY_HOME'] = old_home
    else:
        del os.environ['DIPY_HOME']
| []
| []
| [
"DIPY_HOME"
]
| [] | ["DIPY_HOME"] | python | 1 | 0 | |
app.go | package teak
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"gopkg.in/urfave/cli.v1"
)
//ModuleConfigFunc Signature used by functions that are used to configure a
//module. Some config callbacks include - initialize, setup, reset etc
type ModuleConfigFunc func(gtx context.Context, app *App) (err error)
//Module - represents an application module
type Module struct {
Name string `json:"name" db:"name"`
Description string `json:"desc" db:"desc"`
Endpoints []*Endpoint `json:"endpoints" db:"endpoints"`
ItemHandlers []StoredItemHandler `json:"itemHandlers" db:"item_handlers"`
Commands []*cli.Command
Initialize ModuleConfigFunc
Setup ModuleConfigFunc
Reset ModuleConfigFunc
}
//App - the application itself
type App struct {
cli.App
modules []*Module
apiRoot string
apiVersion int
}
//FromAppDir - gives an absolute path for a path given relative to the
//application directory
func (app *App) FromAppDir(relPath string) (abs string) {
home := os.Getenv("HOME")
if runtime.GOOS == "windows" {
home = os.Getenv("APPDATA")
}
return filepath.Join(home, "."+app.Name, relPath)
}
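// Hedged usage example (not part of the original file): for an app named "myapp",
//   cfgPath := app.FromAppDir("config.json")
// resolves to $HOME/.myapp/config.json on Linux/macOS and to
// %APPDATA%\.myapp\config.json on Windows. The app and file names are placeholders.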
//AddModule - registers a module with the app
func (app *App) AddModule(module *Module) {
app.modules = append(app.modules, module)
}
func addInitializer(
gtx context.Context,
cmd *cli.Command,
module *Module,
app *App) {
req := func(ctx *cli.Context) error {
if module.Initialize != nil {
err := module.Initialize(gtx, app)
if err != nil {
Error("App", "Failed to initialize module %s",
module.Name)
}
}
return nil
}
if cmd.Before == nil {
cmd.Before = req
} else {
otherBefore := cmd.Before
cmd.Before = func(ctx *cli.Context) (err error) {
err = otherBefore(ctx)
if err == nil {
err = req(ctx)
}
return err
}
}
}
//Exec - runs the application
func (app *App) Exec(gtx context.Context, args []string) (err error) {
for _, module := range app.modules {
if module.Commands != nil {
for _, cmd := range module.Commands {
addInitializer(gtx, cmd, module, app)
app.Commands = append(app.Commands, *cmd)
}
}
for _, fc := range module.ItemHandlers {
siHandlers[fc.DataType()] = fc
}
AddEndpoints(module.Endpoints...)
}
if err == nil {
InitServer(app.apiRoot, app.apiVersion)
err = app.Run(args)
}
return err
}
//NewApp - creates a new application with default options
func NewApp(
name string,
appVersion Version,
apiVersion int,
desc string,
authtr Authenticator,
authzr Authorizer,
uStorage UserStorage,
genStorage DataStorage) (app *App) {
dataStorage = genStorage
authenticator = authtr
authorizer = authzr
userStorage = uStorage
// if err := dataStorage.Init(); err != nil {
// Fatal("t.app.dataStore", "Failed to initilize application store")
// }
InitLogger(LoggerConfig{
Logger: NewDirectLogger(),
LogConsole: true,
FilterLevel: InfoLevel,
})
LoadConfig(name)
app = &App{
App: cli.App{
Name: name,
Commands: make([]cli.Command, 0, 100),
Version: appVersion.String(),
Authors: []cli.Author{
{
Name: "The " + name + " team",
},
},
Usage: desc,
ErrWriter: ioutil.Discard,
Metadata: map[string]interface{}{},
Flags: []cli.Flag{
cli.StringFlag{
Name: "log-level",
Value: "info",
Usage: "Give log level, one of: 'trace', 'debug', " +
"'info', 'warn', 'error'",
},
},
Before: func(ctx *cli.Context) error {
ag := NewArgGetter(ctx)
logLevel := ag.GetOptionalString("log-level")
if logLevel != "" {
switch logLevel {
case "trace":
SetLevel(TraceLevel)
case "debug":
SetLevel(DebugLevel)
case "info":
SetLevel(InfoLevel)
case "warn":
SetLevel(WarnLevel)
case "error":
SetLevel(ErrorLevel)
}
}
return nil
},
},
apiRoot: "",
apiVersion: apiVersion,
modules: make([]*Module, 0, 10),
}
app.Metadata["teak"] = app
app.modules = append(app.modules, &Module{
Name: "Core",
Description: "teak Core module",
Endpoints: MergeEnpoints(
getUserManagementEndpoints(),
getDataEndpoints(),
getAdminEndpoints(),
),
Commands: MergeCommands(
getAdminCommands(),
),
ItemHandlers: []StoredItemHandler{
&UserHandler{},
},
Setup: func(gtx context.Context, app *App) error {
// return dataStorage.Init()
return nil
},
Initialize: func(gtx context.Context, app *App) error {
return dataStorage.Init(gtx, nil)
},
Reset: func(gtx context.Context, app *App) error {
return dataStorage.Reset(gtx)
},
})
return app
}
// Init - initializes the application and the registered modules. This needs to
// be called whenever the app/module configuration changes.
// For example: this is the place where MongoDB indices are expected to
// be created.
func (app *App) Init(
gtx context.Context, admin *User, adminPass string, param M) (err error) {
err = GetStore().Setup(context.TODO(), admin, adminPass, M{})
if err != nil {
return err
}
for _, module := range app.modules {
if module.Initialize != nil {
err = module.Initialize(gtx, app)
if err != nil {
Error("t.app.init", "Failed to initialize %s",
module.Name)
break
}
Info("t.app.init", "Initialized module %s", module.Name)
}
}
return err
}
// Setup - Setup the application for the first time
func (app *App) Setup(gtx context.Context) (err error) {
defer func() {
if err != nil {
LogErrorX("t.app.setup", "Failed to setup data storage", err)
}
}()
Info("t.app.setup", "Data storage setup succesful")
for _, module := range app.modules {
if module.Setup != nil {
err = module.Setup(gtx, app)
if err != nil {
Error("t.app.setup", "Failed to set module %s up",
module.Name)
break
}
Info("t.app.setup", "Setup module %s", module.Name)
}
}
if err == nil {
Info("t.app.setup", "Application setup complete")
}
return err
}
//Reset - resets the application and module configuration and data.
//USE WITH CAUTION
func (app *App) Reset(gtx context.Context) (err error) {
defer func() {
if err != nil {
LogErrorX("t.app.reset", "Failed to reset app", err)
}
}()
for _, module := range app.modules {
if module.Reset != nil {
err = module.Reset(gtx, app)
if err != nil {
Error("t.app.reset", "Failed to reset module %s",
module.Name)
break
}
Info("t.app.reset", "Reset module %s succesfully", module.Name)
}
}
if err == nil {
Info("t.app.setup", "Application reset complete")
}
return err
}
| [
"\"HOME\"",
"\"APPDATA\""
]
| []
| [
"APPDATA",
"HOME"
]
| [] | ["APPDATA", "HOME"] | go | 2 | 0 | |
cmd/redis-operator/main_test.go | package main
import (
"testing"
"os"
)
func TestHomeDir(t *testing.T) {
expected := "failure"
if os.Getenv("HOME") != "" {
expected = os.Getenv("HOME")
} else {
expected = os.Getenv("USERPROFILE")
}
actual := homeDir()
if actual != expected {
t.Errorf("Test failed, got -->%s<-- was expecting -->%s<--", actual, expected)
}
}
| [
"\"HOME\"",
"\"HOME\"",
"\"USERPROFILE\""
]
| []
| [
"HOME",
"USERPROFILE"
]
| [] | ["HOME", "USERPROFILE"] | go | 2 | 0 | |
cmd/apps/kubernetes_exec.go | package apps
import (
"fmt"
"log"
"os"
"path"
"strings"
"github.com/alexellis/arkade/pkg/env"
execute "github.com/alexellis/go-execute/pkg/v1"
)
func fetchChart(path, chart, version string, helm3 bool) error {
versionStr := ""
if len(version) > 0 {
// Issue in helm where adding a space to the command makes it think that it's another chart of " " we want to template,
// So we add the space before version here rather than on the command
versionStr = " --version " + version
}
subdir := ""
if helm3 {
subdir = "helm3"
}
mkErr := os.MkdirAll(path, 0700)
if mkErr != nil {
return mkErr
}
task := execute.ExecTask{
Command: fmt.Sprintf("%s fetch %s --untar=true --untardir %s%s", env.LocalBinary("helm", subdir), chart, path, versionStr),
Env: os.Environ(),
StreamStdio: true,
}
res, err := task.Execute()
if err != nil {
return err
}
if res.ExitCode != 0 {
return fmt.Errorf("exit code %d", res.ExitCode)
}
return nil
}
func getNodeArchitecture() string {
res, _ := kubectlTask("get", "nodes", `--output`, `jsonpath={range $.items[0]}{.status.nodeInfo.architecture}`)
arch := strings.TrimSpace(string(res.Stdout))
return arch
}
func helm3Upgrade(basePath, chart, namespace, values, version string, overrides map[string]string, wait bool) error {
chartName := chart
if index := strings.Index(chartName, "/"); index > -1 {
chartName = chartName[index+1:]
}
chartRoot := basePath
args := []string{"upgrade", "--install", chartName, chart, "--namespace", namespace}
if len(version) > 0 {
args = append(args, "--version", version)
}
if wait {
args = append(args, "--wait")
}
fmt.Println("VALUES", values)
if len(values) > 0 {
args = append(args, "--values")
if !strings.HasPrefix(values, "/") {
args = append(args, path.Join(chartRoot, values))
} else {
args = append(args, values)
}
}
for k, v := range overrides {
args = append(args, "--set")
args = append(args, fmt.Sprintf("%s=%s", k, v))
}
task := execute.ExecTask{
Command: env.LocalBinary("helm", "helm3"),
Args: args,
Env: os.Environ(),
Cwd: basePath,
StreamStdio: true,
}
fmt.Printf("Command: %s %s\n", task.Command, task.Args)
res, err := task.Execute()
if err != nil {
return err
}
if res.ExitCode != 0 {
return fmt.Errorf("exit code %d, stderr: %s", res.ExitCode, res.Stderr)
}
if len(res.Stderr) > 0 {
log.Printf("stderr: %s\n", res.Stderr)
}
return nil
}
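// Hedged usage example (not part of the original file): a caller might install a
// chart with value overrides roughly like this (the chart, namespace and keys are
// placeholders):
//   overrides := map[string]string{"replicaCount": "2", "service.type": "ClusterIP"}
//   err := helm3Upgrade(basePath, "stable/example", "default", "values.yaml", "1.0.0", overrides, true)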
func templateChart(basePath, chart, namespace, outputPath, values string, overrides map[string]string) error {
rmErr := os.RemoveAll(outputPath)
if rmErr != nil {
log.Printf("Error cleaning up: %s, %s\n", outputPath, rmErr.Error())
}
mkErr := os.MkdirAll(outputPath, 0700)
if mkErr != nil {
return mkErr
}
overridesStr := ""
for k, v := range overrides {
overridesStr += fmt.Sprintf(" --set %s=%s", k, v)
}
chartRoot := path.Join(basePath, chart)
valuesStr := ""
if len(values) > 0 {
valuesStr = "--values " + path.Join(chartRoot, values)
}
task := execute.ExecTask{
Command: fmt.Sprintf("%s template %s --name %s --namespace %s --output-dir %s %s %s",
env.LocalBinary("helm", ""), chart, chart, namespace, outputPath, valuesStr, overridesStr),
Env: os.Environ(),
Cwd: basePath,
StreamStdio: true,
}
res, err := task.Execute()
if err != nil {
return err
}
if res.ExitCode != 0 {
return fmt.Errorf("exit code %d, stderr: %s", res.ExitCode, res.Stderr)
}
if len(res.Stderr) > 0 {
log.Printf("stderr: %s\n", res.Stderr)
}
return nil
}
func addHelmRepo(name, url string, helm3 bool) error {
subdir := ""
if helm3 {
subdir = "helm3"
}
task := execute.ExecTask{
Command: fmt.Sprintf("%s repo add %s %s", env.LocalBinary("helm", subdir), name, url),
Env: os.Environ(),
StreamStdio: true,
}
res, err := task.Execute()
if err != nil {
return err
}
if res.ExitCode != 0 {
return fmt.Errorf("exit code %d", res.ExitCode)
}
return nil
}
func updateHelmRepos(helm3 bool) error {
subdir := ""
if helm3 {
subdir = "helm3"
}
task := execute.ExecTask{
Command: fmt.Sprintf("%s repo update", env.LocalBinary("helm", subdir)),
Env: os.Environ(),
StreamStdio: true,
}
res, err := task.Execute()
if err != nil {
return err
}
if res.ExitCode != 0 {
return fmt.Errorf("exit code %d", res.ExitCode)
}
return nil
}
func kubectlTask(parts ...string) (execute.ExecResult, error) {
task := execute.ExecTask{
Command: "kubectl",
Args: parts,
StreamStdio: false,
}
res, err := task.Execute()
return res, err
}
func kubectl(parts ...string) error {
task := execute.ExecTask{
Command: "kubectl",
Args: parts,
StreamStdio: true,
}
res, err := task.Execute()
if err != nil {
return err
}
if res.ExitCode != 0 {
return fmt.Errorf("kubectl exit code %d, stderr: %s",
res.ExitCode,
res.Stderr)
}
return nil
}
func getDefaultKubeconfig() string {
kubeConfigPath := path.Join(os.Getenv("HOME"), ".kube/config")
if val, ok := os.LookupEnv("KUBECONFIG"); ok {
kubeConfigPath = val
}
return kubeConfigPath
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
pkg/util/utils.go | package util
import (
"encoding/json"
"fmt"
"os"
"os/user"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/BurntSushi/toml"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/types"
"github.com/containers/libpod/pkg/errorhandling"
"github.com/containers/libpod/pkg/namespaces"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/signal"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh/terminal"
)
var containerConfig *config.Config
func init() {
var err error
containerConfig, err = config.Default()
if err != nil {
logrus.Error(err)
os.Exit(1)
}
}
// Helper function to determine the username/password passed
// in the creds string. It could be either or both.
func parseCreds(creds string) (string, string) {
if creds == "" {
return "", ""
}
up := strings.SplitN(creds, ":", 2)
if len(up) == 1 {
return up[0], ""
}
return up[0], up[1]
}
// ParseRegistryCreds takes a credentials string in the form USERNAME:PASSWORD
// and returns a DockerAuthConfig
func ParseRegistryCreds(creds string) (*types.DockerAuthConfig, error) {
username, password := parseCreds(creds)
if username == "" {
fmt.Print("Username: ")
fmt.Scanln(&username)
}
if password == "" {
fmt.Print("Password: ")
termPassword, err := terminal.ReadPassword(0)
if err != nil {
return nil, errors.Wrapf(err, "could not read password from terminal")
}
password = string(termPassword)
}
return &types.DockerAuthConfig{
Username: username,
Password: password,
}, nil
}
// StringInSlice determines if a string is in a string slice, returns bool
func StringInSlice(s string, sl []string) bool {
for _, i := range sl {
if i == s {
return true
}
}
return false
}
// ImageConfig is a wrapper around the OCIv1 Image Configuration struct exported
// by containers/image, but containing additional fields that are not supported
// by OCIv1 (but are by Docker v2) - notably OnBuild.
type ImageConfig struct {
v1.ImageConfig
OnBuild []string
}
// GetImageConfig produces an ImageConfig from the --change flag that is
// accepted by several Podman commands. It accepts a limited subset of
// Dockerfile instructions.
func GetImageConfig(changes []string) (ImageConfig, error) {
// Valid changes:
// USER
// EXPOSE
// ENV
// ENTRYPOINT
// CMD
// VOLUME
// WORKDIR
// LABEL
// STOPSIGNAL
// ONBUILD
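// For example (hypothetical values), a change such as "ENV FOO=bar" or
// "EXPOSE 8080/tcp" is parsed below into the corresponding ImageConfig field.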
config := ImageConfig{}
for _, change := range changes {
// First, let's assume proper Dockerfile format - space
// separator between instruction and value
split := strings.SplitN(change, " ", 2)
if len(split) != 2 {
split = strings.SplitN(change, "=", 2)
if len(split) != 2 {
return ImageConfig{}, errors.Errorf("invalid change %q - must be formatted as KEY VALUE", change)
}
}
outerKey := strings.ToUpper(strings.TrimSpace(split[0]))
value := strings.TrimSpace(split[1])
switch outerKey {
case "USER":
// Assume literal contents are the user.
if value == "" {
return ImageConfig{}, errors.Errorf("invalid change %q - must provide a value to USER", change)
}
config.User = value
case "EXPOSE":
// EXPOSE is either [portnum] or
// [portnum]/[proto]
// Protocol must be "tcp" or "udp"
splitPort := strings.Split(value, "/")
if len(splitPort) > 2 {
return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change)
}
portNum, err := strconv.Atoi(splitPort[0])
if err != nil {
return ImageConfig{}, errors.Wrapf(err, "invalid change %q - EXPOSE port must be an integer", change)
}
if portNum > 65535 || portNum <= 0 {
return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE port must be a valid port number", change)
}
proto := "tcp"
if len(splitPort) > 1 {
testProto := strings.ToLower(splitPort[1])
switch testProto {
case "tcp", "udp":
proto = testProto
default:
return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change)
}
}
if config.ExposedPorts == nil {
config.ExposedPorts = make(map[string]struct{})
}
config.ExposedPorts[fmt.Sprintf("%d/%s", portNum, proto)] = struct{}{}
case "ENV":
// Format is either:
// ENV key=value
// ENV key=value key=value ...
// ENV key value
// Both keys and values can be surrounded by quotes to group them.
// For now: we only support key=value
// We will attempt to strip quotation marks if present.
var (
key, val string
)
splitEnv := strings.SplitN(value, "=", 2)
key = splitEnv[0]
// We do need a key
if key == "" {
return ImageConfig{}, errors.Errorf("invalid change %q - ENV must have at least one argument", change)
}
// Perfectly valid to not have a value
if len(splitEnv) == 2 {
val = splitEnv[1]
}
if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) {
key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`)
}
if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) {
val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`)
}
config.Env = append(config.Env, fmt.Sprintf("%s=%s", key, val))
case "ENTRYPOINT":
// Two valid forms.
// First, JSON array.
// Second, not a JSON array - we interpret this as an
// argument to `sh -c`, unless empty, in which case we
// just use a blank entrypoint.
testUnmarshal := []string{}
if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
// It ain't valid JSON, so assume it's an
// argument to sh -c if not empty.
if value != "" {
config.Entrypoint = []string{"/bin/sh", "-c", value}
} else {
config.Entrypoint = []string{}
}
} else {
// Valid JSON
config.Entrypoint = testUnmarshal
}
case "CMD":
// Same valid forms as entrypoint.
// However, where ENTRYPOINT assumes that 'ENTRYPOINT '
// means no entrypoint, CMD assumes it is 'sh -c' with
// no third argument.
testUnmarshal := []string{}
if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
// It ain't valid JSON, so assume it's an
// argument to sh -c.
// Only include the value if it's not ""
config.Cmd = []string{"/bin/sh", "-c"}
if value != "" {
config.Cmd = append(config.Cmd, value)
}
} else {
// Valid JSON
config.Cmd = testUnmarshal
}
case "VOLUME":
// Either a JSON array or a set of space-separated
// paths.
// Acts rather similar to ENTRYPOINT and CMD, but always
// appends rather than replacing, and no sh -c prepend.
testUnmarshal := []string{}
if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
// Not valid JSON, so split on spaces
testUnmarshal = strings.Split(value, " ")
}
if len(testUnmarshal) == 0 {
return ImageConfig{}, errors.Errorf("invalid change %q - must provide at least one argument to VOLUME", change)
}
for _, vol := range testUnmarshal {
if vol == "" {
return ImageConfig{}, errors.Errorf("invalid change %q - VOLUME paths must not be empty", change)
}
if config.Volumes == nil {
config.Volumes = make(map[string]struct{})
}
config.Volumes[vol] = struct{}{}
}
case "WORKDIR":
// This can be passed multiple times.
// Each successive invocation is treated as relative to
// the previous one - so WORKDIR /A, WORKDIR b,
// WORKDIR c results in /A/b/c
// Just need to check it's not empty...
if value == "" {
return ImageConfig{}, errors.Errorf("invalid change %q - must provide a non-empty WORKDIR", change)
}
config.WorkingDir = filepath.Join(config.WorkingDir, value)
case "LABEL":
// Same general idea as ENV, except that " " is not allowed as a
// separator. Since the ENV parsing above also only splits on "=",
// the handling here is essentially the same.
// Potentially problematic: LABEL might theoretically
// allow an = in the key? If people really do this, we
// may need to investigate more advanced parsing.
var (
key, val string
)
splitLabel := strings.SplitN(value, "=", 2)
// Unlike ENV, LABEL must have a value
if len(splitLabel) != 2 {
return ImageConfig{}, errors.Errorf("invalid change %q - LABEL must be formatted key=value", change)
}
key = splitLabel[0]
val = splitLabel[1]
if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) {
key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`)
}
if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) {
val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`)
}
// Check key after we strip quotations
if key == "" {
return ImageConfig{}, errors.Errorf("invalid change %q - LABEL must have a non-empty key", change)
}
if config.Labels == nil {
config.Labels = make(map[string]string)
}
config.Labels[key] = val
case "STOPSIGNAL":
// Check the provided signal for validity.
killSignal, err := ParseSignal(value)
if err != nil {
return ImageConfig{}, errors.Wrapf(err, "invalid change %q - KILLSIGNAL must be given a valid signal", change)
}
config.StopSignal = fmt.Sprintf("%d", killSignal)
case "ONBUILD":
// Onbuild always appends.
if value == "" {
return ImageConfig{}, errors.Errorf("invalid change %q - ONBUILD must be given an argument", change)
}
config.OnBuild = append(config.OnBuild, value)
default:
return ImageConfig{}, errors.Errorf("invalid change %q - invalid instruction %s", change, outerKey)
}
}
return config, nil
}
// ParseSignal parses and validates a signal name or number.
func ParseSignal(rawSignal string) (syscall.Signal, error) {
// Strip off leading dash, to allow -1 or -HUP
basename := strings.TrimPrefix(rawSignal, "-")
sig, err := signal.ParseSignal(basename)
if err != nil {
return -1, err
}
// 64 is SIGRTMAX; wish we could get this from a standard Go library
if sig < 1 || sig > 64 {
return -1, errors.Errorf("valid signals are 1 through 64")
}
return sig, nil
}
// GetKeepIDMapping returns the mappings and the user to use when keep-id is used
func GetKeepIDMapping() (*storage.IDMappingOptions, int, int, error) {
options := storage.IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
}
uid, gid := 0, 0
if rootless.IsRootless() {
min := func(a, b int) int {
if a < b {
return a
}
return b
}
uid = rootless.GetRootlessUID()
gid = rootless.GetRootlessGID()
uids, gids, err := rootless.GetConfiguredMappings()
if err != nil {
return nil, -1, -1, errors.Wrapf(err, "cannot read mappings")
}
maxUID, maxGID := 0, 0
for _, u := range uids {
maxUID += u.Size
}
for _, g := range gids {
maxGID += g.Size
}
options.UIDMap, options.GIDMap = nil, nil
options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(uid, maxUID)})
options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid, HostID: 0, Size: 1})
if maxUID > uid {
options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid + 1, HostID: uid + 1, Size: maxUID - uid})
}
options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(gid, maxGID)})
options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid, HostID: 0, Size: 1})
if maxGID > gid {
options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid + 1, HostID: gid + 1, Size: maxGID - gid})
}
options.HostUIDMapping = false
options.HostGIDMapping = false
}
// Simply ignore the setting and do not set up an inner namespace for root as it is a no-op
return &options, uid, gid, nil
}
// ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping
func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []string, subUIDMap, subGIDMap string) (*storage.IDMappingOptions, error) {
options := storage.IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
}
if mode.IsAuto() {
var err error
options.HostUIDMapping = false
options.HostGIDMapping = false
options.AutoUserNs = true
opts, err := mode.GetAutoOptions()
if err != nil {
return nil, err
}
options.AutoUserNsOpts = *opts
return &options, nil
}
if mode.IsKeepID() {
options.HostUIDMapping = false
options.HostGIDMapping = false
return &options, nil
}
if subGIDMap == "" && subUIDMap != "" {
subGIDMap = subUIDMap
}
if subUIDMap == "" && subGIDMap != "" {
subUIDMap = subGIDMap
}
if len(gidMapSlice) == 0 && len(uidMapSlice) != 0 {
gidMapSlice = uidMapSlice
}
if len(uidMapSlice) == 0 && len(gidMapSlice) != 0 {
uidMapSlice = gidMapSlice
}
if subUIDMap != "" && subGIDMap != "" {
mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)
if err != nil {
return nil, err
}
options.UIDMap = mappings.UIDs()
options.GIDMap = mappings.GIDs()
}
parsedUIDMap, err := idtools.ParseIDMap(uidMapSlice, "UID")
if err != nil {
return nil, err
}
parsedGIDMap, err := idtools.ParseIDMap(gidMapSlice, "GID")
if err != nil {
return nil, err
}
options.UIDMap = append(options.UIDMap, parsedUIDMap...)
options.GIDMap = append(options.GIDMap, parsedGIDMap...)
if len(options.UIDMap) > 0 {
options.HostUIDMapping = false
}
if len(options.GIDMap) > 0 {
options.HostGIDMapping = false
}
return &options, nil
}
var (
rootlessConfigHomeDirOnce sync.Once
rootlessConfigHomeDir string
rootlessRuntimeDirOnce sync.Once
rootlessRuntimeDir string
)
type tomlOptionsConfig struct {
MountProgram string `toml:"mount_program"`
}
type tomlConfig struct {
Storage struct {
Driver string `toml:"driver"`
RunRoot string `toml:"runroot"`
GraphRoot string `toml:"graphroot"`
Options struct{ tomlOptionsConfig } `toml:"options"`
} `toml:"storage"`
}
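// getTomlStorage converts storage.StoreOptions into the tomlConfig layout used
// for storage.conf, copying the driver, roots, and the overlay mount program option.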
func getTomlStorage(storeOptions *storage.StoreOptions) *tomlConfig {
config := new(tomlConfig)
config.Storage.Driver = storeOptions.GraphDriverName
config.Storage.RunRoot = storeOptions.RunRoot
config.Storage.GraphRoot = storeOptions.GraphRoot
for _, i := range storeOptions.GraphDriverOptions {
s := strings.Split(i, "=")
if s[0] == "overlay.mount_program" {
config.Storage.Options.MountProgram = s[1]
}
}
return config
}
// WriteStorageConfigFile writes the configuration to a file
func WriteStorageConfigFile(storageOpts *storage.StoreOptions, storageConf string) error {
if err := os.MkdirAll(filepath.Dir(storageConf), 0755); err != nil {
return err
}
storageFile, err := os.OpenFile(storageConf, os.O_RDWR|os.O_TRUNC, 0600)
if err != nil {
return errors.Wrapf(err, "cannot open %s", storageConf)
}
tomlConfiguration := getTomlStorage(storageOpts)
defer errorhandling.CloseQuiet(storageFile)
enc := toml.NewEncoder(storageFile)
if err := enc.Encode(tomlConfiguration); err != nil {
if err := os.Remove(storageConf); err != nil {
logrus.Errorf("unable to remove file %s", storageConf)
}
return err
}
return nil
}
// ParseInputTime takes the user's input, determines whether it is valid, and
// returns the parsed time and an error. The input is compared against known
// time formats, or parsed as a duration which is subtracted from the current time.
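// For example (hypothetical inputs), "2006-01-02", an RFC3339 timestamp, or a
// duration such as "10m" are all accepted; a duration is interpreted as that long ago.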
func ParseInputTime(inputTime string) (time.Time, error) {
timeFormats := []string{time.RFC3339Nano, time.RFC3339, "2006-01-02T15:04:05", "2006-01-02T15:04:05.999999999",
"2006-01-02Z07:00", "2006-01-02"}
// iterate the supported time formats
for _, tf := range timeFormats {
t, err := time.Parse(tf, inputTime)
if err == nil {
return t, nil
}
}
// input might be a duration
duration, err := time.ParseDuration(inputTime)
if err != nil {
return time.Time{}, errors.Errorf("unable to interpret time value")
}
return time.Now().Add(-duration), nil
}
// OpenExclusiveFile opens a file for writing and ensures it doesn't already exist
func OpenExclusiveFile(path string) (*os.File, error) {
baseDir := filepath.Dir(path)
if baseDir != "" {
if _, err := os.Stat(baseDir); err != nil {
return nil, err
}
}
return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
}
// PullType determines whether to pull a new image when creating or running a container
type PullType int
const (
// PullImageAlways always tries to pull a new image when creating or running
PullImageAlways PullType = iota
// PullImageMissing pulls the image only if it is not present locally
PullImageMissing
// PullImageNever never pulls a new image
PullImageNever
)
// ValidatePullType checks if the pullType from the CLI is valid and returns the valid enum type;
// if the value from the CLI is invalid, it returns an error
func ValidatePullType(pullType string) (PullType, error) {
switch pullType {
case "always":
return PullImageAlways, nil
case "missing":
return PullImageMissing, nil
case "never":
return PullImageNever, nil
case "":
return PullImageMissing, nil
default:
return PullImageMissing, errors.Errorf("invalid pull type %q", pullType)
}
}
// ExitCode reads the error message produced when the container process fails to
// execute and returns 0 if there is no error, 127 if the command does not exist,
// or 126 for all other errors
func ExitCode(err error) int {
if err == nil {
return 0
}
e := strings.ToLower(err.Error())
if strings.Contains(e, "file not found") ||
strings.Contains(e, "no such file or directory") {
return 127
}
return 126
}
// HomeDir returns the home directory for the current user.
func HomeDir() (string, error) {
home := os.Getenv("HOME")
if home == "" {
usr, err := user.LookupId(fmt.Sprintf("%d", rootless.GetRootlessUID()))
if err != nil {
return "", errors.Wrapf(err, "unable to resolve HOME directory")
}
home = usr.HomeDir
}
return home, nil
}
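// Tmpdir returns the directory for temporary files, honoring $TMPDIR and
// defaulting to /var/tmp.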
func Tmpdir() string {
tmpdir := os.Getenv("TMPDIR")
if tmpdir == "" {
tmpdir = "/var/tmp"
}
return tmpdir
}
// ValidateSysctls validates a list of sysctls and returns them as a map.
func ValidateSysctls(strSlice []string) (map[string]string, error) {
sysctl := make(map[string]string)
validSysctlMap := map[string]bool{
"kernel.msgmax": true,
"kernel.msgmnb": true,
"kernel.msgmni": true,
"kernel.sem": true,
"kernel.shmall": true,
"kernel.shmmax": true,
"kernel.shmmni": true,
"kernel.shm_rmid_forced": true,
}
validSysctlPrefixes := []string{
"net.",
"fs.mqueue.",
}
for _, val := range strSlice {
foundMatch := false
arr := strings.Split(val, "=")
if len(arr) < 2 {
return nil, errors.Errorf("%s is invalid, sysctl values must be in the form of KEY=VALUE", val)
}
if validSysctlMap[arr[0]] {
sysctl[arr[0]] = arr[1]
continue
}
for _, prefix := range validSysctlPrefixes {
if strings.HasPrefix(arr[0], prefix) {
sysctl[arr[0]] = arr[1]
foundMatch = true
break
}
}
if !foundMatch {
return nil, errors.Errorf("sysctl '%s' is not whitelisted", arr[0])
}
}
return sysctl, nil
}
func DefaultContainerConfig() *config.Config {
return containerConfig
}
| [
"\"HOME\"",
"\"TMPDIR\""
]
| []
| [
"HOME",
"TMPDIR"
]
| [] | ["HOME", "TMPDIR"] | go | 2 | 0 | |
Godeps/_workspace/src/github.com/mailgun/vulcand/backend/etcdbackend/etcdbackend_test.go | // Note on debugging:
// github.com/davecgh/go-spew/spew package is extremely helpful when it comes to debugging DeepEquals issues.
// Here's how one uses it:
// spew.Printf("%#v\n vs\n %#v\n", a, b)
//
package etcdbackend
import (
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/mailgun/go-etcd/etcd"
"github.com/mailgun/log"
"github.com/mailgun/timetools"
"github.com/mailgun/vulcand/secret"
. "gopkg.in/check.v1"
. "github.com/mailgun/vulcand/backend"
"github.com/mailgun/vulcand/plugin/ratelimit"
. "github.com/mailgun/vulcand/plugin/registry"
)
func TestEtcdBackend(t *testing.T) { TestingT(t) }
type EtcdBackendSuite struct {
backend *EtcdBackend
nodes []string
etcdPrefix string
consistency string
client *etcd.Client
changesC chan interface{}
timeProvider *timetools.FreezedTime
key string
stopC chan bool
}
var _ = Suite(&EtcdBackendSuite{
etcdPrefix: "/vulcandtest",
consistency: etcd.STRONG_CONSISTENCY,
timeProvider: &timetools.FreezedTime{
CurrentTime: time.Date(2012, 3, 4, 5, 6, 7, 0, time.UTC),
},
})
func (s *EtcdBackendSuite) SetUpSuite(c *C) {
log.Init([]*log.LogConfig{&log.LogConfig{Name: "console"}})
key, err := secret.NewKeyString()
if err != nil {
panic(err)
}
s.key = key
nodes_string := os.Getenv("VULCAND_TEST_ETCD_NODES")
if nodes_string == "" {
// Skips the entire suite
c.Skip("This test requires etcd, provide comma separated nodes in VULCAND_TEST_ETCD_NODES environment variable")
return
}
s.nodes = strings.Split(nodes_string, ",")
}
func (s *EtcdBackendSuite) SetUpTest(c *C) {
// Initiate a backend with a registry
key, err := secret.KeyFromString(s.key)
c.Assert(err, IsNil)
box, err := secret.NewBox(key)
c.Assert(err, IsNil)
backend, err := NewEtcdBackendWithOptions(
GetRegistry(),
s.nodes,
s.etcdPrefix,
Options{
EtcdConsistency: s.consistency,
Box: box,
})
c.Assert(err, IsNil)
s.backend = backend
s.client = s.backend.client
// Delete all values under the given prefix
_, err = s.client.Get(s.etcdPrefix, false, false)
if err != nil {
// There's no key like this
if !notFound(err) {
// We haven't expected this error, oops
c.Assert(err, IsNil)
}
} else {
_, err = s.backend.client.Delete(s.etcdPrefix, true)
c.Assert(err, IsNil)
}
s.changesC = make(chan interface{})
s.stopC = make(chan bool)
go s.backend.WatchChanges(s.changesC, s.stopC)
}
func (s *EtcdBackendSuite) TearDownTest(c *C) {
close(s.stopC)
s.backend.Close()
}
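// collectChanges receives the expected number of change events from the changes
// channel, failing the test if any of them times out.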
func (s *EtcdBackendSuite) collectChanges(c *C, expected int) []interface{} {
changes := make([]interface{}, expected)
for i := range changes {
select {
case changes[i] = <-s.changesC:
// successfully collected changes
case <-time.After(2 * time.Second):
c.Fatalf("Timeout occured")
}
}
return changes
}
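// expectChanges collects len(expected) change events and asserts each one
// deep-equals the corresponding expected change.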
func (s *EtcdBackendSuite) expectChanges(c *C, expected ...interface{}) {
changes := s.collectChanges(c, len(expected))
for i, ch := range changes {
c.Assert(ch, DeepEquals, expected[i])
}
}
func (s *EtcdBackendSuite) TestAddDeleteHost(c *C) {
host := s.makeHost("localhost")
h, err := s.backend.AddHost(host)
c.Assert(err, IsNil)
c.Assert(h, Equals, host)
s.expectChanges(c, &HostAdded{Host: host})
err = s.backend.DeleteHost("localhost")
c.Assert(err, IsNil)
s.expectChanges(c, &HostDeleted{
Name: "localhost",
})
}
func (s *EtcdBackendSuite) TestAddExpireHost(c *C) {
host := s.makeHost("localhost")
_, err := s.client.SetDir(s.backend.path("hosts", host.Name), 1)
c.Assert(err, IsNil)
s.expectChanges(c, &HostAdded{Host: host}, &HostDeleted{Name: host.Name})
}
func (s *EtcdBackendSuite) TestAddHostWithOptions(c *C) {
host := s.makeHost("localhost")
host.Options.Default = true
h, err := s.backend.AddHost(host)
c.Assert(err, IsNil)
c.Assert(h, Equals, host)
s.expectChanges(c, &HostAdded{Host: host})
err = s.backend.DeleteHost("localhost")
c.Assert(err, IsNil)
s.expectChanges(c, &HostDeleted{
Name: "localhost",
})
}
func (s *EtcdBackendSuite) TestAddHostWithKeyPair(c *C) {
host := s.makeHost("localhost")
host.KeyPair = &KeyPair{
Key: []byte("hello"),
Cert: []byte("world"),
}
h, err := s.backend.AddHost(host)
c.Assert(err, IsNil)
c.Assert(h, Equals, host)
hostNoKeyPair := *host
hostNoKeyPair.KeyPair = nil
s.expectChanges(c, &HostAdded{Host: &hostNoKeyPair}, &HostKeyPairUpdated{Host: host})
}
func (s *EtcdBackendSuite) TestAddHostWithListeners(c *C) {
host := s.makeHost("localhost")
host.Listeners = []*Listener{
&Listener{
Protocol: "http",
Address: Address{
Network: "tcp",
Address: "127.0.0.1:9000",
},
},
}
h, err := s.backend.AddHost(host)
c.Assert(err, IsNil)
c.Assert(h, Equals, host)
hostNoListeners := *host
hostNoListeners.Listeners = []*Listener{}
s.expectChanges(c, &HostAdded{Host: &hostNoListeners}, &HostListenerAdded{Host: host, Listener: host.Listeners[0]})
}
func (s *EtcdBackendSuite) TestAddHostListener(c *C) {
host := s.makeHost("localhost")
h, err := s.backend.AddHost(host)
c.Assert(err, IsNil)
c.Assert(h, Equals, host)
host.Listeners = []*Listener{
&Listener{
Id: "ls1",
Protocol: "http",
Address: Address{
Network: "tcp",
Address: "127.0.0.1:9000",
},
},
}
_, err = s.backend.AddHostListener(host.Name, host.Listeners[0])
c.Assert(err, IsNil)
hostNoListeners := *host
hostNoListeners.Listeners = []*Listener{}
s.expectChanges(c, &HostAdded{Host: &hostNoListeners}, &HostListenerAdded{Host: host, Listener: host.Listeners[0]})
// Adding same address second time fails
_, err = s.backend.AddHostListener(host.Name, host.Listeners[0])
c.Assert(err, NotNil)
c.Assert(s.backend.DeleteHostListener(host.Name, host.Listeners[0].Id), IsNil)
s.expectChanges(c, &HostListenerDeleted{Host: &hostNoListeners, ListenerId: host.Listeners[0].Id})
}
func (s *EtcdBackendSuite) TestUpdateHostKeyPair(c *C) {
host := s.makeHost("localhost")
h, err := s.backend.AddHost(host)
c.Assert(err, IsNil)
c.Assert(h, Equals, host)
hostNoKeyPair := *host
hostNoKeyPair.KeyPair = nil
host.KeyPair = &KeyPair{
Key: []byte("hello"),
Cert: []byte("world"),
}
s.backend.UpdateHostKeyPair(host.Name, host.KeyPair)
s.expectChanges(c, &HostAdded{Host: &hostNoKeyPair}, &HostKeyPairUpdated{Host: host})
}
func (s *EtcdBackendSuite) TestGetUpstreams(c *C) {
up := s.makeUpstream("u1", 1)
_, err := s.backend.AddUpstream(up)
c.Assert(err, IsNil)
_, err = s.backend.AddEndpoint(up.Endpoints[0])
c.Assert(err, IsNil)
upstreams, err := s.backend.GetUpstreams()
c.Assert(err, IsNil)
c.Assert(len(upstreams), Equals, 1)
c.Assert(upstreams[0], DeepEquals, up)
}
// Adding the host twice fails
func (s *EtcdBackendSuite) TestAddTwice(c *C) {
_, err := s.backend.AddHost(&Host{Name: "localhost"})
c.Assert(err, IsNil)
_, err = s.backend.AddHost(&Host{Name: "localhost"})
c.Assert(err, FitsTypeOf, &AlreadyExistsError{})
}
func (s *EtcdBackendSuite) TestUpstreamCRUD(c *C) {
up := s.makeUpstream("up1", 0)
u, err := s.backend.AddUpstream(up)
c.Assert(err, IsNil)
c.Assert(u, Equals, up)
s.expectChanges(c, &UpstreamAdded{Upstream: up}, &UpstreamOptionsUpdated{Upstream: up})
upR, err := s.backend.GetUpstream("up1")
c.Assert(err, IsNil)
c.Assert(upR, NotNil)
c.Assert(upR.Id, Equals, "up1")
o := UpstreamOptions{Timeouts: UpstreamTimeouts{Read: "1s"}}
out, err := s.backend.UpdateUpstreamOptions(up.Id, o)
c.Assert(err, IsNil)
c.Assert(out.Options, DeepEquals, o)
s.expectChanges(c, &UpstreamOptionsUpdated{Upstream: out})
err = s.backend.DeleteUpstream("up1")
c.Assert(err, IsNil)
s.expectChanges(c, &UpstreamDeleted{
UpstreamId: "up1",
})
}
func (s *EtcdBackendSuite) TestUpstreamAutoId(c *C) {
u, err := s.backend.AddUpstream(&Upstream{Endpoints: []*Endpoint{}})
c.Assert(err, IsNil)
c.Assert(u, NotNil)
s.expectChanges(c, &UpstreamAdded{Upstream: u})
}
func (s *EtcdBackendSuite) TestUpstreamTwice(c *C) {
_, err := s.backend.AddUpstream(&Upstream{Id: "up1"})
c.Assert(err, IsNil)
_, err = s.backend.AddUpstream(&Upstream{Id: "up1"})
c.Assert(err, FitsTypeOf, &AlreadyExistsError{})
}
func (s *EtcdBackendSuite) TestEndpointAddReadDelete(c *C) {
up0 := s.makeUpstream("up1", 0)
_, err := s.backend.AddUpstream(up0)
c.Assert(err, IsNil)
s.expectChanges(c,
&UpstreamAdded{Upstream: up0},
&UpstreamOptionsUpdated{Upstream: up0})
up := s.makeUpstream("up1", 1)
e := up.Endpoints[0]
eR, err := s.backend.AddEndpoint(e)
c.Assert(err, IsNil)
c.Assert(eR, Equals, e)
eO, err := s.backend.GetEndpoint(e.UpstreamId, e.Id)
c.Assert(err, IsNil)
c.Assert(eO, DeepEquals, e)
s.expectChanges(c, &EndpointUpdated{
Upstream: up,
Endpoint: e,
})
err = s.backend.DeleteEndpoint(up.Id, e.Id)
c.Assert(err, IsNil)
s.expectChanges(c, &EndpointDeleted{
Upstream: up0,
EndpointId: e.Id,
})
}
func (s *EtcdBackendSuite) TestAddEndpointUsingSet(c *C) {
up := s.makeUpstream("u1", 1)
e := up.Endpoints[0]
_, err := s.client.Set(s.backend.path("upstreams", up.Id, "endpoints", e.Id), e.Url, 0)
c.Assert(err, IsNil)
s.expectChanges(c, &EndpointUpdated{
Upstream: up,
Endpoint: up.Endpoints[0],
})
}
func (s *EtcdBackendSuite) TestExpireEndpoint(c *C) {
up := s.makeUpstream("u1", 1)
e := up.Endpoints[0]
_, err := s.client.Set(s.backend.path("upstreams", up.Id, "endpoints", e.Id), e.Url, 1)
c.Assert(err, IsNil)
s.expectChanges(c, &EndpointUpdated{
Upstream: up,
Endpoint: up.Endpoints[0],
}, &EndpointDeleted{
Upstream: s.makeUpstream(up.Id, 0),
EndpointId: e.Id,
})
}
func (s *EtcdBackendSuite) TestAddEndpointAutoId(c *C) {
up := s.makeUpstream("up1", 1)
e := up.Endpoints[0]
e.Id = ""
_, err := s.backend.AddUpstream(up)
c.Assert(err, IsNil)
eR, err := s.backend.AddEndpoint(e)
c.Assert(len(eR.Id), Not(Equals), 0)
}
func (s *EtcdBackendSuite) TestDeleteBadEndpoint(c *C) {
up := s.makeUpstream("up1", 1)
_, err := s.backend.AddUpstream(up)
c.Assert(err, IsNil)
// Non existent endpoint
c.Assert(s.backend.DeleteEndpoint(up.Id, "notHere"), FitsTypeOf, &NotFoundError{})
// Non existent upstream
c.Assert(s.backend.DeleteEndpoint("upNotHere", "notHere"), FitsTypeOf, &NotFoundError{})
}
func (s *EtcdBackendSuite) TestLocationAddReadDelete(c *C) {
up := s.makeUpstream("u1", 1)
e := up.Endpoints[0]
_, err := s.backend.AddUpstream(up)
c.Assert(err, IsNil)
_, err = s.backend.AddEndpoint(e)
c.Assert(err, IsNil)
host := s.makeHost("localhost")
_, err = s.backend.AddHost(host)
c.Assert(err, IsNil)
s.collectChanges(c, 4)
loc := s.makeLocation("loc1", "/hello", host, up)
// CREATE
locR, err := s.backend.AddLocation(loc)
c.Assert(err, IsNil)
c.Assert(locR, DeepEquals, loc)
// READ
locR2, err := s.backend.GetLocation(loc.Hostname, loc.Id)
c.Assert(err, IsNil)
c.Assert(locR2, DeepEquals, loc)
s.expectChanges(c, &LocationUpstreamUpdated{
Host: host,
Location: loc,
})
// DELETE
c.Assert(s.backend.DeleteLocation(loc.Hostname, loc.Id), IsNil)
s.expectChanges(c, &LocationDeleted{
Host: host,
LocationId: loc.Id,
})
}
func (s *EtcdBackendSuite) TestLocationAddWithOptions(c *C) {
up := s.makeUpstream("u1", 1)
e := up.Endpoints[0]
_, err := s.backend.AddUpstream(up)
c.Assert(err, IsNil)
_, err = s.backend.AddEndpoint(e)
c.Assert(err, IsNil)
host := s.makeHost("localhost")
_, err = s.backend.AddHost(host)
c.Assert(err, IsNil)
s.collectChanges(c, 3)
loc := s.makeLocationWithOptions("loc1", "/hello", host, up, LocationOptions{Hostname: "host1"})
// CREATE
locR, err := s.backend.AddLocation(loc)
c.Assert(err, IsNil)
c.Assert(locR, DeepEquals, loc)
// READ
locR2, err := s.backend.GetLocation(loc.Hostname, loc.Id)
c.Assert(err, IsNil)
c.Assert(locR2, DeepEquals, loc)
}
// Make sure we can generate location id when it's not supplied
func (s *EtcdBackendSuite) TestLocationAutoId(c *C) {
up := s.makeUpstream("u1", 1)
host := s.makeHost("localhost")
e := up.Endpoints[0]
_, err := s.backend.AddUpstream(up)
c.Assert(err, IsNil)
_, err = s.backend.AddEndpoint(e)
c.Assert(err, IsNil)
_, err = s.backend.AddHost(host)
c.Assert(err, IsNil)
s.collectChanges(c, 3)
locR, err := s.backend.AddLocation(s.makeLocation("", "/hello", host, up))
c.Assert(err, IsNil)
c.Assert(len(locR.Id), Not(Equals), 0)
}
func (s *EtcdBackendSuite) TestLocationUpdateUpstream(c *C) {
up1 := s.makeUpstream("u1", 1)
up2 := s.makeUpstream("u2", 1)
host := s.makeHost("localhost")
_, err := s.backend.AddUpstream(up1)
c.Assert(err, IsNil)
_, err = s.backend.AddEndpoint(up1.Endpoints[0])
c.Assert(err, IsNil)
_, err = s.backend.AddUpstream(up2)
c.Assert(err, IsNil)
_, err = s.backend.AddEndpoint(up2.Endpoints[0])
c.Assert(err, IsNil)
_, err = s.backend.AddHost(host)
c.Assert(err, IsNil)
s.collectChanges(c, 7)
loc := s.makeLocation("loc1", "/hello", host, up1)
_, err = s.backend.AddLocation(loc)
c.Assert(err, IsNil)
s.collectChanges(c, 1)
locU, err := s.backend.UpdateLocationUpstream(loc.Hostname, loc.Id, up2.Id)
c.Assert(err, IsNil)
c.Assert(locU.Upstream, DeepEquals, up2)
s.expectChanges(c, &LocationUpstreamUpdated{
Host: host,
Location: locU,
})
}
func (s *EtcdBackendSuite) TestLocationUpdateOptions(c *C) {
up := s.makeUpstream("u1", 1)
host := s.makeHost("localhost")
_, err := s.backend.AddUpstream(up)
c.Assert(err, IsNil)
_, err = s.backend.AddEndpoint(up.Endpoints[0])
c.Assert(err, IsNil)
_, err = s.backend.AddHost(host)
c.Assert(err, IsNil)
s.collectChanges(c, 4)
loc := s.makeLocation("loc1", "/hello", host, up)
_, err = s.backend.AddLocation(loc)
c.Assert(err, IsNil)
s.collectChanges(c, 1)
options := LocationOptions{
Limits: LocationLimits{
MaxMemBodyBytes: 123456,
},
}
locU, err := s.backend.UpdateLocationOptions(loc.Hostname, loc.Id, options)
c.Assert(err, IsNil)
c.Assert(locU.Options, DeepEquals, options)
s.expectChanges(c, &LocationOptionsUpdated{
Host: host,
Location: locU,
})
}
func (s *EtcdBackendSuite) TestAddLocationBadUpstream(c *C) {
host := s.makeHost("localhost")
up1 := s.makeUpstream("u1", 1)
loc := s.makeLocation("loc1", "/hello", host, up1)
_, err := s.backend.AddLocation(loc)
c.Assert(err, NotNil)
}
func (s *EtcdBackendSuite) TestAddLocationBadHost(c *C) {
up := s.makeUpstream("u1", 1)
_, err := s.backend.AddUpstream(up)
c.Assert(err, IsNil)
host := s.makeHost("localhost")
loc := s.makeLocation("loc1", "/hello", host, up)
_, err = s.backend.AddLocation(loc)
c.Assert(err, NotNil)
}
func (s *EtcdBackendSuite) TestLocationRateLimitCRUD(c *C) {
up := s.makeUpstream("u1", 1)
host := s.makeHost("localhost")
e := up.Endpoints[0]
_, err := s.backend.AddUpstream(up)
c.Assert(err, IsNil)
_, err = s.backend.AddEndpoint(e)
c.Assert(err, IsNil)
_, err = s.backend.AddHost(host)
c.Assert(err, IsNil)
s.collectChanges(c, 4)
loc := s.makeLocation("loc1", "/hello", host, up)
_, err = s.backend.AddLocation(loc)
c.Assert(err, IsNil)
s.collectChanges(c, 1)
m := s.makeRateLimit("rl1", 10, "client.ip", 20, 1, loc)
mR, err := s.backend.AddLocationMiddleware(loc.Hostname, loc.Id, m)
c.Assert(mR, NotNil)
c.Assert(err, IsNil)
loc.Middlewares = []*MiddlewareInstance{m}
s.expectChanges(c, &LocationMiddlewareUpdated{
Host: host,
Location: loc,
Middleware: m,
})
_, err = s.backend.UpdateLocationMiddleware(loc.Hostname, loc.Id, m)
c.Assert(err, IsNil)
s.expectChanges(c, &LocationMiddlewareUpdated{
Host: host,
Location: loc,
Middleware: m,
})
c.Assert(s.backend.DeleteLocationMiddleware(loc.Hostname, loc.Id, m.Type, m.Id), IsNil)
loc.Middlewares = []*MiddlewareInstance{}
s.expectChanges(c, &LocationMiddlewareDeleted{
Host: host,
Location: loc,
MiddlewareId: m.Id,
MiddlewareType: m.Type,
})
}
func (s *EtcdBackendSuite) TestLocationLimitsErrorHandling(c *C) {
up := s.makeUpstream("u1", 1)
host := s.makeHost("localhost")
loc := s.makeLocation("loc1", "/hello", host, up)
// Location does not exist
m := s.makeRateLimit("rl1", 10, "client.ip", 20, 1, loc)
_, err := s.backend.AddLocationMiddleware(loc.Hostname, loc.Id, m)
c.Assert(err, NotNil)
_, err = s.backend.UpdateLocationMiddleware(loc.Hostname, loc.Id, m)
c.Assert(err, NotNil)
// Deleting a non-existent middleware fails
c.Assert(s.backend.DeleteLocationMiddleware(loc.Hostname, loc.Id, m.Type, m.Id), FitsTypeOf, &NotFoundError{})
// Middleware type is not registered
mBad := s.makeRateLimit("rl1", 10, "client.ip", 20, 1, loc)
m.Type = "what"
// Adding it fails
_, err = s.backend.AddLocationMiddleware(loc.Hostname, loc.Id, mBad)
c.Assert(err, FitsTypeOf, &NotFoundError{})
// Updating it fails
_, err = s.backend.UpdateLocationMiddleware(loc.Hostname, loc.Id, mBad)
c.Assert(err, FitsTypeOf, &NotFoundError{})
// Getting it fails
_, err = s.backend.GetLocationMiddleware(loc.Hostname, loc.Id, mBad.Type, mBad.Id)
c.Assert(err, FitsTypeOf, &NotFoundError{})
// Deleting it fails
c.Assert(s.backend.DeleteLocationMiddleware(loc.Hostname, loc.Id, "what", m.Id), FitsTypeOf, &NotFoundError{})
// Just bad params
_, err = s.backend.AddLocationMiddleware("", "", mBad)
c.Assert(err, NotNil)
// Updating it fails
_, err = s.backend.UpdateLocationMiddleware("", "", mBad)
c.Assert(err, NotNil)
}
func (s *EtcdBackendSuite) TestLocationMiddlewaresAutoId(c *C) {
up := s.makeUpstream("u1", 1)
host := s.makeHost("localhost")
e := up.Endpoints[0]
_, err := s.backend.AddUpstream(up)
c.Assert(err, IsNil)
_, err = s.backend.AddEndpoint(e)
c.Assert(err, IsNil)
_, err = s.backend.AddHost(host)
c.Assert(err, IsNil)
s.collectChanges(c, 3)
loc := s.makeLocation("loc1", "/hello", host, up)
_, err = s.backend.AddLocation(loc)
c.Assert(err, IsNil)
s.collectChanges(c, 1)
m := s.makeRateLimit("", 10, "client.ip", 20, 1, loc)
mR, err := s.backend.AddLocationMiddleware(loc.Hostname, loc.Id, m)
c.Assert(err, IsNil)
c.Assert(mR.Id, Not(Equals), "")
}
func (s *EtcdBackendSuite) TestDeleteUpstreamUsedByLocation(c *C) {
up := s.makeUpstream("u1", 1)
host := s.makeHost("localhost")
e := up.Endpoints[0]
loc := s.makeLocation("loc1", "/hello", host, up)
_, err := s.backend.AddUpstream(up)
c.Assert(err, IsNil)
_, err = s.backend.AddEndpoint(e)
c.Assert(err, IsNil)
_, err = s.backend.AddHost(host)
c.Assert(err, IsNil)
_, err = s.backend.AddLocation(loc)
c.Assert(err, IsNil)
s.collectChanges(c, 4)
c.Assert(s.backend.DeleteUpstream(up.Id), NotNil)
}
func (s *EtcdBackendSuite) makeUpstream(id string, endpoints int) *Upstream {
up := &Upstream{
Id: id,
Endpoints: []*Endpoint{},
}
for i := 1; i <= endpoints; i += 1 {
e := &Endpoint{
Id: fmt.Sprintf("e%d", i),
UpstreamId: up.Id,
Url: fmt.Sprintf("http://endpoint%d.com", i),
}
up.Endpoints = append(up.Endpoints, e)
}
return up
}
func (s *EtcdBackendSuite) makeHost(name string) *Host {
return &Host{
Name: name,
Locations: []*Location{},
Listeners: []*Listener{},
Options: HostOptions{},
}
}
func (s *EtcdBackendSuite) makeLocationWithOptions(id string, path string, host *Host, up *Upstream, options LocationOptions) *Location {
return &Location{
Id: id,
Hostname: host.Name,
Upstream: up,
Path: path,
Middlewares: []*MiddlewareInstance{},
Options: options,
}
}
func (s *EtcdBackendSuite) makeLocation(id string, path string, host *Host, up *Upstream) *Location {
return s.makeLocationWithOptions(id, path, host, up, LocationOptions{})
}
func (s *EtcdBackendSuite) makeRateLimit(id string, rate int64, variable string, burst int64, periodSeconds int64, loc *Location) *MiddlewareInstance {
rl, err := ratelimit.FromOther(ratelimit.RateLimit{
PeriodSeconds: periodSeconds,
Requests: rate,
Burst: burst,
Variable: variable})
if err != nil {
panic(err)
}
return &MiddlewareInstance{
Type: "ratelimit",
Priority: 1,
Id: id,
Middleware: rl,
}
}
| [
"\"VULCAND_TEST_ETCD_NODES\""
]
| []
| [
"VULCAND_TEST_ETCD_NODES"
]
| [] | ["VULCAND_TEST_ETCD_NODES"] | go | 1 | 0 | |
mlab-ns-simulator/setup.py | #!/usr/bin/env python2
import os
from os.path import abspath, dirname, join
import subprocess
from setuptools import setup, find_packages, Command
# Note: We follow PEP-0440 versioning:
# http://legacy.python.org/dev/peps/pep-0440/
VERSION = '0.1.dev0'
# Note: The dependency versions are chosen to match ooni-backend where they overlap:
TwistedDependency = 'twisted == 13.0' # BUG: Include the hash as per ooni-backend.
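# run echoes the command, executes it, and re-raises on a non-zero exit status.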
def run(*args):
print 'Running: {0!r}'.format(args)
try:
subprocess.check_call(args, shell=False)
except subprocess.CalledProcessError, e:
print 'Process exited with {0!r} exit status.'.format(e.returncode)
raise
class TestWithCoverageAndTrialInAVirtualEnvCommand (Command):
"""Run unit tests with coverage analysis and reporting in a virtualenv."""
# Internal settings:
TestToolRequirements = [
TwistedDependency,
'coverage == 3.7.1',
'mock >= 1.0.1',
]
description = __doc__
user_options = [
]
def __init__(self, dist):
Command.__init__(self, dist)
self.oonisupportdir = dirname(dirname(abspath(__file__)))
self.pkgdir = join(self.oonisupportdir, 'mlab-ns-simulator')
self.testdir = join(self.pkgdir, 'build', 'test')
self.venvdir = join(self.testdir, 'venv')
bindir = join(self.venvdir, 'bin')
self.pip = join(bindir, 'pip')
self.coverage = join(bindir, 'coverage')
self.trial = join(bindir, 'trial')
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self._initialize_virtualenv()
self._install_testing_tools()
pkgname = 'mlabsim'
pypkg = join(self.pkgdir, pkgname)
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] = '{0}:{1}'.format(self.pkgdir, os.environ['PYTHONPATH'])
else:
os.environ['PYTHONPATH'] = self.pkgdir
# Coverage and trial dump things into cwd, so cd:
os.chdir(self.testdir)
run(self.coverage, 'run', '--branch', '--source', pypkg, self.trial, pkgname)
run(self.coverage, 'html')
def _initialize_virtualenv(self):
virtualenvscript = join(self.oonisupportdir, 'virtualenv', 'virtualenv.py')
run('python2', virtualenvscript, '--no-site-packages', self.venvdir)
def _install_testing_tools(self):
reqspath = join(self.testdir, 'test-tool-requirements.txt')
with file(reqspath, 'w') as f:
for req in self.TestToolRequirements:
f.write(req + '\n')
run(self.pip, 'install', '--use-mirrors', '--requirement', reqspath)
setup(
# Humanish metadata:
name='mlab-ns-simulator',
description='A simulator for the mlab-ns service which provides features Ooni needs.',
version=VERSION,
author='LeastAuthority',
author_email='[email protected]',
license='FIXME',
url='https://github.com/LeastAuthority/ooni-support',
# Python structure for this package:
packages=find_packages(),
entry_points = {
'console_scripts': [
'mlabsim = mlabsim.main:main',
],
},
test_suite='mlabsim.tests',
# Dependencies:
install_requires=[
TwistedDependency,
'argparse == 1.2.1',
],
# Command customization:
cmdclass={
'test': TestWithCoverageAndTrialInAVirtualEnvCommand,
},
)
| []
| []
| [
"PYTHONPATH"
]
| [] | ["PYTHONPATH"] | python | 1 | 0 | |
aerofoil/wsgi.py | """
WSGI config for aerofoil project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
import dotenv
from django.core.wsgi import get_wsgi_application
dotenv.read_dotenv(os.path.join(os.path.dirname(os.path.dirname(__file__)), '.env'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'aerofoil.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
mysql-migration_test.go | package main
import (
"database/sql"
_ "database/sql"
"fmt"
_ "github.com/denisenkom/go-mssqldb"
"github.com/denisenkom/go-mssqldb/msdsn"
_ "github.com/denisenkom/go-mssqldb/msdsn"
"github.com/go-sql-driver/mysql"
_ "github.com/go-sql-driver/mysql"
"log"
"os"
"strings"
"testing"
)
func TestGetAndValidateSchemasSuccess(t *testing.T) {
mssqlDsn, mysqlDsn, err := prepareDatabases()
if err != nil {
t.Error(err.Error())
return
}
mssqlDb, mysqlDb, _ := tryGetConnections(mssqlDsn, mysqlDsn)
defer mssqlDb.Close()
defer mysqlDb.Close()
_, err = mssqlDb.Exec(`CREATE TABLE Documents
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
DocDate DATE NOT NULL,
Description NCHAR(255),
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
_, err = mysqlDb.Exec(`CREATE TABLE Documents
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
DocDate DATE NOT NULL,
Description NCHAR(255),
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
mssqlTables, mysqlTables, _ := getAndValidateSchemas(mssqlDb, mysqlDb)
if len(mssqlTables) != 1 {
t.Error("Validation got an incorrect count of tables from MsSQL")
return
}
if len(mysqlTables) != 1 {
t.Error("Validation got an incorrect count of tables from MySQL")
return
}
if len(mssqlTables[0].columns) != 4 {
t.Error("Validation got an incorrect count of columns from MsSQL")
return
}
if len(mysqlTables[0].columns) != 4 {
t.Error("Validation got an incorrect count of columns from MySQL")
return
}
if strings.ToLower(mssqlTables[0].Name) != "documents" {
t.Error("Validation got an incorrect name of table from MsSQL")
return
}
if strings.ToLower(mysqlTables[0].Name) != "documents" {
t.Error("Validation got an incorrect name of table from MySQL")
return
}
if mssqlTables[0].columns[0].Name != "CompanyID" ||
mssqlTables[0].columns[1].Name != "DocumentID" ||
mssqlTables[0].columns[2].Name != "DocDate" ||
mssqlTables[0].columns[3].Name != "Description" {
t.Error("Incorrect columns in MsSQL")
return
}
if mysqlTables[0].columns[0].Name != "CompanyID" ||
mysqlTables[0].columns[1].Name != "DocumentID" ||
mysqlTables[0].columns[2].Name != "DocDate" ||
mysqlTables[0].columns[3].Name != "Description" {
t.Error("Incorrect columns in MySQL")
return
}
}
func TestGetAndValidateSchemasDifferentColumnsFail(t *testing.T) {
mssqlDsn, mysqlDsn, err := prepareDatabases()
if err != nil {
t.Error(err.Error())
return
}
mssqlDb, mysqlDb, _ := tryGetConnections(mssqlDsn, mysqlDsn)
defer mssqlDb.Close()
defer mysqlDb.Close()
_, err = mssqlDb.Exec(`CREATE TABLE Documents
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
DocDate DATE NOT NULL,
Description NCHAR(255),
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
_, err = mysqlDb.Exec(`CREATE TABLE Documents
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
DocDate DATE NOT NULL,
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
_, _, err = getAndValidateSchemas(mssqlDb, mysqlDb)
if err == nil || err.Error() != "databases' count of columns are different" {
t.Error("Validation shouldn't pass due to different count of columns")
return
}
}
func TestGetAndValidateSchemasDifferentTablesCountFail(t *testing.T) {
mssqlDsn, mysqlDsn, err := prepareDatabases()
if err != nil {
t.Error(err.Error())
return
}
mssqlDb, mysqlDb, _ := tryGetConnections(mssqlDsn, mysqlDsn)
defer mssqlDb.Close()
defer mysqlDb.Close()
_, err = mssqlDb.Exec(`CREATE TABLE Documents
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
DocDate DATE NOT NULL,
Description NCHAR(255),
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
_, err = mssqlDb.Exec(`CREATE TABLE Documents1
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
DocDate DATE NOT NULL,
Description NCHAR(255),
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
_, err = mysqlDb.Exec(`CREATE TABLE Documents
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
DocDate DATE NOT NULL,
Description NCHAR(255),
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
_, _, err = getAndValidateSchemas(mssqlDb, mysqlDb)
if err == nil || err.Error() != "databases' count of tables are different" {
t.Error("Validation shouldn't pass due to different count of tables")
return
}
}
func TestGetAndValidateSchemasDifferentTablesNameFail(t *testing.T) {
mssqlDsn, mysqlDsn, err := prepareDatabases()
if err != nil {
t.Error(err.Error())
return
}
mssqlDb, mysqlDb, _ := tryGetConnections(mssqlDsn, mysqlDsn)
defer mssqlDb.Close()
defer mysqlDb.Close()
_, err = mssqlDb.Exec(`CREATE TABLE Documents
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
DocDate DATE NOT NULL,
Description NCHAR(255),
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
_, err = mysqlDb.Exec(`CREATE TABLE Documents1
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
DocDate DATE NOT NULL,
Description NCHAR(255),
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
_, _, err = getAndValidateSchemas(mssqlDb, mysqlDb)
if err == nil || err.Error() != "database's tables are different" {
t.Error("Validation shouldn't pass due to different names of tables")
return
}
}
type Document struct {
CompanyID int
DocumentID int
DocDate string
Description string
}
func TestMigrateDatabase(t *testing.T) {
mssqlDsn, mysqlDsn, err := prepareDatabases()
if err != nil {
t.Error(err.Error())
return
}
TestGetAndValidateSchemasSuccess(t)
mssqlDb, mysqlDb, _ := tryGetConnections(mssqlDsn, mysqlDsn)
defer mssqlDb.Close()
defer mysqlDb.Close()
documents := []Document{{1, 0, "1970-02-04", "Description 1"},
{1, 1, "1993-01-27", "Description 2"},
{1, 2, "2016-06-04", "Description 3"}}
for _, doc := range documents {
_, err = mssqlDb.Exec(`INSERT INTO Documents (CompanyID, DocumentID, DocDate, Description)
VALUES (?, ?, ?, ?);`,
doc.CompanyID, doc.DocumentID, doc.DocDate, doc.Description)
if err != nil {
t.Error(err.Error())
return
}
}
migrateDatabase(mssqlDsn, mysqlDsn)
rows, err := mysqlDb.Query(`SELECT CompanyID, DocumentID, DocDate, Description FROM Documents`)
if err != nil {
t.Error(err.Error())
return
}
var migratedDocuments []Document
for rows.Next() {
var doc Document
rows.Scan(&doc.CompanyID, &doc.DocumentID, &doc.DocDate, &doc.Description)
migratedDocuments = append(migratedDocuments, doc)
}
if len(documents) != len(migratedDocuments) {
t.Error("Not all documents migrated")
return
}
for i, doc := range documents {
migratedDoc := migratedDocuments[i]
if doc != migratedDoc {
t.Error("Document migrated incorrect")
}
}
}
func TestMigrationEscapedCharacters(t *testing.T) {
mssqlDsn, mysqlDsn, err := prepareDatabases()
if err != nil {
t.Error(err.Error())
return
}
TestGetAndValidateSchemasSuccess(t)
mssqlDb, mysqlDb, _ := tryGetConnections(mssqlDsn, mysqlDsn)
defer mssqlDb.Close()
defer mysqlDb.Close()
documents := []Document{{1, 0, "1970-02-04", `Description with 'quotes' 1`},
{1, 1, "1993-01-27", `Description \ 2`},
{1, 2, "2016-06-04", `Description '/', '\', '.' 3`}}
for _, doc := range documents {
_, err = mssqlDb.Exec(`INSERT INTO Documents (CompanyID, DocumentID, DocDate, Description)
VALUES (?, ?, ?, ?);`,
doc.CompanyID, doc.DocumentID, doc.DocDate, doc.Description)
if err != nil {
t.Error(err.Error())
return
}
}
migrateDatabase(mssqlDsn, mysqlDsn)
rows, err := mysqlDb.Query(`SELECT CompanyID, DocumentID, DocDate, Description FROM Documents`)
if err != nil {
t.Error(err.Error())
return
}
var migratedDocuments []Document
for rows.Next() {
var doc Document
rows.Scan(&doc.CompanyID, &doc.DocumentID, &doc.DocDate, &doc.Description)
migratedDocuments = append(migratedDocuments, doc)
}
if len(documents) != len(migratedDocuments) {
t.Error("Not all documents migrated")
return
}
for i, doc := range documents {
migratedDoc := migratedDocuments[i]
if doc != migratedDoc {
t.Error("Document migrated incorrect")
}
}
}
type DocumentWithAmount struct {
CompanyID int
DocumentID int
DecimalAmount string
FloatAmount float64
}
func TestMigrationPointTypes(t *testing.T) {
mssqlDsn, mysqlDsn, err := prepareDatabases()
if err != nil {
t.Error(err.Error())
return
}
mssqlDb, mysqlDb, _ := tryGetConnections(mssqlDsn, mysqlDsn)
defer mssqlDb.Close()
defer mysqlDb.Close()
_, err = mssqlDb.Exec(`CREATE TABLE DocumentsWithAmount
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
DecimalAmount DECIMAL(6,3) NULL,
FloatAmount FLOAT NULL,
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
_, err = mysqlDb.Exec(`CREATE TABLE DocumentsWithAmount
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
DecimalAmount DECIMAL(6,3) NULL,
FloatAmount FLOAT NULL,
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
documents := []DocumentWithAmount{{1, 0, "100.356", 100.356},
{1, 1, "-100.356", -100.356}}
for _, doc := range documents {
_, err = mssqlDb.Exec(`INSERT INTO DocumentsWithAmount (CompanyID, DocumentID, DecimalAmount, FloatAmount)
VALUES (?, ?, ?, ?);`,
doc.CompanyID, doc.DocumentID, doc.DecimalAmount, doc.FloatAmount)
if err != nil {
t.Error(err.Error())
return
}
}
migrateDatabase(mssqlDsn, mysqlDsn)
rows, err := mysqlDb.Query(`SELECT CompanyID, DocumentID, DecimalAmount, FloatAmount FROM DocumentsWithAmount`)
if err != nil {
t.Error(err.Error())
return
}
var migratedDocuments []DocumentWithAmount
for rows.Next() {
var doc DocumentWithAmount
rows.Scan(&doc.CompanyID, &doc.DocumentID, &doc.DecimalAmount, &doc.FloatAmount)
migratedDocuments = append(migratedDocuments, doc)
}
if len(documents) != len(migratedDocuments) {
t.Error("Not all documents migrated")
return
}
for i, doc := range documents {
migratedDoc := migratedDocuments[i]
if doc != migratedDoc {
t.Error("Document migrated incorrect")
}
}
}
type DocumentWithGuid struct {
CompanyID int
DocumentID int
Guid string
}
func TestMigrationGuid(t *testing.T) {
mssqlDsn, mysqlDsn, err := prepareDatabases()
if err != nil {
t.Error(err.Error())
return
}
mssqlDb, mysqlDb, _ := tryGetConnections(mssqlDsn, mysqlDsn)
defer mssqlDb.Close()
defer mysqlDb.Close()
_, err = mssqlDb.Exec(`CREATE TABLE DocumentsWithGuid
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
Guid UNIQUEIDENTIFIER NOT NULL,
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
_, err = mysqlDb.Exec(`CREATE TABLE DocumentsWithGuid
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
Guid CHAR(36) NOT NULL,
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
documents := []DocumentWithGuid{{1, 0, "3635ea7d-0975-48ff-b4ac-a870f7e1e8b5"}}
for _, doc := range documents {
_, err = mssqlDb.Exec(`INSERT INTO DocumentsWithGuid (CompanyID, DocumentID, Guid)
VALUES (?, ?, ?);`,
doc.CompanyID, doc.DocumentID, doc.Guid)
if err != nil {
t.Error(err.Error())
return
}
}
migrateDatabase(mssqlDsn, mysqlDsn)
rows, err := mysqlDb.Query(`SELECT CompanyID, DocumentID, Guid FROM DocumentsWithGuid`)
if err != nil {
t.Error(err.Error())
return
}
var migratedDocuments []DocumentWithGuid
for rows.Next() {
var doc DocumentWithGuid
rows.Scan(&doc.CompanyID, &doc.DocumentID, &doc.Guid)
migratedDocuments = append(migratedDocuments, doc)
}
if len(documents) != len(migratedDocuments) {
t.Error("Not all documents migrated")
return
}
for i, doc := range documents {
migratedDoc := migratedDocuments[i]
if doc != migratedDoc {
t.Error("Document migrated incorrect")
}
}
}
type DocumentWithVarbinary struct {
CompanyID int
DocumentID int
Mask string
}
func TestMigrationVarbinary(t *testing.T) {
mssqlDsn, mysqlDsn, err := prepareDatabases()
if err != nil {
t.Error(err.Error())
return
}
mssqlDb, mysqlDb, _ := tryGetConnections(mssqlDsn, mysqlDsn)
defer mssqlDb.Close()
defer mysqlDb.Close()
_, err = mssqlDb.Exec(`CREATE TABLE DocumentsWithVarbinary
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
Mask VARBINARY(2) NOT NULL,
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
_, err = mysqlDb.Exec(`CREATE TABLE DocumentsWithVarbinary
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
Mask VARBINARY(2) NOT NULL,
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
documents := []DocumentWithVarbinary{{1, 0, "0x00"},
{1, 1, "0x1a"},
{1, 2, "0x03"}}
for _, doc := range documents {
_, err = mssqlDb.Exec(`INSERT INTO DocumentsWithVarbinary (CompanyID, DocumentID, Mask)
VALUES (?, ?, CONVERT(VARBINARY, ?, 1));`,
doc.CompanyID, doc.DocumentID, doc.Mask)
if err != nil {
t.Error(err.Error())
return
}
}
migrateDatabase(mssqlDsn, mysqlDsn)
rows, err := mysqlDb.Query(`SELECT CompanyID, DocumentID, Mask FROM DocumentsWithVarbinary`)
if err != nil {
t.Error(err.Error())
return
}
var migratedDocuments []DocumentWithVarbinary
for rows.Next() {
var doc DocumentWithVarbinary
var byteArray []uint8
rows.Scan(&doc.CompanyID, &doc.DocumentID, &byteArray)
doc.Mask = fmt.Sprintf("0x%x", byteArray)
migratedDocuments = append(migratedDocuments, doc)
}
if len(documents) != len(migratedDocuments) {
t.Error("Not all documents migrated")
return
}
for i, doc := range documents {
migratedDoc := migratedDocuments[i]
if doc != migratedDoc {
t.Error("Document migrated incorrect")
}
}
}
type DocumentWithBoolean struct {
CompanyID int
DocumentID int
Released bool
Released2 bool
}
func TestMigrationBoolean(t *testing.T) {
mssqlDsn, mysqlDsn, err := prepareDatabases()
if err != nil {
t.Error(err.Error())
return
}
mssqlDb, mysqlDb, _ := tryGetConnections(mssqlDsn, mysqlDsn)
defer mssqlDb.Close()
defer mysqlDb.Close()
_, err = mssqlDb.Exec(`CREATE TABLE DocumentsWithBoolean
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
Released BIT NOT NULL,
Released2 BIT NOT NULL,
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
_, err = mysqlDb.Exec(`CREATE TABLE DocumentsWithBoolean
(
CompanyID INT NOT NULL,
DocumentID INT NOT NULL,
Released TINYINT(1) NOT NULL,
Released2 BOOLEAN NOT NULL,
PRIMARY KEY (CompanyID, DocumentID)
);`)
if err != nil {
t.Error(err.Error())
return
}
documents := []DocumentWithBoolean{{1, 0, true, true},
{1, 1, false, false}}
for _, doc := range documents {
_, err = mssqlDb.Exec(`INSERT INTO DocumentsWithBoolean (CompanyID, DocumentID, Released, Released2)
VALUES (?, ?, ?, ?);`,
doc.CompanyID, doc.DocumentID, doc.Released, doc.Released2)
if err != nil {
t.Error(err.Error())
return
}
}
migrateDatabase(mssqlDsn, mysqlDsn)
rows, err := mysqlDb.Query(`SELECT CompanyID, DocumentID, Released, Released2 FROM DocumentsWithBoolean`)
if err != nil {
t.Error(err.Error())
return
}
var migratedDocuments []DocumentWithBoolean
for rows.Next() {
var doc DocumentWithBoolean
rows.Scan(&doc.CompanyID, &doc.DocumentID, &doc.Released, &doc.Released2)
migratedDocuments = append(migratedDocuments, doc)
}
if len(documents) != len(migratedDocuments) {
t.Error("Not all documents migrated")
return
}
for i, doc := range documents {
migratedDoc := migratedDocuments[i]
if doc != migratedDoc {
t.Error("Document migrated incorrect")
}
}
}
func getMsSqlDsnParams(mssqlDsn string) msdsn.Config {
if len(mssqlDsn) > 0 {
params, _, err := msdsn.Parse(mssqlDsn)
if err != nil {
log.Fatal("unable to parse MSSQLSERVER_DSN", err)
}
return params
}
return msdsn.Config{}
}
func getMySqlDsnParams(mysqlDsn string) *mysql.Config {
if len(mysqlDsn) > 0 {
params, err := mysql.ParseDSN(mysqlDsn)
if err != nil {
log.Fatal("unable to parse MYSQLSERVER_DSN", err)
}
return params
}
return &mysql.Config{}
}
func dropDatabaseIfExists(dB *sql.DB, dbName string) error {
_, err := dB.Exec(fmt.Sprintf(`DROP DATABASE IF EXISTS %s;`, dbName))
return err
}
func createDatabase(dB *sql.DB, dbName string) error {
_, err := dB.Exec(fmt.Sprintf(`CREATE DATABASE %s;`, dbName))
return err
}
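// prepareDatabases drops and recreates the test databases named in the
// MSSQLSERVER_DSN and MYSQLSERVER_DSN environment variables, returning both DSNs.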
func prepareDatabases() (string, string, error) {
mssqlDsn := os.Getenv("MSSQLSERVER_DSN")
mysqlDsn := os.Getenv("MYSQLSERVER_DSN")
msSqlParams := getMsSqlDsnParams(mssqlDsn)
mySqlParams := getMySqlDsnParams(mysqlDsn)
mssqlDbName := msSqlParams.Database
mysqlDbName := mySqlParams.DBName
msSqlParams.Database = "master"
mySqlParams.DBName = "mysql"
mssqlDb, mysqlDb, _ := tryGetConnections(msSqlParams.URL().String(), mySqlParams.FormatDSN())
defer mssqlDb.Close()
defer mysqlDb.Close()
err := dropDatabaseIfExists(mssqlDb, mssqlDbName)
if err != nil {
return "", "", err
}
err = dropDatabaseIfExists(mysqlDb, mysqlDbName)
if err != nil {
return "", "", err
}
err = createDatabase(mssqlDb, mssqlDbName)
if err != nil {
return "", "", err
}
err = createDatabase(mysqlDb, mysqlDbName)
if err != nil {
return "", "", err
}
msSqlParams.Database = mssqlDbName
mySqlParams.DBName = mysqlDbName
mssqlDb, mysqlDb, _ = tryGetConnections(msSqlParams.URL().String(), mySqlParams.FormatDSN())
defer mssqlDb.Close()
defer mysqlDb.Close()
return mssqlDsn, mysqlDsn, nil
}
| [
"\"MSSQLSERVER_DSN\"",
"\"MYSQLSERVER_DSN\""
]
| []
| [
"MSSQLSERVER_DSN",
"MYSQLSERVER_DSN"
]
| [] | ["MSSQLSERVER_DSN", "MYSQLSERVER_DSN"] | go | 2 | 0 | |
pkg/runtime/ssh_executor_linux_test.go | package runtime
import (
"github.com/stretchr/testify/assert"
"log"
"os"
"testing"
)
type SSHExecutorTestEnv struct {
User string
Pass string
Host string
IdentityFile string
}
var SSHTestEnv SSHExecutorTestEnv
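// createExecutor builds an SSHExecutor from the COMMANDER_TEST_SSH_*
// environment variables.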
func createExecutor() SSHExecutor {
SSHTestEnv = SSHExecutorTestEnv{
Host: os.Getenv("COMMANDER_TEST_SSH_HOST"),
Pass: os.Getenv("COMMANDER_TEST_SSH_PASS"),
User: os.Getenv("COMMANDER_TEST_SSH_USER"),
IdentityFile: os.Getenv("COMMANDER_TEST_SSH_IDENTITY_FILE"),
}
s := SSHExecutor{
Host: SSHTestEnv.Host,
User: SSHTestEnv.User,
Password: SSHTestEnv.Pass,
IdentityFile: SSHTestEnv.IdentityFile,
}
return s
}
func Test_SSHExecutor(t *testing.T) {
if !isSSHTestsEnabled() {
return
}
s := createExecutor()
test := TestCase{
Command: CommandUnderTest{
Cmd: "echo test",
},
Expected: Expected{
ExitCode: 0,
Stdout: ExpectedOut{Exactly: "test"},
},
}
got := s.Execute(test)
assert.True(t, got.ValidationResult.Success)
assert.Equal(t, "test", got.TestCase.Result.Stdout)
}
func Test_SSHExecutor_WithEnv(t *testing.T) {
if !isSSHTestsEnabled() {
return
}
s := createExecutor()
test := TestCase{
Command: CommandUnderTest{
Cmd: "echo $LC_TEST_KEY1; echo $LC_TEST_KEY2",
Env: map[string]string{
"LC_TEST_KEY1": "ENV_VALUE1",
"LC_TEST_KEY2": "ENV_VALUE2",
},
},
}
got := s.Execute(test)
assert.True(t, got.ValidationResult.Success)
assert.Equal(t, "ENV_VALUE1\nENV_VALUE2", got.TestCase.Result.Stdout)
assert.Equal(t, 0, got.TestCase.Result.ExitCode)
}
func Test_SSHExecutor_ExitCode(t *testing.T) {
if !isSSHTestsEnabled() {
return
}
s := createExecutor()
test := TestCase{
Command: CommandUnderTest{
Cmd: "exit 2;",
},
}
got := s.Execute(test)
assert.False(t, got.ValidationResult.Success)
assert.Equal(t, 2, got.TestCase.Result.ExitCode)
}
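// isSSHTestsEnabled gates the SSH integration tests: they run only when the
// COMMANDER_SSH_TEST environment variable is set to "1".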
func isSSHTestsEnabled() bool {
v := os.Getenv("COMMANDER_SSH_TEST")
if v != "1" {
log.Println("Skip ssh_executor_test, set env COMMANDER_SSH_TEST to 1")
return false
}
return true
}
| [
"\"COMMANDER_TEST_SSH_HOST\"",
"\"COMMANDER_TEST_SSH_PASS\"",
"\"COMMANDER_TEST_SSH_USER\"",
"\"COMMANDER_TEST_SSH_IDENTITY_FILE\"",
"\"COMMANDER_SSH_TEST\""
]
| []
| [
"COMMANDER_TEST_SSH_USER",
"COMMANDER_TEST_SSH_IDENTITY_FILE",
"COMMANDER_SSH_TEST",
"COMMANDER_TEST_SSH_PASS",
"COMMANDER_TEST_SSH_HOST"
]
| [] | ["COMMANDER_TEST_SSH_USER", "COMMANDER_TEST_SSH_IDENTITY_FILE", "COMMANDER_SSH_TEST", "COMMANDER_TEST_SSH_PASS", "COMMANDER_TEST_SSH_HOST"] | go | 5 | 0 | |
workspace/model_main_tf2.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Creates and runs TF2 object detection models.
For local training/evaluation run:
PIPELINE_CONFIG_PATH=path/to/pipeline.config
MODEL_DIR=/tmp/model_outputs
NUM_TRAIN_STEPS=10000
SAMPLE_1_OF_N_EVAL_EXAMPLES=1
python model_main_tf2.py -- \
--model_dir=$MODEL_DIR --num_train_steps=$NUM_TRAIN_STEPS \
--sample_1_of_n_eval_examples=$SAMPLE_1_OF_N_EVAL_EXAMPLES \
--pipeline_config_path=$PIPELINE_CONFIG_PATH \
--alsologtostderr
"""
from absl import flags
import tensorflow.compat.v2 as tf
from object_detection import model_lib_v2
import os
flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
'file.')
flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')
flags.DEFINE_bool('eval_on_train_data', False, 'Enable evaluating on train '
'data (only supported in distributed training).')
flags.DEFINE_integer('sample_1_of_n_eval_examples', None, 'Will sample one of '
'every n eval input examples, where n is provided.')
flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample '
'one of every n train input examples for evaluation, '
'where n is provided. This is only used if '
'`eval_training_data` is True.')
flags.DEFINE_string(
'model_dir', None, 'Path to output model directory '
'where event and checkpoint files will be written.')
flags.DEFINE_string(
'checkpoint_dir', None, 'Path to directory holding a checkpoint. If '
'`checkpoint_dir` is provided, this binary operates in eval-only mode, '
'writing resulting metrics to `model_dir`.')
flags.DEFINE_integer('eval_timeout', 3600, 'Number of seconds to wait for an '
                     'evaluation checkpoint before exiting.')
flags.DEFINE_bool('use_tpu', False, 'Whether the job is executing on a TPU.')
flags.DEFINE_string(
'tpu_name',
default=None,
help='Name of the Cloud TPU for Cluster Resolvers.')
flags.DEFINE_integer(
'num_workers', 1, 'When num_workers > 1, training uses '
'MultiWorkerMirroredStrategy. When num_workers = 1 it uses '
'MirroredStrategy.')
flags.DEFINE_integer(
'checkpoint_every_n', 1000, 'Integer defining how often we checkpoint.')
flags.DEFINE_boolean('record_summaries', True,
('Whether or not to record summaries defined by the model'
' or the training pipeline. This does not impact the'
' summaries of the loss values which are always'
' recorded.'))
FLAGS = flags.FLAGS
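# When --checkpoint_dir is supplied, main() runs continuous evaluation only;
# otherwise it trains under the selected distribution strategy (TPU,
# multi-worker mirrored, or mirrored).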
def main(unused_argv):
flags.mark_flag_as_required('model_dir')
flags.mark_flag_as_required('pipeline_config_path')
tf.config.set_soft_device_placement(True)
if FLAGS.checkpoint_dir:
model_lib_v2.eval_continuously(
pipeline_config_path=FLAGS.pipeline_config_path,
model_dir=FLAGS.model_dir,
train_steps=FLAGS.num_train_steps,
sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
sample_1_of_n_eval_on_train_examples=(
FLAGS.sample_1_of_n_eval_on_train_examples),
checkpoint_dir=FLAGS.checkpoint_dir,
wait_interval=300, timeout=FLAGS.eval_timeout)
else:
if FLAGS.use_tpu:
# TPU is automatically inferred if tpu_name is None and
# we are running under cloud ai-platform.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)
elif FLAGS.num_workers > 1:
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
else:
strategy = tf.compat.v2.distribute.MirroredStrategy()
with strategy.scope():
model_lib_v2.train_loop(
pipeline_config_path=FLAGS.pipeline_config_path,
model_dir=FLAGS.model_dir,
train_steps=FLAGS.num_train_steps,
use_tpu=FLAGS.use_tpu,
checkpoint_every_n=FLAGS.checkpoint_every_n,
record_summaries=FLAGS.record_summaries)
if __name__ == '__main__':
#os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
tf.compat.v1.app.run()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
backend/Virtuele/asgi.py | """
ASGI config for Virtuele project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Virtuele.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pilot/pkg/dns/leak_test.go | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dns
import (
"testing"
"istio.io/istio/tests/util/leak"
)
func TestMain(m *testing.M) {
// CheckMain asserts that no goroutines are leaked after a test package exits.
leak.CheckMain(m)
}
| []
| []
| []
| [] | [] | go | null | null | null |
src/config/api-server/vnc_cfg_api_server/api_server.py | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
"""
This is the main module in vnc_cfg_api_server package. It manages interaction
between http/rest, address management, authentication and database interfaces.
"""
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from past.builtins import basestring
from builtins import object
from gevent import monkey
monkey.patch_all()
from gevent import hub
# Requests from the neutron plugin to the api server can carry very long
# URLs; raise the gevent request-line limit accordingly.
import gevent.pywsgi
gevent.pywsgi.MAX_REQUEST_LINE = 65535
import sys
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding('UTF8')
from six import string_types
from six.moves import reload_module
from six.moves.configparser import SafeConfigParser, NoOptionError
import functools
import hashlib
import itertools
import signal
import netaddr
import os
import re
import random
import socket
import ast
from cfgm_common import jsonutils as json
from .provision_defaults import *
import uuid
import copy
from pprint import pformat
from io import StringIO
from vnc_api.utils import AAA_MODE_VALID_VALUES
# import GreenletProfiler
from cfgm_common import vnc_cgitb
from kazoo.exceptions import LockTimeout
from attrdict import AttrDict
from distutils.util import strtobool
from cfgm_common import has_role
from cfgm_common import _obj_serializer_all
from cfgm_common.utils import _DEFAULT_ZK_COUNTER_PATH_PREFIX
from cfgm_common.utils import _DEFAULT_ZK_LOCK_PATH_PREFIX
from cfgm_common import is_uuid_like
from cfgm_common import SG_NO_RULE_FQ_NAME
from cfgm_common.uve.vnc_api.ttypes import VncApiLatencyStats, VncApiLatencyStatsLog
import time
import requests
import xml.etree.ElementTree as etree
from functools import partial
"""
Following is needed to silence warnings on every request when keystone
auth_token middleware + Sandesh is used. Keystone or Sandesh alone
do not produce these warnings.
Exception AttributeError: AttributeError(
"'_DummyThread' object has no attribute '_Thread__block'",)
in <module 'threading' from '/usr/lib64/python2.7/threading.pyc'> ignored
See http://stackoverflow.com/questions/13193278/understand-python-threading-bug
for more information.
"""
import threading
threading._DummyThread._Thread__stop = lambda x: 42
CONFIG_VERSION = '1.0'
import bottle
from . import utils
from . import context
from .context import get_request, get_context, set_context, use_context
from .context import ApiContext
from .context import is_internal_request
from .resources import initialize_all_server_resource_classes
from .vnc_db import VncDbClient
import cfgm_common
from cfgm_common import ignore_exceptions
from cfgm_common.uve.vnc_api.ttypes import VncApiCommon, VncApiConfigLog,\
VncApiDebug, VncApiInfo, VncApiNotice, VncApiError
from cfgm_common.uve.vnc_api.ttypes import FabricJobExecution, FabricJobUve, \
PhysicalRouterJobExecution, PhysicalRouterJobUve
from cfgm_common import illegal_xml_chars_RE
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames, Module2NodeType,\
NodeTypeNames, INSTANCE_ID_DEFAULT, TagTypeNameToId,\
TAG_TYPE_NOT_UNIQUE_PER_OBJECT, TAG_TYPE_AUTHORIZED_ON_ADDRESS_GROUP,\
POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT, SECURITY_OBJECT_TYPES
from .provision_defaults import Provision
from .vnc_quota import *
from vnc_api.gen.resource_xsd import *
from vnc_api.gen.resource_common import *
from vnc_api.gen.vnc_api_client_gen import all_resource_type_tuples
import cfgm_common
from cfgm_common.utils import cgitb_hook
from cfgm_common.rest import LinkObject, hdr_server_tenant
from cfgm_common.exceptions import *
from cfgm_common.vnc_extensions import ExtensionManager
from . import vnc_addr_mgmt
from . import vnc_auth
from . import vnc_auth_keystone
from . import vnc_perms
from . import vnc_rbac
from cfgm_common.uve.cfgm_cpuinfo.ttypes import ModuleCpuState, ModuleCpuStateTrace
from cfgm_common.buildinfo import build_info
from cfgm_common.vnc_api_stats import log_api_stats
from pysandesh.sandesh_base import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
# from gen_py.vnc_api.ttypes import *
import netifaces
from pysandesh.connection_info import ConnectionState
from cfgm_common.uve.nodeinfo.ttypes import NodeStatusUVE, \
NodeStatus
from .sandesh.traces.ttypes import RestApiTrace
from .vnc_bottle import get_bottle_server
from cfgm_common.vnc_greenlets import VncGreenlet
from cfgm_common.kombu_amqp import KombuAmqpClient
import ssl
_ACTION_RESOURCES = [
{'uri': '/prop-collection-get', 'link_name': 'prop-collection-get',
'method': 'GET', 'method_name': 'prop_collection_http_get'},
{'uri': '/prop-collection-update', 'link_name': 'prop-collection-update',
'method': 'POST', 'method_name': 'prop_collection_http_post'},
{'uri': '/ref-update', 'link_name': 'ref-update',
'method': 'POST', 'method_name': 'ref_update_http_post'},
{'uri': '/ref-relax-for-delete', 'link_name': 'ref-relax-for-delete',
'method': 'POST', 'method_name': 'ref_relax_for_delete_http_post'},
{'uri': '/fqname-to-id', 'link_name': 'name-to-id',
'method': 'POST', 'method_name': 'fq_name_to_id_http_post'},
{'uri': '/id-to-fqname', 'link_name': 'id-to-name',
'method': 'POST', 'method_name': 'id_to_fq_name_http_post'},
{'uri': '/useragent-kv', 'link_name': 'useragent-keyvalue',
'method': 'POST', 'method_name': 'useragent_kv_http_post'},
{'uri': '/db-check', 'link_name': 'database-check',
'method': 'POST', 'method_name': 'db_check'},
{'uri': '/fetch-records', 'link_name': 'fetch-records',
'method': 'POST', 'method_name': 'fetch_records'},
{'uri': '/start-profile', 'link_name': 'start-profile',
'method': 'POST', 'method_name': 'start_profile'},
{'uri': '/stop-profile', 'link_name': 'stop-profile',
'method': 'POST', 'method_name': 'stop_profile'},
{'uri': '/list-bulk-collection', 'link_name': 'list-bulk-collection',
'method': 'POST', 'method_name': 'list_bulk_collection_http_post'},
{'uri': '/obj-perms', 'link_name': 'obj-perms',
'method': 'GET', 'method_name': 'obj_perms_http_get'},
{'uri': '/chown', 'link_name': 'chown',
'method': 'POST', 'method_name': 'obj_chown_http_post'},
{'uri': '/chmod', 'link_name': 'chmod',
'method': 'POST', 'method_name': 'obj_chmod_http_post'},
{'uri': '/aaa-mode', 'link_name': 'aaa-mode',
'method': 'PUT', 'method_name': 'aaa_mode_http_put'},
{'uri': '/obj-cache', 'link_name': 'obj-cache',
'method': 'GET', 'method_name': 'dump_cache'},
{'uri': '/obj-cache', 'link_name': 'obj-cache',
'method': 'POST', 'method_name': 'dump_cache'},
{'uri': '/execute-job', 'link_name': 'execute-job',
'method': 'POST', 'method_name': 'execute_job_http_post'},
{'uri': '/abort-job', 'link_name': 'abort-job',
'method': 'POST', 'method_name': 'abort_job_http_post'},
{'uri': '/amqp-publish', 'link_name': 'amqp-publish',
'method': 'POST', 'method_name': 'amqp_publish_http_post'},
{'uri': '/amqp-request', 'link_name': 'amqp-request',
'method': 'POST', 'method_name': 'amqp_request_http_post'},
{'uri': '/hbs-get', 'link_name': 'hbs-get',
'method': 'POST', 'method_name': 'hbs_get'},
]
_MANDATORY_PROPS = [
'loadbalancer_healthmonitor_properties',
]
# following allowed without authentication
_WHITE_LIST_URI = [
'^/documentation', # allow all documentation
'^/$', # allow discovery
]
_WHITE_LIST_URI_REGEX = re.compile(r'%s' % '|'.join(_WHITE_LIST_URI))
def error_400(err):
return err.body
# end error_400
def error_403(err):
return err.body
# end error_403
def error_404(err):
return err.body
# end error_404
def error_405(err):
return err.body
# end error_405
def error_409(err):
return err.body
# end error_409
@bottle.error(412)
def error_412(err):
return err.body
# end error_412
def error_500(err):
return err.body
# end error_500
def error_503(err):
return err.body
# end error_503
def traceback_err_msg(err_msg):
'''Return the last row of traceback error message'''
return err_msg.strip('\n').split('\n')[-1]
# end traceback_err_msg
class VncApiServer(object):
"""
This is the manager class co-ordinating all classes present in the package
"""
_INVALID_NAME_CHARS = set(':')
_GENERATE_DEFAULT_INSTANCE = [
'namespace',
'project',
'virtual_network', 'virtual-network',
'network_ipam', 'network-ipam',
]
JOB_REQUEST_EXCHANGE = "job_request_exchange"
JOB_REQUEST_ROUTING_KEY = "job.request"
JOB_ABORT_ROUTING_KEY = "job.abort"
def __new__(cls, *args, **kwargs):
obj = super(VncApiServer, cls).__new__(cls)
obj.api_bottle = bottle.Bottle()
obj.route('/', 'GET', obj.homepage_http_get)
obj.api_bottle.error_handler = {
400: error_400,
403: error_403,
404: error_404,
405: error_405,
409: error_409,
500: error_500,
503: error_503,
}
cls._generate_resource_crud_methods(obj)
cls._generate_resource_crud_uri(obj)
for act_res in _ACTION_RESOURCES:
http_method = act_res.get('method', 'POST')
method_name = getattr(obj, act_res['method_name'])
obj.route(act_res['uri'], http_method, method_name)
return obj
# end __new__
@classmethod
def _validate_complex_type(cls, dict_cls, dict_body):
if dict_body is None:
return
for key, value in list(dict_body.items()):
if key not in dict_cls.attr_fields:
raise ValueError('class %s does not have field %s' % (
str(dict_cls), key))
attr_type_vals = dict_cls.attr_field_type_vals[key]
attr_type = attr_type_vals['attr_type']
restrictions = attr_type_vals['restrictions']
is_array = attr_type_vals.get('is_array', False)
if not value:
continue
if is_array:
if not isinstance(value, list):
raise ValueError('Field %s must be a list. Received value: %s'
% (key, str(value)))
values = value
else:
values = [value]
if attr_type_vals['is_complex']:
attr_cls = cfgm_common.utils.str_to_class(attr_type, __name__)
for item in values:
if attr_type == 'AllowedAddressPair':
cls._validate_allowed_address_pair_prefix_len(item)
if attr_type == 'SubnetType':
cls._validate_subnet_type(item)
cls._validate_complex_type(attr_cls, item)
else:
simple_type = attr_type_vals['simple_type']
for idx, item in enumerate(values):
values[idx] = cls._validate_simple_type(key, attr_type,
simple_type, item,
restrictions)
if not is_array:
dict_body[key] = values[0]
# end _validate_complex_type
@staticmethod
def _validate_subnet_type(subnet):
try:
cidr_str = '%s/%s' % (subnet['ip_prefix'], subnet['ip_prefix_len'])
except TypeError:
raise ValueError("Subnet type is invalid")
try:
cidr = netaddr.IPNetwork(cidr_str)
except netaddr.core.AddrFormatError:
raise ValueError("Subnet type '%s' is invalid" % cidr_str)
subnet['ip_prefix'] = str(cidr.network)
subnet['ip_prefix_len'] = cidr.prefixlen
@classmethod
def _validate_allowed_address_pair_prefix_len(cls, value):
        '''Do not allow configuration of AAP with an IPv4 prefix length
           less than 24 or an IPv6 prefix length less than 120.
           LP #1720118
        '''
if value['address_mode'] == 'active-standby':
ip_net_family = netaddr.IPNetwork(value['ip']['ip_prefix']).version
            if ip_net_family == 6 and value['ip']['ip_prefix_len'] < 120:
                raise ValueError('IPv6 prefix length less than 120 is'
                                 ' not acceptable')
            if ip_net_family == 4 and value['ip']['ip_prefix_len'] < 24:
                raise ValueError('IPv4 prefix length less than 24 is'
                                 ' not acceptable')
# end _validate_allowed_address_pair_prefix_len
@classmethod
def _validate_communityattribute_type(cls, value):
poss_values = ["no-export",
"accept-own",
"no-advertise",
"no-export-subconfed",
"no-reoriginate"]
if value in poss_values:
return
res = re.match('[0-9]+:[0-9]+', value)
if res is None:
raise ValueError('Invalid community format %s. '
'Change to \'number:number\''
% value)
asn = value.split(':')
if int(asn[0]) > 65535:
raise ValueError('Out of range ASN value %s. '
'ASN values cannot exceed 65535.'
% value)
@classmethod
def _validate_serviceinterface_type(cls, value):
poss_values = ["management",
"left",
"right"]
if value in poss_values:
return
res = re.match('other[0-9]*', value)
if res is None:
raise ValueError('Invalid service interface type %s. '
'Valid values are: management|left|right|other[0-9]*'
% value)
def validate_execute_job_input_params(self, request_params):
device_list = None
job_template_id = request_params.get('job_template_id')
job_template_fq_name = request_params.get('job_template_fq_name')
if not (job_template_id or job_template_fq_name):
err_msg = "Either job_template_id or job_template_fq_name" \
" required in request"
raise cfgm_common.exceptions.HttpError(400, err_msg)
# check if the job template id is a valid uuid
if job_template_id:
if self.invalid_uuid(job_template_id):
msg = 'Invalid job-template uuid type %s. uuid type required' \
% job_template_id
raise cfgm_common.exceptions.HttpError(400, msg)
try:
job_template_fqname = self._db_conn.uuid_to_fq_name(
job_template_id)
request_params['job_template_fq_name'] = job_template_fqname
except NoIdError as no_id_exec:
raise cfgm_common.exceptions.HttpError(404, str(no_id_exec))
except Exception as e:
msg = "Error while reading job_template_id: " + str(e)
raise cfgm_common.exceptions.HttpError(400, msg)
else:
# check if the job template fqname is a valid fq_name
try:
job_template_id = self._db_conn.fq_name_to_uuid(
"job_template", job_template_fq_name)
request_params['job_template_id'] = job_template_id
except NoIdError as no_id_exec:
raise cfgm_common.exceptions.HttpError(404, str(no_id_exec))
except Exception as e:
msg = "Error while reading job_template_fqname: " + str(e)
raise cfgm_common.exceptions.HttpError(400, msg)
extra_params = request_params.get('params')
if extra_params is not None:
device_list = extra_params.get('device_list')
if device_list:
if not isinstance(device_list, list):
err_msg = "malformed request param: device_list, " \
"expects list"
raise cfgm_common.exceptions.HttpError(400, err_msg)
for device_id in device_list:
if not isinstance(device_id, basestring):
err_msg = "malformed request param: device_list, " \
"expects list of string device_uuids," \
" found device_uuid %s" % device_id
raise cfgm_common.exceptions.HttpError(400, err_msg)
# check if the device id passed is a valid uuid
if self.invalid_uuid(device_id):
msg = 'Invalid device uuid type %s.' \
' uuid type required' % device_id
raise cfgm_common.exceptions.HttpError(400, msg)
def execute_job_http_post(self):
''' Payload of execute_job
job_template_id (Mandatory if no job_template_fq_name): <uuid> of
the created job_template
job_template_fq_name (Mandatory if no job_template_id): fqname in
the format: ["<global-system-config-name>",
"<name of the job-template>"]
input (Type json): Input Schema of the playbook under the
job_template_id
params (Type json): Extra_params for the job_manager
(Eg. device_list)
E.g. Payload:
{
"job_template_id": "<uuid>",
"params": {
"device_list": ["<device_uuid1>", "<device_uuid2>", ....
"<device_uuidn>"]
}
}
'''
try:
self.config_log("Entered execute-job",
level=SandeshLevel.SYS_INFO)
request_params = get_request().json
msg = "Job Input %s " % json.dumps(request_params)
self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
# some basic validation like checking if UUIDs in input are
# syntactically valid
self.validate_execute_job_input_params(request_params)
# get the auth token
auth_token = get_request().get_header('X-Auth-Token')
request_params['auth_token'] = auth_token
# get cluster id
contrail_cluster_id = get_request().get_header('X-Cluster-ID')
request_params['contrail_cluster_id'] = contrail_cluster_id
# get the API config node ip list
if not self._config_node_list:
(ok, cfg_node_list, _) = self._db_conn.dbe_list(
'config_node', field_names=['config_node_ip_address'])
if not ok:
raise cfgm_common.exceptions.HttpError(
500, 'Error in dbe_list while getting the '
'config_node_ip_address'
' %s' % cfg_node_list)
if not cfg_node_list:
err_msg = "Config-Node list empty"
raise cfgm_common.exceptions.HttpError(404, err_msg)
for node in cfg_node_list:
self._config_node_list.append(node.get(
'config_node_ip_address'))
request_params['api_server_host'] = self._config_node_list
# generate the job execution id
execution_id = str(int(round(time.time() * 1000))) + '_' + \
str(uuid.uuid4())
request_params['job_execution_id'] = execution_id
# publish message to RabbitMQ
self.publish_job_request(request_params, execution_id)
self.config_log("Published job message to RabbitMQ."
" Execution id: %s" % execution_id,
level=SandeshLevel.SYS_INFO)
return {'job_execution_id': str(execution_id)}
except cfgm_common.exceptions.HttpError as e:
raise
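    # Publish the job request to the job manager over RabbitMQ, retrying with
    # a bounded back-off policy.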
def publish_job_request(self, request_params, job_execution_id):
try:
self._amqp_client.publish(
request_params, self.JOB_REQUEST_EXCHANGE,
routing_key=self.JOB_REQUEST_ROUTING_KEY,
serializer='json', retry=True,
retry_policy={'max_retries': 12,
'interval_start': 2,
'interval_step': 5,
'interval_max': 15})
except Exception as e:
msg = "Failed to send job request via RabbitMQ" \
" %s %s" % (job_execution_id, repr(e))
raise cfgm_common.exceptions.HttpError(500, msg)
def abort_job_http_post(self):
try:
self.config_log("Entered abort-job",
level=SandeshLevel.SYS_INFO)
request_params = get_request().json
msg = "Abort Job Input %s " % json.dumps(request_params)
self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
# get the auth token
auth_token = get_request().get_header('X-Auth-Token')
request_params['auth_token'] = auth_token
# get cluster id
contrail_cluster_id = get_request().get_header('X-Cluster-ID')
request_params['contrail_cluster_id'] = contrail_cluster_id
# get the API config node ip list
if not self._config_node_list:
(ok, cfg_node_list, _) = self._db_conn.dbe_list(
'config_node', field_names=['config_node_ip_address'])
if not ok:
raise cfgm_common.exceptions.HttpError(
500, 'Error in dbe_list while getting the '
'config_node_ip_address'
' %s' % cfg_node_list)
if not cfg_node_list:
err_msg = "Config-Node list empty"
raise cfgm_common.exceptions.HttpError(404, err_msg)
for node in cfg_node_list:
self._config_node_list.append(node.get(
'config_node_ip_address'))
request_params['api_server_host'] = self._config_node_list
# publish message to RabbitMQ
self.publish_job_abort(request_params)
self.config_log("Published job abort to RabbitMQ.",
level=SandeshLevel.SYS_INFO)
return {}
except cfgm_common.exceptions.HttpError as e:
raise
def publish_job_abort(self, request_params):
try:
self._amqp_client.publish(
request_params, self.JOB_REQUEST_EXCHANGE,
routing_key=self.JOB_ABORT_ROUTING_KEY,
serializer='json', retry=True,
retry_policy={'max_retries': 12,
'interval_start': 2,
'interval_step': 5,
'interval_max': 15})
except Exception as e:
msg = "Failed to send job abort via RabbitMQ" \
" %s" % (repr(e))
raise cfgm_common.exceptions.HttpError(500, msg)
def amqp_publish_http_post(self):
''' Payload of amqp-publish
exchange (Type string) (mandatory): name of the exchange
exchange_type (Type string) (mandatory): type of the exchange
routing_key (Type string): routing key for the message
headers (Type dict): headers for the message
payload (Type object): the message
'''
self.config_log("Entered amqp-publish",
level=SandeshLevel.SYS_INFO)
body = get_request().json
msg = "Amqp publish %s " % json.dumps(body)
self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
if self._amqp_client.get_exchange(body.get('exchange')) is None:
self._amqp_client.add_exchange(body.get('exchange'),
type=body.get('exchange_type'))
self._amqp_client.publish(body.get('payload'),
body.get('exchange'),
routing_key=body.get('routing_key'),
headers=body.get('headers'))
bottle.response.status = 202
self.config_log("Exiting amqp-publish", level=SandeshLevel.SYS_DEBUG)
# end amqp_publish_http_post
def amqp_request_http_post(self):
''' Payload of amqp-request
exchange (Type string) (mandatory): name of the exchange
exchange_type (Type string) (mandatory): type of the exchange
routing_key (Type string): routing key for the message
response_key (Type string): routing key for the response message
headers (Type dict): headers for the message
payload (Type object): the message
'''
self.config_log("Entered amqp-request",
level=SandeshLevel.SYS_INFO)
body = get_request().json
msg = "Amqp request %s " % json.dumps(body)
self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
if self._amqp_client.get_exchange(body.get('exchange')) is None:
self._amqp_client.add_exchange(body.get('exchange'),
type=body.get('exchange_type'))
consumer = 'amqp_request.%s.%s' % (socket.getfqdn(), str(uuid.uuid4()))
amqp_worker = VncApiServer.AmqpWorker()
self._amqp_client.add_consumer(consumer, body.get('exchange'),
routing_key=body.get('response_key'),
callback=amqp_worker.handle_message,
wait=True, auto_delete=True)
self._amqp_client.publish(body.get('payload'),
body.get('exchange'),
routing_key=body.get('routing_key'),
headers=body.get('headers'))
try:
amqp_worker.queue.get(block=True, timeout=self._args.amqp_timeout)
bottle.response.status = 200
except gevent.queue.Empty:
bottle.response.status = 500
finally:
self._amqp_client.remove_consumer(consumer)
msg = "Amqp response, status %s, body %s " % (bottle.response.status,
json.dumps(amqp_worker.body))
self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
self.config_log("Exiting amqp-request", level=SandeshLevel.SYS_DEBUG)
return amqp_worker.body
# end amqp_request_http_post
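    # Single-shot AMQP consumer helper: acknowledges one response message,
    # stores its body and signals completion through a one-slot queue.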
class AmqpWorker(object):
def __init__(self):
self.queue = gevent.queue.Queue(maxsize=1)
self.body = None
# end __init__
def handle_message(self, body, message):
message.ack()
self.body = body
self.queue.put_nowait(True)
# end handle_message
# end AmqpWorker
@classmethod
def _validate_simple_type(cls, type_name, xsd_type, simple_type, value, restrictions=None):
if value is None:
return
elif xsd_type in ('unsignedLong', 'integer'):
            if not isinstance(value, int):
# If value is not an integer, then try to convert it to integer
try:
value = int(value)
except (TypeError, ValueError):
raise ValueError('%s: integer value expected instead of %s' %(
type_name, value))
if restrictions:
if not (int(restrictions[0]) <= value <= int(restrictions[1])):
raise ValueError('%s: value must be between %s and %s' %(
type_name, restrictions[0], restrictions[1]))
elif xsd_type == 'boolean':
if not isinstance(value, bool):
raise ValueError('%s: true/false expected instead of %s' %(
type_name, value))
elif xsd_type == 'string' and simple_type == 'CommunityAttribute':
cls._validate_communityattribute_type(value)
elif xsd_type == 'string' and simple_type == 'ServiceInterfaceType':
cls._validate_serviceinterface_type(value)
else:
if not isinstance(value, basestring):
raise ValueError('%s: string value expected instead of %s' %(
type_name, value))
if restrictions and value not in restrictions:
raise ValueError('%s: value must be one of %s' % (
type_name, str(restrictions)))
return value
# end _validate_simple_type
def _check_mandatory_props_list(self, prop_name):
return prop_name in _MANDATORY_PROPS
# end _check_mandatory_props_list
def _validate_props_in_request(self, resource_class, obj_dict, operation):
for prop_name in resource_class.prop_fields:
prop_field_types = resource_class.prop_field_types[prop_name]
is_simple = not prop_field_types['is_complex']
prop_type = prop_field_types['xsd_type']
restrictions = prop_field_types['restrictions']
simple_type = prop_field_types['simple_type']
is_list_prop = prop_name in resource_class.prop_list_fields
is_map_prop = prop_name in resource_class.prop_map_fields
prop_value = obj_dict.get(prop_name)
if not prop_value:
if operation == 'CREATE' and (
prop_field_types['required'] == 'required'):
if self._check_mandatory_props_list(prop_name):
err_msg = '%s property is missing' %prop_name
return False, err_msg
continue
if is_simple:
try:
obj_dict[prop_name] = self._validate_simple_type(prop_name,
prop_type, simple_type,
prop_value, restrictions)
except Exception as e:
err_msg = 'Error validating property ' + str(e)
return False, err_msg
else:
continue
prop_cls = cfgm_common.utils.str_to_class(prop_type, __name__)
if isinstance(prop_value, dict):
try:
self._validate_complex_type(prop_cls, prop_value)
except Exception as e:
err_msg = 'Error validating property %s value %s ' %(
prop_name, prop_value)
err_msg += str(e)
return False, err_msg
else: # complex-type + value isn't dict or wrapped in list or map
err_msg = 'Error in property %s type %s value of %s ' %(
prop_name, prop_cls, prop_value)
return False, err_msg
# end for all properties
return True, ''
# end _validate_props_in_request
def _validate_refs_in_request(self, resource_class, obj_dict):
for ref_name in resource_class.ref_fields:
ref_fld_types_list = list(resource_class.ref_field_types[ref_name])
ref_link_type = ref_fld_types_list[1]
if ref_link_type == 'None':
continue
attr_cls = cfgm_common.utils.str_to_class(ref_link_type, __name__)
for ref_dict in obj_dict.get(ref_name) or []:
try:
self._validate_complex_type(attr_cls, ref_dict['attr'])
except Exception as e:
err_msg = 'Error validating reference %s value %s ' \
%(ref_name, ref_dict)
err_msg += str(e)
return False, err_msg
return True, ''
# end _validate_refs_in_request
@staticmethod
def _validate_tag_refs(obj_type, obj_dict):
if not obj_dict:
return True, ''
refs_per_type = {}
for ref in obj_dict.get('tag_refs', []):
ref_type = ref['to'][-1].partition('=')[0]
refs_per_type.setdefault(ref_type, []).append(ref)
for tag_type, refs in list(refs_per_type.items()):
# Tag type is unique per object, unless
# TAG_TYPE_NOT_UNIQUE_PER_OBJECT tag type or object type is a
# Firewall Rule
if (len(refs) > 1 and
tag_type not in TAG_TYPE_NOT_UNIQUE_PER_OBJECT and
obj_type != FirewallRule.object_type):
msg = ("Tag type '%s' cannot be set multiple times on the same "
"%s resource type" % (
tag_type, obj_type.replace('_', ' ').title()))
return False, (400, msg)
# address-group resource can only be associated with label tag type
if (obj_type == 'address_group' and
tag_type not in TAG_TYPE_AUTHORIZED_ON_ADDRESS_GROUP):
msg = ("Invalid tag type %s for object type %s" %
(tag_type, obj_type))
return False, (400, msg)
return True, ''
def _validate_perms_in_request(self, resource_class, obj_type, obj_dict):
for ref_name in resource_class.ref_fields:
for ref in obj_dict.get(ref_name) or []:
try:
ref_uuid = ref['uuid']
except KeyError:
ref_uuid = self._db_conn.fq_name_to_uuid(ref_name[:-5],
ref['to'])
(ok, status) = self._permissions.check_perms_link(
get_request(), ref_uuid)
if not ok:
(code, err_msg) = status
raise cfgm_common.exceptions.HttpError(code, err_msg)
# end _validate_perms_in_request
def _validate_resource_type(self, type):
try:
r_class = self.get_resource_class(type)
return r_class.resource_type, r_class
except TypeError:
raise cfgm_common.exceptions.HttpError(
404, "Resource type '%s' not found" % type)
# end _validate_resource_type
def _ensure_services_conn(
self, api_name, obj_type, obj_uuid=None, obj_fq_name=None):
# If not connected to zookeeper do not allow operations that
# causes the state change
if not self._db_conn._zk_db.is_connected():
errmsg = 'No connection to zookeeper.'
fq_name_str = ':'.join(obj_fq_name or [])
self.config_object_error(
obj_uuid, fq_name_str, obj_type, api_name, errmsg)
raise cfgm_common.exceptions.HttpError(503, errmsg)
# If there are too many pending updates to rabbit, do not allow
# operations that cause state change
npending = self._db_conn.dbe_oper_publish_pending()
if (npending >= int(self._args.rabbit_max_pending_updates)):
err_str = str(MaxRabbitPendingError(npending))
raise cfgm_common.exceptions.HttpError(500, err_str)
# end _ensure_services_conn
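    # Invoke the undo callbacks recorded in the request context and log the
    # stage at which the operation failed.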
def undo(self, result, obj_type, id=None, fq_name=None, counter=None, value=0):
(code, msg) = result
if counter:
counter = counter + value
get_context().invoke_undo(code, msg, self.config_log)
failed_stage = get_context().get_state()
self.config_object_error(
id, fq_name, obj_type, failed_stage, msg)
# end undo
# http_resource_<oper> - handlers invoked from
# a. bottle route (on-the-wire) OR
# b. internal requests
# using normalized get_request() from ApiContext
@log_api_stats
def http_resource_create(self, obj_type):
resource_type, r_class = self._validate_resource_type(obj_type)
obj_dict = get_request().json[resource_type]
# check visibility
user_visible = (obj_dict.get('id_perms') or {}).get('user_visible', True)
if not user_visible and not self.is_admin_request():
result = 'This object is not visible by users'
self.config_object_error(None, None, obj_type, 'http_post', result)
raise cfgm_common.exceptions.HttpError(400, result)
self._post_validate(obj_type, obj_dict=obj_dict)
fq_name = obj_dict['fq_name']
try:
self._extension_mgrs['resourceApi'].map_method(
'pre_%s_create' %(obj_type), obj_dict)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = 'In pre_%s_create an extension had error for %s' \
%(obj_type, obj_dict)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# properties validator
ok, result = self._validate_props_in_request(r_class,
obj_dict, operation='CREATE')
if not ok:
result = 'Bad property in create: ' + result
raise cfgm_common.exceptions.HttpError(400, result)
# references validator
ok, result = self._validate_refs_in_request(r_class, obj_dict)
if not ok:
result = 'Bad reference in create: ' + result
raise cfgm_common.exceptions.HttpError(400, result)
get_context().set_state('PRE_DBE_ALLOC')
# type-specific hook
ok, result = r_class.pre_dbe_alloc(obj_dict)
if not ok:
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
name = obj_dict['fq_name'][-1]
fq_name = obj_dict['fq_name']
# common handling for all resource create
(ok, result) = self._post_common(obj_type, obj_dict)
if not ok:
(code, msg) = result
fq_name_str = ':'.join(obj_dict.get('fq_name', []))
self.config_object_error(None, fq_name_str, obj_type, 'http_post',
msg)
raise cfgm_common.exceptions.HttpError(code, msg)
uuid_in_req = result
# no ref to a pending deleted resource
ok, result = r_class.no_pending_deleted_resource_in_refs(obj_dict)
if not ok:
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
        # Can abort resource creation and return 202 status code
get_context().set_state('PENDING_DBE_CREATE')
ok, result = r_class.pending_dbe_create(obj_dict)
if not ok:
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
if ok and isinstance(result, tuple) and result[0] == 202:
            # Creation accepted but not applied, resource stays pending;
            # return 202 HTTP code so clients are aware
pending_obj_dict = result[1]
bottle.response.status = 202
rsp_body = {}
rsp_body['fq_name'] = pending_obj_dict['fq_name']
rsp_body['uuid'] = pending_obj_dict['uuid']
rsp_body['name'] = pending_obj_dict['fq_name'][-1]
rsp_body['href'] = self.generate_url(resource_type,
pending_obj_dict['uuid'])
rsp_body['parent_type'] = pending_obj_dict['parent_type']
rsp_body['parent_uuid'] = pending_obj_dict['parent_uuid']
rsp_body['parent_href'] = self.generate_url(
pending_obj_dict['parent_type'],pending_obj_dict['parent_uuid'])
return {resource_type: rsp_body}
db_conn = self._db_conn
# if client gave parent_type of config-root, ignore and remove
if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
del obj_dict['parent_type']
parent_class = None
if 'parent_type' in obj_dict:
# non config-root child, verify parent exists
parent_res_type, parent_class = self._validate_resource_type(
obj_dict['parent_type'])
parent_obj_type = parent_class.object_type
parent_res_type = parent_class.resource_type
parent_fq_name = obj_dict['fq_name'][:-1]
try:
parent_uuid = self._db_conn.fq_name_to_uuid(parent_obj_type,
parent_fq_name)
(ok, status) = self._permissions.check_perms_write(
get_request(), parent_uuid)
if not ok:
(code, err_msg) = status
raise cfgm_common.exceptions.HttpError(code, err_msg)
self._permissions.set_user_role(get_request(), obj_dict)
obj_dict['parent_uuid'] = parent_uuid
except NoIdError:
err_msg = 'Parent %s type %s does not exist' % (
pformat(parent_fq_name), parent_res_type)
fq_name_str = ':'.join(parent_fq_name)
self.config_object_error(None, fq_name_str, obj_type, 'http_post', err_msg)
raise cfgm_common.exceptions.HttpError(400, err_msg)
# Validate perms on references
try:
self._validate_perms_in_request(r_class, obj_type, obj_dict)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
400, 'Unknown reference in resource create %s.' %(obj_dict))
# State modification starts from here. Ensure that cleanup is done for all state changes
cleanup_on_failure = []
quota_counter = []
def stateful_create():
get_context().set_state('DBE_ALLOC')
# Alloc and Store id-mappings before creating entry on pubsub store.
# Else a subscriber can ask for an id mapping before we have stored it
(ok, result) = db_conn.dbe_alloc(obj_type, obj_dict, uuid_in_req)
if not ok:
return (ok, result)
get_context().push_undo(db_conn.dbe_release, obj_type, fq_name)
obj_id = result
env = get_request().headers.environ
tenant_name = env.get(hdr_server_tenant()) or 'default-project'
get_context().set_state('PRE_DBE_CREATE')
# type-specific hook
(ok, result) = r_class.pre_dbe_create(
tenant_name, obj_dict, db_conn)
if not ok:
return (ok, result)
# All resource type can have tag refs but there is some constraints
            # Done after PRE_DBE_CREATE as tag refs can be modified in that hook
ok, result = self._validate_tag_refs(obj_type, obj_dict)
if not ok:
return False, result
callable = getattr(r_class, 'http_post_collection_fail', None)
if callable:
cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
ok, quota_limit, proj_uuid = r_class.get_quota_for_resource(obj_type,
obj_dict, db_conn)
if not ok:
return ok, quota_limit
get_context().set_state('DBE_CREATE')
if quota_limit >= 0:
path = self._path_prefix + proj_uuid + "/" + obj_type
if not self.quota_counter.get(path):
# Init quota counter
path_prefix = self._path_prefix + proj_uuid
try:
QuotaHelper._zk_quota_counter_init(
path_prefix, {obj_type: quota_limit}, proj_uuid,
self._db_conn, self.quota_counter)
except NoIdError:
msg = "Error in initializing quota "\
"Internal error : Failed to read resource count"
return (False, (404, msg))
(ok, result) = QuotaHelper.verify_quota_and_create_resource(
db_conn, obj_dict, obj_type, obj_id,
quota_limit, self.quota_counter[path])
if not ok:
return (ok, result)
else:
# To be used for reverting back count when undo() is called
try:
quota_counter.append(self.quota_counter[path])
except KeyError:
                        # project quota could have been removed since then
pass
else:
#normal execution
(ok, result) = db_conn.dbe_create(obj_type, obj_id, obj_dict)
if not ok:
return (ok, result)
get_context().set_state('POST_DBE_CREATE')
# type-specific hook
try:
ok, result = r_class.post_dbe_create(tenant_name, obj_dict, db_conn)
except Exception as e:
ok = False
msg = ("%s:%s post_dbe_create had an exception: %s\n%s" %
(obj_type, obj_id, str(e),
cfgm_common.utils.detailed_traceback()))
result = (None, msg)
if not ok:
# Create is done, log to system, no point in informing user
self.config_log(result[1], level=SandeshLevel.SYS_ERR)
return True, obj_id
# end stateful_create
try:
ok, result = stateful_create()
except Exception as e:
ok = False
err_msg = cfgm_common.utils.detailed_traceback()
result = (500, traceback_err_msg(err_msg))
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
if not ok:
fq_name_str = ':'.join(fq_name)
self.undo(result, obj_type, fq_name=fq_name_str,
counter=quota_counter, value=-1)
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
# Initialize quota counter if resource is project
if resource_type == 'project' and 'quota' in obj_dict:
proj_id = obj_dict['uuid']
path_prefix = self._path_prefix + proj_id
try:
QuotaHelper._zk_quota_counter_init(
path_prefix,
QuotaHelper.get_quota_limits(obj_dict),
proj_id,
db_conn,
self.quota_counter)
        except NoIdError as e:
            msg = ("Error in initializing quota. Internal error: %s" %
                   str(e))
self.config_log(msg, level=SandeshLevel.SYS_ERR)
rsp_body = {}
rsp_body['name'] = name
rsp_body['fq_name'] = fq_name
rsp_body['uuid'] = result
rsp_body['href'] = self.generate_url(resource_type, result)
if parent_class:
# non config-root child, send back parent uuid/href
rsp_body['parent_type'] = obj_dict['parent_type']
rsp_body['parent_uuid'] = parent_uuid
rsp_body['parent_href'] = self.generate_url(parent_res_type,
parent_uuid)
try:
self._extension_mgrs['resourceApi'].map_method(
'post_%s_create' %(obj_type), obj_dict)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = 'In post_%s_create an extension had error for %s' \
%(obj_type, obj_dict)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
return {resource_type: rsp_body}
# end http_resource_create
@log_api_stats
def http_resource_read(self, obj_type, id):
resource_type, r_class = self._validate_resource_type(obj_type)
try:
self._extension_mgrs['resourceApi'].map_method(
'pre_%s_read' %(obj_type), id)
except Exception as e:
pass
etag = get_request().headers.get('If-None-Match')
db_conn = self._db_conn
try:
req_obj_type = db_conn.uuid_to_obj_type(id)
if req_obj_type != obj_type:
raise cfgm_common.exceptions.HttpError(
404, 'No %s object found for id %s' %(resource_type, id))
fq_name = db_conn.uuid_to_fq_name(id)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
# common handling for all resource get
(ok, result) = self._get_common(get_request(), id)
if not ok:
(code, msg) = result
self.config_object_error(
id, None, obj_type, 'http_get', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
db_conn = self._db_conn
if etag:
(ok, result) = db_conn.dbe_is_latest(id, etag.strip('"'))
if not ok:
# Not present in DB
self.config_object_error(
id, None, obj_type, 'http_get', result)
raise cfgm_common.exceptions.HttpError(404, result)
is_latest = result
if is_latest:
# send Not-Modified, caches use this for read optimization
bottle.response.status = 304
return
# end if etag
# Generate field list for db layer
obj_fields = r_class.prop_fields | r_class.ref_fields
if 'fields' in get_request().query:
obj_fields = set(get_request().query.fields.split(',')) & (
obj_fields |
r_class.backref_fields |
r_class.children_fields
) | set(['id_perms', 'perms2'])
else: # default props + children + refs + backrefs
if 'exclude_back_refs' not in get_request().query:
obj_fields |= r_class.backref_fields
if 'exclude_children' not in get_request().query:
obj_fields |= r_class.children_fields
(ok, result) = r_class.pre_dbe_read(id, fq_name, db_conn)
if not ok:
(code, msg) = result
raise cfgm_common.exceptions.HttpError(code, msg)
try:
(ok, result) = db_conn.dbe_read(obj_type, id,
list(obj_fields), ret_readonly=True)
if not ok:
self.config_object_error(id, None, obj_type, 'http_get', result)
except NoIdError as e:
# Not present in DB
raise cfgm_common.exceptions.HttpError(404, str(e))
if not ok:
raise cfgm_common.exceptions.HttpError(500, result)
# check visibility
if (not result['id_perms'].get('user_visible', True) and
not self.is_admin_request()):
result = 'This object is not visible by users: %s' % id
self.config_object_error(id, None, obj_type, 'http_get', result)
raise cfgm_common.exceptions.HttpError(404, result)
if not self.is_admin_request():
self.obj_view(resource_type, result)
(ok, err_msg) = r_class.post_dbe_read(result, db_conn)
if not ok:
(code, msg) = err_msg
raise cfgm_common.exceptions.HttpError(code, msg)
rsp_body = {}
rsp_body['uuid'] = id
rsp_body['name'] = result['fq_name'][-1]
if 'exclude_hrefs' not in get_request().query:
self.generate_hrefs(resource_type, result)
rsp_body.update(result)
id_perms = result['id_perms']
bottle.response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
try:
self._extension_mgrs['resourceApi'].map_method(
'post_%s_read' %(obj_type), id, rsp_body)
except Exception as e:
pass
return {resource_type: rsp_body}
# end http_resource_read
# filter object references based on permissions
def obj_view(self, resource_type, obj_dict, ref_perms=None):
r_class = self.get_resource_class(resource_type)
obj_links = r_class.obj_links & set(obj_dict.keys())
if not ref_perms:
if self.is_rbac_enabled():
fields = ['perms2']
else:
fields = ['id_perms']
ref_uuids = {ref['uuid'] for link in obj_links
for ref in obj_dict[link]}
ref_perms = {obj_dict['uuid']: obj_dict for obj_dict in
self._db_conn._object_db.object_raw_read(
resource_type, ref_uuids,fields)}
for link_field in obj_links:
links = obj_dict[link_field][:]
# build new links in returned dict based on permissions on linked
# object
for link in links:
if (link['uuid'] not in ref_perms or
not self._permissions.check_perms_read(
get_request(),
link['uuid'],
ref_perms[link['uuid']])[0]):
obj_dict[link_field].remove(link)
# end obj_view
@log_api_stats
def http_resource_update(self, obj_type, id):
resource_type, r_class = self._validate_resource_type(obj_type)
# Early return if there is no body or an empty body
request = get_request()
req_json = request.json
if not req_json or not req_json[resource_type]:
return
obj_dict = get_request().json[resource_type]
if 'perms2' in obj_dict:
if 'owner' not in obj_dict['perms2']:
raise cfgm_common.exceptions.HttpError(400,
'owner in perms2 must be present')
fields = r_class.prop_fields | r_class.ref_fields
try:
ok, result = self._db_conn.dbe_read(obj_type, id, fields)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
if not ok:
self.config_object_error(id, None, obj_type, 'http_resource_update',
result[1])
raise cfgm_common.exceptions.HttpError(result[0], result[1])
db_obj_dict = result
        # Check whether the resource has a pending version; if so, use it as
        # the resource to update
if hasattr(r_class, 'get_pending_resource'):
ok, result = r_class.get_pending_resource(db_obj_dict, fields)
if ok and isinstance(result, dict):
db_obj_dict = result
id = obj_dict['uuid'] = db_obj_dict['uuid']
if not ok and result[0] != 404:
self.config_object_error(
id, None, obj_type, 'http_resource_update', result[1])
raise cfgm_common.exceptions.HttpError(result[0], result[1])
self._put_common('http_put', obj_type, id, db_obj_dict,
req_obj_dict=obj_dict)
rsp_body = {}
rsp_body['uuid'] = id
rsp_body['href'] = self.generate_url(resource_type, id)
return {resource_type: rsp_body}
# end http_resource_update
@log_api_stats
def http_resource_delete(self, obj_type, id):
resource_type, r_class = self._validate_resource_type(obj_type)
db_conn = self._db_conn
# if obj doesn't exist return early
try:
req_obj_type = db_conn.uuid_to_obj_type(id)
if req_obj_type != obj_type:
raise cfgm_common.exceptions.HttpError(
404, 'No %s object found for id %s' %(resource_type, id))
_ = db_conn.uuid_to_fq_name(id)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'ID %s does not exist' %(id))
try:
self._extension_mgrs['resourceApi'].map_method(
'pre_%s_delete' %(obj_type), id)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = 'In pre_%s_delete an extension had error for %s' \
%(obj_type, id)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# read in obj from db (accepting error) to get details of it
try:
(read_ok, read_result) = db_conn.dbe_read(obj_type, id)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
if not read_ok:
self.config_object_error(
id, None, obj_type, 'http_delete', read_result)
# proceed down to delete the resource
# check visibility
if (not read_result['id_perms'].get('user_visible', True) and
not self.is_admin_request()):
result = 'This object is not visible by users: %s' % id
self.config_object_error(id, None, obj_type, 'http_delete', result)
raise cfgm_common.exceptions.HttpError(404, result)
# common handling for all resource delete
parent_uuid = read_result.get('parent_uuid')
(ok, del_result) = self._delete_common(
get_request(), obj_type, id, parent_uuid)
if not ok:
(code, msg) = del_result
self.config_object_error(id, None, obj_type, 'http_delete', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
        # Allow aborting resource deletion and return 202 status code
get_context().set_state('PENDING_DBE_DELETE')
ok, result = r_class.pending_dbe_delete(read_result)
if (not ok and isinstance(result, tuple) and result[0] == 409 and
isinstance(result[1], set)):
# Found back reference to existing enforced or draft resource
exist_hrefs = [self.generate_url(type, uuid)
for type, uuid in result[1]]
msg = "Delete when resource still referred: %s" % exist_hrefs
self.config_object_error(id, None, obj_type, 'http_delete', msg)
raise cfgm_common.exceptions.HttpError(409, msg)
elif ok and isinstance(result, tuple) and result[0] == 202:
            # Deletion accepted but not applied, resource is pending delete;
            # return 202 HTTP code so clients are aware
bottle.response.status = 202
return
elif not ok:
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
# fail if non-default children or non-derived backrefs exist
for child_field in r_class.children_fields:
child_type, is_derived = r_class.children_field_types[child_field]
if is_derived:
continue
child_cls = self.get_resource_class(child_type)
default_child_name = 'default-%s' %(
child_cls(parent_type=obj_type).get_type())
exist_hrefs = []
for child in read_result.get(child_field, []):
if child['to'][-1] in [default_child_name,
POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT]:
continue
exist_hrefs.append(
self.generate_url(child_type, child['uuid']))
if exist_hrefs:
err_msg = 'Delete when children still present: %s' %(
exist_hrefs)
self.config_object_error(
id, None, obj_type, 'http_delete', err_msg)
raise cfgm_common.exceptions.HttpError(409, err_msg)
relaxed_refs = set(db_conn.dbe_get_relaxed_refs(id))
for backref_field in r_class.backref_fields:
backref_type, _, is_derived = \
r_class.backref_field_types[backref_field]
if is_derived:
continue
exist_hrefs = [self.generate_url(backref_type, backref['uuid'])
for backref in read_result.get(backref_field, [])
if backref['uuid'] not in relaxed_refs]
if exist_hrefs:
err_msg = 'Delete when resource still referred: %s' %(
exist_hrefs)
self.config_object_error(
id, None, obj_type, 'http_delete', err_msg)
raise cfgm_common.exceptions.HttpError(409, err_msg)
# State modification starts from here. Ensure that cleanup is done for all state changes
cleanup_on_failure = []
quota_counter = []
def stateful_delete():
get_context().set_state('PRE_DBE_DELETE')
proj_id = r_class.get_project_id_for_resource(read_result, obj_type,
db_conn)
(ok, del_result, zk_del_kwargs) = r_class.pre_dbe_delete(id, read_result, db_conn)
if not ok:
if del_result:
return (ok, del_result)
else:
return (ok, zk_del_kwargs)
zk_del_kwargs = zk_del_kwargs or {}
# Delete default children first
for child_field in r_class.children_fields:
child_type, is_derived = r_class.children_field_types[child_field]
if is_derived:
continue
if child_field in self._GENERATE_DEFAULT_INSTANCE:
self.delete_default_children(child_type, read_result)
callable = getattr(r_class, 'http_delete_fail', None)
if callable:
cleanup_on_failure.append((callable, [id, read_result, db_conn]))
get_context().set_state('DBE_DELETE')
(ok, del_result) = db_conn.dbe_delete(obj_type, id, read_result)
if not ok:
return (ok, del_result)
if proj_id:
(ok, proj_dict) = QuotaHelper.get_project_dict_for_quota(
proj_id, db_conn)
if not ok:
return ok, proj_dict
quota_limit = QuotaHelper.get_quota_limit(proj_dict, obj_type)
path = self._path_prefix + proj_id + "/" + obj_type
if quota_limit > 0:
if self.quota_counter.get(path):
self.quota_counter[path] -= 1
else:
                        # quota counter object not initialized in this
                        # api-server; initialize it
path_prefix = self._path_prefix + proj_id
QuotaHelper._zk_quota_counter_init(
path_prefix, {obj_type : quota_limit},
proj_id, db_conn, self.quota_counter)
if db_conn._zk_db.quota_counter_exists(path):
self.quota_counter[path] -= 1
quota_counter.append(self.quota_counter.get(path))
elif self.quota_counter.get(path):
                    # quota limit was modified to unlimited;
                    # delete the counter object
del self.quota_counter[path]
# type-specific hook
get_context().set_state('POST_DBE_DELETE')
try:
ok, result = r_class.post_dbe_delete(
id, read_result, db_conn, **zk_del_kwargs)
except Exception as e:
ok = False
msg = ("%s:%s post_dbe_delete had an exception: %s\n%s" %
(obj_type, id, str(e),
cfgm_common.utils.detailed_traceback()))
result = (None, msg)
if not ok:
# Delete is done, log to system, no point in informing user
self.config_log(result[1], level=SandeshLevel.SYS_ERR)
return (True, '')
# end stateful_delete
try:
ok, result = stateful_delete()
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(
404, 'No %s object found for id %s' %(resource_type, id))
except Exception as e:
ok = False
err_msg = cfgm_common.utils.detailed_traceback()
result = (500, traceback_err_msg(err_msg))
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
if not ok:
self.undo(result, obj_type, id=id, counter=quota_counter, value=1)
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
try:
self._extension_mgrs['resourceApi'].map_method(
'post_%s_delete' %(obj_type), id, read_result)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
            err_msg = 'In post_%s_delete an extension had error for %s' \
                      %(obj_type, id)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# end http_resource_delete
@log_api_stats
def http_resource_list(self, obj_type):
resource_type, r_class = self._validate_resource_type(obj_type)
db_conn = self._db_conn
env = get_request().headers.environ
parent_uuids = None
back_ref_uuids = None
obj_uuids = None
pagination = {}
if 'parent_fq_name_str' in get_request().query:
parent_uuids = []
parent_fq_name = get_request().query.parent_fq_name_str.split(':')
parent_types = r_class.parent_types
if 'parent_type' in get_request().query:
parent_types = [get_request().query.parent_type]
for parent_type in parent_types:
_, p_class = self._validate_resource_type(parent_type)
try:
parent_uuids.append(
self._db_conn.fq_name_to_uuid(p_class.object_type,
parent_fq_name),
)
except cfgm_common.exceptions.NoIdError:
pass
elif 'parent_id' in get_request().query:
parent_uuids = get_request().query.parent_id.split(',')
if 'back_ref_id' in get_request().query:
back_ref_uuids = get_request().query.back_ref_id.split(',')
if 'obj_uuids' in get_request().query:
obj_uuids = get_request().query.obj_uuids.split(',')
if 'fq_names' in get_request().query:
obj_fqn_strs = get_request().query.fq_names.split(',')
obj_uuid = None
for obj_fqn_str in obj_fqn_strs:
try:
obj_fqn = obj_fqn_str.split(':')
obj_uuid = self._db_conn.fq_name_to_uuid(obj_type, obj_fqn)
if obj_uuids is None:
obj_uuids = []
obj_uuids.append(obj_uuid)
except cfgm_common.exceptions.NoIdError as e:
pass
if obj_uuids is None:
return {'%ss' %(resource_type): []}
if 'page_marker' in get_request().query:
pagination['marker'] = self._validate_page_marker(
get_request().query['page_marker'])
if 'page_limit' in get_request().query:
pagination['limit'] = self._validate_page_limit(
get_request().query['page_limit'])
# common handling for all resource get
for parent_uuid in list(parent_uuids or []):
(ok, result) = self._get_common(get_request(), parent_uuid)
if not ok:
parent_uuids.remove(parent_uuid)
if obj_uuids is None and back_ref_uuids is None and parent_uuids == []:
return {'%ss' %(resource_type): []}
if 'count' in get_request().query:
is_count = 'true' in get_request().query.count.lower()
else:
is_count = False
if 'detail' in get_request().query:
is_detail = 'true' in get_request().query.detail.lower()
else:
is_detail = False
if 'fields' in get_request().query:
req_fields = get_request().query.fields.split(',')
else:
req_fields = []
if 'shared' in get_request().query:
include_shared = 'true' in get_request().query.shared.lower()
else:
include_shared = False
try:
filters = utils.get_filters(get_request().query.filters)
except Exception as e:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid filter ' + get_request().query.filters)
if 'exclude_hrefs' in get_request().query:
exclude_hrefs = True
else:
exclude_hrefs = False
return self._list_collection(obj_type, parent_uuids, back_ref_uuids,
obj_uuids, is_count, is_detail, filters,
req_fields, include_shared, exclude_hrefs,
pagination)
# end http_resource_list
# internal_request_<oper> - handlers of internally generated requests
# that save-ctx, generate-ctx and restore-ctx
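    # The helpers below fabricate a bottle request with admin credentials,
    # swap it in as the current context, invoke the regular HTTP handler and
    # restore the original context afterwards.
    # Illustrative (hypothetical) caller usage:
    #   ok, resp = self.internal_request_create(
    #       'virtual-network', {'fq_name': [...], 'parent_type': 'project'})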
def internal_request_create(self, resource_type, obj_json):
object_type = self.get_resource_class(resource_type).object_type
try:
orig_context = get_context()
orig_request = get_request()
b_req = bottle.BaseRequest(
{'PATH_INFO': '/%ss' %(resource_type),
'bottle.app': orig_request.environ['bottle.app'],
'HTTP_X_USER': 'contrail-api',
'HTTP_X_ROLE': self.cloud_admin_role})
json_as_dict = {'%s' %(resource_type): obj_json}
i_req = context.ApiInternalRequest(
b_req.url, b_req.urlparts, b_req.environ, b_req.headers,
json_as_dict, None)
set_context(context.ApiContext(internal_req=i_req))
resp = self.http_resource_create(object_type)
return True, resp
finally:
set_context(orig_context)
# end internal_request_create
def internal_request_update(self, resource_type, obj_uuid, obj_json):
r_class = self.get_resource_class(resource_type)
try:
orig_context = get_context()
orig_request = get_request()
b_req = bottle.BaseRequest(
{'PATH_INFO': '/%ss' %(r_class.resource_type),
'bottle.app': orig_request.environ['bottle.app'],
'HTTP_X_USER': 'contrail-api',
'HTTP_X_ROLE': self.cloud_admin_role})
json_as_dict = {'%s' %(r_class.resource_type): obj_json}
i_req = context.ApiInternalRequest(
b_req.url, b_req.urlparts, b_req.environ, b_req.headers,
json_as_dict, None)
set_context(context.ApiContext(internal_req=i_req))
self.http_resource_update(r_class.object_type, obj_uuid)
return True, ""
finally:
set_context(orig_context)
# end internal_request_update
def internal_request_delete(self, resource_type, obj_uuid):
object_type = self.get_resource_class(resource_type).object_type
try:
orig_context = get_context()
orig_request = get_request()
b_req = bottle.BaseRequest(
{'PATH_INFO': '/%s/%s' %(resource_type, obj_uuid),
'bottle.app': orig_request.environ['bottle.app'],
'HTTP_X_USER': 'contrail-api',
'HTTP_X_ROLE': self.cloud_admin_role})
i_req = context.ApiInternalRequest(
b_req.url, b_req.urlparts, b_req.environ, b_req.headers,
None, None)
set_context(context.ApiContext(internal_req=i_req))
self.http_resource_delete(object_type, obj_uuid)
return True, ""
finally:
set_context(orig_context)
# end internal_request_delete
def internal_request_ref_update(self, res_type, obj_uuid, operation,
ref_res_type, ref_uuid=None,
ref_fq_name=None, attr=None,
relax_ref_for_delete=False):
req_dict = {'type': res_type,
'uuid': obj_uuid,
'operation': operation,
'ref-type': ref_res_type,
'ref-uuid': ref_uuid,
'ref-fq-name': ref_fq_name,
'attr': attr,
'relax_ref_for_delete': relax_ref_for_delete}
try:
orig_context = get_context()
orig_request = get_request()
b_req = bottle.BaseRequest(
{'PATH_INFO': '/ref-update',
'bottle.app': orig_request.environ['bottle.app'],
'HTTP_X_USER': 'contrail-api',
'HTTP_X_ROLE': self.cloud_admin_role})
i_req = context.ApiInternalRequest(
b_req.url, b_req.urlparts, b_req.environ, b_req.headers,
req_dict, None)
set_context(context.ApiContext(internal_req=i_req))
self.ref_update_http_post()
return True, ""
finally:
set_context(orig_context)
# end internal_request_ref_update
def internal_request_prop_collection(self, obj_uuid, updates=None):
req_dict = {
'uuid': obj_uuid,
'updates': updates or [],
}
try:
orig_context = get_context()
orig_request = get_request()
b_req = bottle.BaseRequest(
{'PATH_INFO': '/ref-update',
'bottle.app': orig_request.environ['bottle.app'],
'HTTP_X_USER': 'contrail-api',
'HTTP_X_ROLE': self.cloud_admin_role})
i_req = context.ApiInternalRequest(
b_req.url, b_req.urlparts, b_req.environ, b_req.headers,
req_dict, None)
set_context(context.ApiContext(internal_req=i_req))
self.prop_collection_http_post()
return True, ''
finally:
set_context(orig_context)
def alloc_vn_id(self, fq_name_str):
return self._db_conn._zk_db.alloc_vn_id(fq_name_str)
def alloc_security_group_id(self, fq_name_str):
return self._db_conn._zk_db.alloc_sg_id(fq_name_str)
def alloc_tag_value_id(self, tag_type, name):
return self._db_conn._zk_db.alloc_tag_value_id(tag_type, name)
def create_default_children(self, object_type, parent_obj):
childs = self.get_resource_class(object_type).children_field_types
# Create a default child only if provisioned for
child_types = {type for _, (type, derivate) in list(childs.items())
if (not derivate and
type in self._GENERATE_DEFAULT_INSTANCE)}
if not child_types:
return True, ''
for child_type in child_types:
child_cls = self.get_resource_class(child_type)
child_obj_type = child_cls.object_type
child_obj = child_cls(parent_obj=parent_obj)
child_dict = child_obj.__dict__
child_dict['id_perms'] = self._get_default_id_perms()
child_dict['perms2'] = self._get_default_perms2()
(ok, result) = self._db_conn.dbe_alloc(child_obj_type, child_dict)
if not ok:
return (ok, result)
obj_id = result
# For virtual networks, allocate an ID
if child_obj_type == 'virtual_network':
child_dict['virtual_network_network_id'] = self.alloc_vn_id(
child_obj.get_fq_name_str())
(ok, result) = self._db_conn.dbe_create(child_obj_type, obj_id,
child_dict)
if not ok:
# DB Create failed, log and stop further child creation.
err_msg = "DB Create failed creating %s" % child_type
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
return (ok, result)
# recurse down type hierarchy
ok, result = self.create_default_children(child_obj_type,
child_obj)
if not ok:
return False, result
return True, ''
# end create_default_children
def delete_default_children(self, resource_type, parent_dict):
r_class = self.get_resource_class(resource_type)
for child_field in r_class.children_fields:
# Delete a default child only if provisioned for
child_type, is_derived = r_class.children_field_types[child_field]
if child_type not in self._GENERATE_DEFAULT_INSTANCE:
continue
child_cls = self.get_resource_class(child_type)
            # first locate default child then delete it
default_child_name = 'default-%s' %(child_type)
child_infos = parent_dict.get(child_field, [])
for child_info in child_infos:
if child_info['to'][-1] == default_child_name:
default_child_id = child_info['uuid']
self.http_resource_delete(child_type, default_child_id)
break
# end delete_default_children
@classmethod
def _generate_resource_crud_methods(cls, obj):
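        # For every known object type, bind the generic http_resource_*
        # handlers as per-type methods on the server object, e.g.
        # obj.virtual_network_http_get / obj.virtual_networks_http_post,
        # which _generate_resource_crud_uri() then wires to bottle routes.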
for object_type, _ in all_resource_type_tuples:
create_method = functools.partial(obj.http_resource_create,
object_type)
functools.update_wrapper(create_method, obj.http_resource_create)
setattr(obj, '%ss_http_post' %(object_type), create_method)
read_method = functools.partial(obj.http_resource_read,
object_type)
functools.update_wrapper(read_method, obj.http_resource_read)
setattr(obj, '%s_http_get' %(object_type), read_method)
update_method = functools.partial(obj.http_resource_update,
object_type)
functools.update_wrapper(update_method, obj.http_resource_update)
setattr(obj, '%s_http_put' %(object_type), update_method)
delete_method = functools.partial(obj.http_resource_delete,
object_type)
functools.update_wrapper(delete_method, obj.http_resource_delete)
setattr(obj, '%s_http_delete' %(object_type), delete_method)
list_method = functools.partial(obj.http_resource_list,
object_type)
functools.update_wrapper(list_method, obj.http_resource_list)
setattr(obj, '%ss_http_get' %(object_type), list_method)
# end _generate_resource_crud_methods
@classmethod
def _generate_resource_crud_uri(cls, obj):
for object_type, resource_type in all_resource_type_tuples:
# CRUD + list URIs of the form
# obj.route('/virtual-network/<id>', 'GET', obj.virtual_network_http_get)
# obj.route('/virtual-network/<id>', 'PUT', obj.virtual_network_http_put)
# obj.route('/virtual-network/<id>', 'DELETE', obj.virtual_network_http_delete)
# obj.route('/virtual-networks', 'POST', obj.virtual_networks_http_post)
# obj.route('/virtual-networks', 'GET', obj.virtual_networks_http_get)
# leaf resource
obj.route('/%s/<id>' %(resource_type),
'GET',
getattr(obj, '%s_http_get' %(object_type)))
obj.route('/%s/<id>' %(resource_type),
'PUT',
getattr(obj, '%s_http_put' %(object_type)))
obj.route('/%s/<id>' %(resource_type),
'DELETE',
getattr(obj, '%s_http_delete' %(object_type)))
# collection of leaf
obj.route('/%ss' %(resource_type),
'POST',
getattr(obj, '%ss_http_post' %(object_type)))
obj.route('/%ss' %(resource_type),
'GET',
getattr(obj, '%ss_http_get' %(object_type)))
# end _generate_resource_crud_uri
def __init__(self, args_str=None):
self._db_conn = None
self._resource_classes = initialize_all_server_resource_classes(self)
self._args = None
self._path_prefix = _DEFAULT_ZK_COUNTER_PATH_PREFIX
self.quota_counter = {}
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
self.lock_path_prefix = '%s/%s' % (self._args.cluster_id,
_DEFAULT_ZK_LOCK_PATH_PREFIX)
self.security_lock_prefix = '%s/security' % self.lock_path_prefix
# set the max size of the api requests
bottle.BaseRequest.MEMFILE_MAX = self._args.max_request_size
# multi_tenancy is ignored if aaa_mode is configured by user
if self._args.aaa_mode is not None:
if self.aaa_mode not in AAA_MODE_VALID_VALUES:
self.aaa_mode = AAA_MODE_DEFAULT_VALUE
elif self._args.multi_tenancy is not None:
# MT configured by user - determine from aaa-mode
self.aaa_mode = "cloud-admin" if self._args.multi_tenancy else "no-auth"
else:
self.aaa_mode = "cloud-admin"
api_proto = 'https' if self._args.config_api_ssl_enable else 'http'
api_host_name = socket.getfqdn(self._args.listen_ip_addr)
self._base_url = "%s://%s:%s" % (api_proto, api_host_name,
self._args.listen_port)
# Generate LinkObjects for all entities
links = []
# Link for root
links.append(LinkObject('root', self._base_url , '/config-root',
'config-root'))
for _, resource_type in all_resource_type_tuples:
link = LinkObject('collection',
self._base_url , '/%ss' %(resource_type),
'%s' %(resource_type))
links.append(link)
for _, resource_type in all_resource_type_tuples:
link = LinkObject('resource-base',
self._base_url , '/%s' %(resource_type),
'%s' %(resource_type))
links.append(link)
self._homepage_links = links
self._pipe_start_app = None
#GreenletProfiler.set_clock_type('wall')
self._profile_info = None
for act_res in _ACTION_RESOURCES:
link = LinkObject('action', self._base_url, act_res['uri'],
act_res['link_name'], act_res['method'])
self._homepage_links.append(link)
# Register for VN delete request. Disallow delete of system default VN
self.route('/virtual-network/<id>', 'DELETE', self.virtual_network_http_delete)
self.route('/documentation/<filename:path>',
'GET', self.documentation_http_get)
self._homepage_links.insert(
0, LinkObject('documentation', self._base_url,
'/documentation/index.html',
'documentation', 'GET'))
# APIs to reserve/free block of IP address from a VN/Subnet
self.route('/virtual-network/<id>/ip-alloc',
'POST', self.vn_ip_alloc_http_post)
self._homepage_links.append(
LinkObject('action', self._base_url,
'/virtual-network/%s/ip-alloc',
'virtual-network-ip-alloc', 'POST'))
self.route('/virtual-network/<id>/ip-free',
'POST', self.vn_ip_free_http_post)
self._homepage_links.append(
LinkObject('action', self._base_url,
'/virtual-network/%s/ip-free',
'virtual-network-ip-free', 'POST'))
# APIs to find out number of ip instances from given VN subnet
self.route('/virtual-network/<id>/subnet-ip-count',
'POST', self.vn_subnet_ip_count_http_post)
self._homepage_links.append(
LinkObject('action', self._base_url,
'/virtual-network/%s/subnet-ip-count',
'virtual-network-subnet-ip-count', 'POST'))
# Enable/Disable aaa mode
self.route('/aaa-mode', 'GET', self.aaa_mode_http_get)
self.route('/aaa-mode', 'PUT', self.aaa_mode_http_put)
# Set Tag actions
self.route('/set-tag', 'POST', self.set_tag)
self._homepage_links.append(
LinkObject('action', self._base_url, '/set-tag', 'set-tag',
'POST'))
# Commit or discard draft security policy
self.route('/security-policy-draft', 'POST',
self.security_policy_draft)
self._homepage_links.append(
LinkObject('action', self._base_url, '/security-policy-draft',
'security-policy-draft', 'POST'))
# randomize the collector list
self._random_collectors = self._args.collectors
        self._chksum = ""
if self._args.collectors:
self._chksum = hashlib.md5(''.join(self._args.collectors)).hexdigest()
self._random_collectors = random.sample(self._args.collectors, \
len(self._args.collectors))
# sandesh init
self._sandesh = Sandesh()
# Reset the sandesh send rate limit value
if self._args.sandesh_send_rate_limit is not None:
SandeshSystem.set_sandesh_send_rate_limit(
self._args.sandesh_send_rate_limit)
module = Module.API_SERVER
module_name = ModuleNames[Module.API_SERVER]
node_type = Module2NodeType[module]
node_type_name = NodeTypeNames[node_type]
self.table = "ObjectConfigNode"
if self._args.worker_id:
instance_id = self._args.worker_id
else:
instance_id = INSTANCE_ID_DEFAULT
hostname = socket.getfqdn(self._args.listen_ip_addr)
self._sandesh.init_generator(module_name, hostname,
node_type_name, instance_id,
self._random_collectors,
'vnc_api_server_context',
int(self._args.http_server_port),
['cfgm_common', 'vnc_cfg_api_server.sandesh'],
logger_class=self._args.logger_class,
logger_config_file=self._args.logging_conf,
config=self._args.sandesh_config)
self._sandesh.trace_buffer_create(name="VncCfgTraceBuf", size=1000)
self._sandesh.trace_buffer_create(name="RestApiTraceBuf", size=1000)
self._sandesh.trace_buffer_create(name="DBRequestTraceBuf", size=1000)
self._sandesh.trace_buffer_create(name="DBUVERequestTraceBuf", size=1000)
self._sandesh.trace_buffer_create(name="MessageBusNotifyTraceBuf",
size=1000)
VncGreenlet.register_sandesh_handler()
self._sandesh.set_logging_params(
enable_local_log=self._args.log_local,
category=self._args.log_category,
level=self._args.log_level,
file=self._args.log_file,
enable_syslog=self._args.use_syslog,
syslog_facility=self._args.syslog_facility)
ConnectionState.init(self._sandesh, hostname, module_name,
instance_id,
staticmethod(ConnectionState.get_conn_state_cb),
NodeStatusUVE, NodeStatus, self.table)
# Address Management interface
addr_mgmt = vnc_addr_mgmt.AddrMgmt(self)
self._addr_mgmt = addr_mgmt
self._default_domain = None
self._default_project = None
# DB interface initialization
if self._args.wipe_config:
self._db_connect(True)
else:
self._db_connect(self._args.reset_config)
self._db_init_entries()
self._initialize_quota_counters()
# API/Permissions check
# after db init (uses db_conn)
self._rbac = vnc_rbac.VncRbac(self, self._db_conn)
self._permissions = vnc_perms.VncPermissions(self, self._args)
if self.is_rbac_enabled():
self._create_default_rbac_rule()
if self.is_auth_needed():
self._generate_obj_view_links()
if os.path.exists('/usr/bin/contrail-version'):
cfgm_cpu_uve = ModuleCpuState()
cfgm_cpu_uve.name = socket.getfqdn(self._args.listen_ip_addr)
cfgm_cpu_uve.config_node_ip = self.get_server_ip()
command = "contrail-version contrail-config | grep 'contrail-config'"
version = os.popen(command).read()
if version:
version = version.split()
if not version or len(version) != 3:
# In case of setup from source there is no RPM and version is available in env
version_str = os.environ.get('CONTRAIL_VERSION', 'tf-master-latest')
                version = re.split(r'\.|-', version_str)[-3:]
if len(version) != 3:
# in CI version is like changedid-changeset
version = ['tf', version_str, version_str]
_, build_id, build_num = version[:3]
cfgm_cpu_uve.build_info = build_info + '"build-id" : "' + \
build_id + '", "build-number" : "' + \
build_num + '"}]}'
cpu_info_trace = ModuleCpuStateTrace(data=cfgm_cpu_uve, sandesh=self._sandesh)
cpu_info_trace.send(sandesh=self._sandesh)
self.re_uuid = re.compile('^[0-9A-F]{8}-?[0-9A-F]{4}-?4[0-9A-F]{3}-?[89AB][0-9A-F]{3}-?[0-9A-F]{12}$',
re.IGNORECASE)
# Load extensions
self._extension_mgrs = {}
self._load_extensions()
# Authn/z interface
if self._args.auth == 'keystone':
auth_svc = vnc_auth_keystone.AuthServiceKeystone(self, self._args)
else:
auth_svc = vnc_auth.AuthService(self, self._args)
self._pipe_start_app = auth_svc.get_middleware_app()
self._auth_svc = auth_svc
if int(self._args.worker_id) == 0:
try:
self._extension_mgrs['resync'].map(
self._resync_domains_projects)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
self._global_asn = None
self._enable_4byte_as = None
# api server list info
self._config_node_list = []
# create amqp handle
self._amqp_client = self.initialize_amqp_client()
# end __init__
def _initialize_quota_counters(self):
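        # Seed per-project quota counters in ZooKeeper for every project
        # that already has a quota configured, so later create/delete
        # operations can increment/decrement them.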
ok, result, _ = self._db_conn.dbe_list(
'project', field_names=['quota'])
if not ok:
raise cfgm_common.exceptions.HttpError(result[0], result[1])
projects = result
for project in projects or []:
if project.get('quota'):
path_prefix = self._path_prefix + project['uuid']
try:
QuotaHelper._zk_quota_counter_init(
path_prefix,
QuotaHelper.get_quota_limits(project),
project['uuid'],
self._db_conn,
self.quota_counter)
except NoIdError as e:
msg = ("Error in initializing quota Internal error: %s" %
str(e))
self.config_log(msg, level=SandeshLevel.SYS_ERR)
def initialize_amqp_client(self):
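        # Build a Kombu-based AMQP client from the rabbit_* arguments,
        # declare the job-request exchange and a dummy consumer (to start
        # the consumer greenlet); return None if initialization fails.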
amqp_client = None
use_ssl = None
try:
if self._args.rabbit_use_ssl is not None:
use_ssl = str(self._args.rabbit_use_ssl).lower() == 'true'
# prepare rabbitMQ params
rabbitmq_cfg = AttrDict(
servers=self._args.rabbit_server,
port=self._args.rabbit_port,
user=self._args.rabbit_user,
password=self._args.rabbit_password,
vhost=self._args.rabbit_vhost,
ha_mode=self._args.rabbit_ha_mode,
use_ssl=use_ssl,
ssl_version=self._args.kombu_ssl_version,
ssl_keyfile=self._args.kombu_ssl_keyfile,
ssl_certfile=self._args.kombu_ssl_certfile,
ssl_ca_certs=self._args.kombu_ssl_ca_certs
)
amqp_client = KombuAmqpClient(self.config_log, rabbitmq_cfg,
heartbeat=self.get_rabbit_health_check_interval())
amqp_client.add_exchange(self.JOB_REQUEST_EXCHANGE, type="direct")
amqp_client.run()
# add dummy consumer to initialize the consumer greenlet
amqp_client.add_exchange('amqp_request_exchange', type='direct')
consumer = 'amqp_request.%s.dummy' % socket.getfqdn()
amqp_client.add_consumer(consumer, 'amqp_request_exchange',
routing_key='amqp.request.dummy', auto_delete=True)
except Exception as e:
err_msg = "Error while initializing the AMQP client %s " % repr(e)
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
if amqp_client is not None:
amqp_client.stop()
return None
return amqp_client
@property
def global_autonomous_system(self):
if not self._global_asn:
gsc_class = self.get_resource_class(GlobalSystemConfig.object_type)
ok, result = gsc_class.locate(uuid=self._gsc_uuid, create_it=False,
fields=['autonomous_system'])
if not ok:
msg = ("Cannot fetch Global System Config to obtain "
"autonomous system")
raise cfgm_common.exceptions.VncError(msg)
self._global_asn = result['autonomous_system']
return self._global_asn
@global_autonomous_system.setter
def global_autonomous_system(self, asn):
self._global_asn = asn
@property
def enable_4byte_as(self):
if not self._enable_4byte_as:
gsc_class = self.get_resource_class(GlobalSystemConfig.object_type)
ok, result = gsc_class.locate(uuid=self._gsc_uuid, create_it=False,
fields=['enable_4byte_as'])
if not ok:
msg = ("Cannot fetch Global System Config to obtain "
"enable_4byte_as flag")
raise cfgm_common.exceptions.VncError(msg)
self._enable_4byte_as = result.get('enable_4byte_as', False)
return self._enable_4byte_as
@enable_4byte_as.setter
def enable_4byte_as(self, enable_4byte_as):
self._enable_4byte_as = enable_4byte_as
@property
def default_domain(self):
if not self._default_domain:
domain_class = self.get_resource_class(Domain.object_type)
ok, result = domain_class.locate(
fq_name=Domain().fq_name, create_it=False)
if not ok:
msg = ("Cannot fetch default domain")
raise cfgm_common.exceptions.VncError(msg)
self._default_domain = result
return self._default_domain
@default_domain.setter
def default_domain(self, default_domain):
self._default_domain = default_domain
@property
def default_project(self):
if not self._default_project:
project_class = self.get_resource_class(Project.object_type)
ok, result = project_class.locate(
fq_name=Project().fq_name, create_it=False)
if not ok:
msg = ("Cannot fetch default project")
raise cfgm_common.exceptions.VncError(msg)
self._default_project = result
return self._default_project
@default_project.setter
def default_project(self, default_project):
self._default_project = default_project
def _extensions_transform_request(self, request):
extensions = self._extension_mgrs.get('resourceApi')
if not extensions or not extensions.names():
return None
return extensions.map_method(
'transform_request', request)
# end _extensions_transform_request
def _extensions_validate_request(self, request):
extensions = self._extension_mgrs.get('resourceApi')
if not extensions or not extensions.names():
return None
return extensions.map_method(
'validate_request', request)
# end _extensions_validate_request
def _extensions_transform_response(self, request, response):
extensions = self._extension_mgrs.get('resourceApi')
if not extensions or not extensions.names():
return None
return extensions.map_method(
'transform_response', request, response)
# end _extensions_transform_response
@ignore_exceptions
def _generate_rest_api_request_trace(self):
method = get_request().method.upper()
if method == 'GET':
return None
req_id = get_request().headers.get('X-Request-Id',
'req-%s' %(str(uuid.uuid4())))
gevent.getcurrent().trace_request_id = req_id
url = get_request().url
if method == 'DELETE':
req_data = ''
else:
try:
req_data = json.dumps(get_request().json)
except Exception as e:
req_data = '%s: Invalid request body' %(e)
rest_trace = RestApiTrace(request_id=req_id)
rest_trace.url = url
rest_trace.method = method
rest_trace.request_data = req_data
# Also log keystone response time against this request id,
# before returning the trace message.
if ((get_context().get_keystone_response_time()) is not None):
response_time = get_context().get_keystone_response_time()
response_time_in_usec = ((response_time.days*24*60*60) +
(response_time.seconds*1000000) +
response_time.microseconds)
stats = VncApiLatencyStats(
operation_type='VALIDATE',
application='KEYSTONE',
response_time_in_usec=response_time_in_usec,
response_size=0,
identifier=req_id,
)
stats_log = VncApiLatencyStatsLog(node_name="issu-vm6", api_latency_stats=stats, sandesh=self._sandesh)
            stats_log.send(sandesh=self._sandesh)
return rest_trace
# end _generate_rest_api_request_trace
@ignore_exceptions
def _generate_rest_api_response_trace(self, rest_trace, response):
if not rest_trace:
return
rest_trace.status = bottle.response.status
rest_trace.response_body = json.dumps(response)
rest_trace.trace_msg(name='RestApiTraceBuf', sandesh=self._sandesh)
# end _generate_rest_api_response_trace
# Public Methods
def route(self, uri, method, handler):
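        # Wrap every registered handler so that extension hooks, request
        # tracing and RBAC validation run before it, and unexpected
        # exceptions are logged and converted to HTTP errors.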
@use_context
def handler_trap_exception(*args, **kwargs):
try:
trace = None
self._extensions_transform_request(get_request())
self._extensions_validate_request(get_request())
trace = self._generate_rest_api_request_trace()
(ok, status) = self._rbac.validate_request(get_request())
if not ok:
(code, err_msg) = status
raise cfgm_common.exceptions.HttpError(code, err_msg)
response = handler(*args, **kwargs)
self._generate_rest_api_response_trace(trace, response)
self._extensions_transform_response(get_request(), response)
return response
except Exception as e:
if trace:
trace.trace_msg(name='RestApiTraceBuf',
sandesh=self._sandesh)
# don't log details of cfgm_common.exceptions.HttpError i.e handled error cases
if isinstance(e, cfgm_common.exceptions.HttpError):
bottle.abort(e.status_code, e.content)
else:
string_buf = StringIO()
cgitb_hook(file=string_buf, format="text")
err_msg = string_buf.getvalue()
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
raise
self.api_bottle.route(uri, method, handler_trap_exception)
# end route
def get_args(self):
return self._args
# end get_args
def get_server_ip(self):
ip_list = []
for i in netifaces.interfaces():
try:
if netifaces.AF_INET in netifaces.ifaddresses(i):
addr = netifaces.ifaddresses(i)[netifaces.AF_INET][0][
'addr']
if addr != '127.0.0.1' and addr not in ip_list:
ip_list.append(addr)
except ValueError as e:
self.config_log("Skipping interface %s: %s" % (i, str(e)),
level=SandeshLevel.SYS_DEBUG)
return ip_list
# end get_server_ip
def get_listen_ip(self):
return self._args.listen_ip_addr
# end get_listen_ip
def get_server_port(self):
return self._args.listen_port
# end get_server_port
def get_enable_ssl(self):
return self._args.config_api_ssl_enable
# end get_enable_ssl
def get_keyfile(self):
return self._args.config_api_ssl_keyfile
# end get_keyfile
def get_certfile(self):
return self._args.config_api_ssl_certfile
# end get_certfile
def get_ca_cert(self):
return self._args.config_api_ssl_ca_cert
# end get_ca_cert
def get_worker_id(self):
return int(self._args.worker_id)
# end get_worker_id
def get_pipe_start_app(self):
return self._pipe_start_app
# end get_pipe_start_app
def get_rabbit_health_check_interval(self):
return float(self._args.rabbit_health_check_interval)
# end get_rabbit_health_check_interval
@staticmethod
def path_in_white_list(path=None):
if not path:
path = get_context().path
return _WHITE_LIST_URI_REGEX.match(path) is not None
def is_auth_disabled(self):
return self._args.auth is None or self._args.auth.lower() != 'keystone'
def is_admin_request(self):
if not self.is_auth_needed():
return True
if is_internal_request():
return True
env = bottle.request.headers.environ
roles = []
for field in ('HTTP_X_API_ROLE', 'HTTP_X_ROLE'):
if field in env:
roles.extend(env[field].split(','))
return has_role(self.cloud_admin_role, roles)
def get_auth_headers_from_token(self, request, token):
if self.is_auth_disabled() or not self.is_auth_needed():
return {}
return self._auth_svc.get_auth_headers_from_token(request, token)
# end get_auth_headers_from_token
def _generate_obj_view_links(self):
for object_type, resource_type in all_resource_type_tuples:
r_class = self.get_resource_class(resource_type)
r_class.obj_links = (r_class.ref_fields | r_class.backref_fields | r_class.children_fields)
# Check for the system created VN. Disallow such VN delete
def virtual_network_http_delete(self, id):
db_conn = self._db_conn
# if obj doesn't exist return early
try:
obj_type = db_conn.uuid_to_obj_type(id)
if obj_type != 'virtual_network':
raise cfgm_common.exceptions.HttpError(
404, 'No virtual-network object found for id %s' %(id))
vn_name = db_conn.uuid_to_fq_name(id)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'ID %s does not exist' %(id))
if (vn_name == cfgm_common.IP_FABRIC_VN_FQ_NAME or
vn_name == cfgm_common.LINK_LOCAL_VN_FQ_NAME):
raise cfgm_common.exceptions.HttpError(
409,
                'Cannot delete system created default virtual-network ' + id)
super(VncApiServer, self).virtual_network_http_delete(id)
# end
@use_context
def homepage_http_get(self):
json_body = {}
json_links = []
# strip trailing '/' in url
url = get_request().url[:-1]
url = url.replace('<script>', '<!--script>')
url = url.replace('</script>', '</script-->')
for link in self._homepage_links:
# strip trailing '/' in url
json_links.append(
{'link': link.to_dict(with_url=url)}
)
json_body = {"href": url, "links": json_links}
return json_body
# end homepage_http_get
def documentation_http_get(self, filename):
# ubuntu packaged path
doc_root = '/usr/share/doc/contrail-config/doc/source/contrail-config/html/'
if not os.path.exists(doc_root):
# centos packaged path
            doc_root = '/usr/share/doc/python-vnc_cfg_api_server/contrail-config/html/'
return bottle.static_file(
filename,
root=doc_root)
# end documentation_http_get
def obj_perms_http_get(self):
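        # Validate the caller's Keystone token (v2 or v3) and report its
        # roles plus, if a uuid is supplied, the caller's permissions on
        # that object.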
if self.is_auth_disabled() or not self.is_auth_needed():
result = {
'token_info': None,
'is_cloud_admin_role': False,
'is_global_read_only_role': False,
'permissions': 'RWX'
}
return result
obj_uuid = None
if 'uuid' in get_request().query:
obj_uuid = get_request().query.uuid
ok, result = self._auth_svc.validate_user_token()
if not ok:
code, msg = result
self.config_object_error(obj_uuid, None, None,
'obj_perms_http_get', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
token_info = result
# roles in result['token_info']['access']['user']['roles']
result = {'token_info': token_info}
# Handle v2 and v3 responses
roles_list = []
if 'access' in token_info:
roles_list = [roles['name'] for roles in
token_info['access']['user']['roles']]
elif 'token' in token_info:
roles_list = [roles['name'] for roles in
token_info['token']['roles']]
result['is_cloud_admin_role'] = has_role(self.cloud_admin_role,
roles_list)
result['is_global_read_only_role'] = has_role(
self.global_read_only_role, roles_list)
if obj_uuid:
result['permissions'] = self._permissions.obj_perms(get_request(),
obj_uuid)
if 'token' in list(token_info.keys()):
if 'project' in list(token_info['token'].keys()):
domain = None
try:
domain = token_info['token']['project']['domain']['id']
domain = str(uuid.UUID(domain))
                except (ValueError, TypeError):
if domain == self._args.default_domain_id:
domain = 'default-domain'
domain = self._db_conn.fq_name_to_uuid('domain', [domain])
if domain:
domain = domain.replace('-', '')
token_info['token']['project']['domain']['id'] = domain
return result
# end obj_perms_http_get
def invalid_uuid(self, uuid):
return self.re_uuid.match(uuid) is None
def invalid_access(self, access):
return type(access) is not int or access not in list(range(0, 8))
def invalid_share_type(self, share_type):
return share_type not in cfgm_common.PERMS2_VALID_SHARE_TYPES
# change ownership of an object
def obj_chown_http_post(self):
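        # Expected request body (illustrative):
        #   {"uuid": "<object-uuid>", "owner": "<tenant-uuid>"}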
obj_uuid = get_request().json.get('uuid')
owner = get_request().json.get('owner')
if obj_uuid is None:
msg = "Bad Request, no resource UUID provided to chown"
raise cfgm_common.exceptions.HttpError(400, msg)
if owner is None:
msg = "Bad Request, no owner UUID provided to chown"
raise cfgm_common.exceptions.HttpError(400, msg)
if self.invalid_uuid(obj_uuid):
msg = "Bad Request, invalid resource UUID"
raise cfgm_common.exceptions.HttpError(400, msg)
if self.invalid_uuid(owner):
msg = "Bad Request, invalid owner UUID"
raise cfgm_common.exceptions.HttpError(400, msg)
try:
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
except NoIdError as e:
# Not present in DB
raise cfgm_common.exceptions.HttpError(404, str(e))
self._ensure_services_conn('chown', obj_type, obj_uuid=obj_uuid)
# ensure user has RW permissions to object
perms = self._permissions.obj_perms(get_request(), obj_uuid)
        if 'RW' not in perms:
raise cfgm_common.exceptions.HttpError(403, " Permission denied")
try:
(ok, obj_dict) = self._db_conn.dbe_read(obj_type, obj_uuid,
obj_fields=['perms2'])
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
obj_dict['perms2']['owner'] = owner
self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict)
msg = "chown: %s owner set to %s" % (obj_uuid, owner)
self.config_log(msg, level=SandeshLevel.SYS_NOTICE)
return {}
#end obj_chown_http_post
def dump_cache(self):
self._post_common(None, {})
req_dict = get_request().json or {}
obj_uuids = req_dict.get('uuids', [])
count = req_dict.get('count', 10)
return self._db_conn._object_db._obj_cache_mgr.dump_cache(
obj_uuids=obj_uuids, count=count)
# chmod for an object
def obj_chmod_http_post(self):
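        # Expected request body (illustrative): {"uuid": "<object-uuid>"}
        # plus any of "owner", "owner_access", "global_access" and "share"
        # (a list of {"tenant": ..., "tenant_access": ...} entries).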
try:
obj_uuid = get_request().json['uuid']
except Exception as e:
raise cfgm_common.exceptions.HttpError(400, str(e))
if self.invalid_uuid(obj_uuid):
raise cfgm_common.exceptions.HttpError(
400, "Bad Request, invalid object id")
try:
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
except NoIdError as e:
# Not present in DB
raise cfgm_common.exceptions.HttpError(404, str(e))
self._ensure_services_conn('chmod', obj_type, obj_uuid=obj_uuid)
# ensure user has RW permissions to object
perms = self._permissions.obj_perms(get_request(), obj_uuid)
        if 'RW' not in perms:
raise cfgm_common.exceptions.HttpError(403, " Permission denied")
request_params = get_request().json
owner = request_params.get('owner')
share = request_params.get('share')
owner_access = request_params.get('owner_access')
global_access = request_params.get('global_access')
(ok, obj_dict) = self._db_conn.dbe_read(obj_type, obj_uuid,
obj_fields=['perms2', 'is_shared'])
obj_perms = obj_dict['perms2']
old_perms = '%s/%d %d %s' % (obj_perms['owner'],
obj_perms['owner_access'], obj_perms.get('global_access', 0),
['%s:%d' % (item['tenant'], item['tenant_access']) for item in
obj_perms.get('share', [])])
if owner:
if self.invalid_uuid(owner):
raise cfgm_common.exceptions.HttpError(
400, "Bad Request, invalid owner")
obj_perms['owner'] = owner.replace('-','')
if owner_access is not None:
if self.invalid_access(owner_access):
raise cfgm_common.exceptions.HttpError(
400, "Bad Request, invalid owner_access value")
obj_perms['owner_access'] = owner_access
if share is not None:
try:
for item in share:
"""
item['tenant'] := [<share_type>:] <uuid>
share_type := ['domain' | 'tenant']
"""
(share_type, share_id) = cfgm_common.utils.shareinfo_from_perms2_tenant(item['tenant'])
if self.invalid_share_type(share_type) or self.invalid_uuid(share_id) or self.invalid_access(item['tenant_access']):
raise cfgm_common.exceptions.HttpError(
400, "Bad Request, invalid share list")
except Exception as e:
raise cfgm_common.exceptions.HttpError(400, str(e))
obj_perms['share'] = share
if global_access is not None:
if self.invalid_access(global_access):
raise cfgm_common.exceptions.HttpError(
400, "Bad Request, invalid global_access value")
obj_perms['global_access'] = global_access
obj_dict['is_shared'] = (global_access != 0)
new_perms = '%s/%d %d %s' % (obj_perms['owner'],
obj_perms['owner_access'], obj_perms.get('global_access', 0),
['%s:%d' % (item['tenant'], item['tenant_access']) for item in
obj_perms.get('share', [])])
self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict)
msg = "chmod: %s perms old=%s, new=%s" % (obj_uuid, old_perms, new_perms)
self.config_log(msg, level=SandeshLevel.SYS_NOTICE)
return {}
# end obj_chmod_http_post
def prop_collection_http_get(self):
if 'uuid' not in get_request().query:
raise cfgm_common.exceptions.HttpError(
400, 'Object uuid needed for property collection get')
obj_uuid = get_request().query.uuid
if 'fields' not in get_request().query:
raise cfgm_common.exceptions.HttpError(
400, 'Object fields needed for property collection get')
obj_fields = get_request().query.fields.split(',')
if 'position' in get_request().query:
fields_position = get_request().query.position
else:
fields_position = None
try:
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Object Not Found: ' + obj_uuid)
resource_class = self.get_resource_class(obj_type)
for obj_field in obj_fields:
if ((obj_field not in resource_class.prop_list_fields) and
(obj_field not in resource_class.prop_map_fields)):
                err_msg = '%s is neither "ListProperty" nor "MapProperty"' %(
                    obj_field)
raise cfgm_common.exceptions.HttpError(400, err_msg)
# request validations over
# common handling for all resource get
(ok, result) = self._get_common(get_request(), obj_uuid)
if not ok:
(code, msg) = result
self.config_object_error(
obj_uuid, None, None, 'prop_collection_http_get', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
try:
ok, result = self._db_conn.prop_collection_get(
obj_type, obj_uuid, obj_fields, fields_position)
if not ok:
self.config_object_error(
obj_uuid, None, None, 'prop_collection_http_get', result)
except NoIdError as e:
# Not present in DB
raise cfgm_common.exceptions.HttpError(404, str(e))
if not ok:
raise cfgm_common.exceptions.HttpError(500, result)
# check visibility
if (not result['id_perms'].get('user_visible', True) and
not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % obj_uuid
            self.config_object_error(
                obj_uuid, None, None, 'prop_collection_http_get', result)
raise cfgm_common.exceptions.HttpError(404, result)
# Prepare response
del result['id_perms']
return result
# end prop_collection_http_get
def prop_collection_http_post(self):
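        # Validate each requested add/modify/delete (list) or set/delete
        # (map) operation against the resource schema, then apply them
        # through _put_common() on the latest (possibly pending) version
        # of the object.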
request_params = get_request().json
# validate each requested operation
obj_uuid = request_params.get('uuid')
if not obj_uuid:
err_msg = 'Error: prop_collection_update needs obj_uuid'
raise cfgm_common.exceptions.HttpError(400, err_msg)
try:
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Object Not Found: ' + obj_uuid)
r_class = self.get_resource_class(obj_type)
for req_param in request_params.get('updates') or []:
obj_field = req_param.get('field')
if obj_field in r_class.prop_list_fields:
prop_coll_type = 'list'
elif obj_field in r_class.prop_map_fields:
prop_coll_type = 'map'
else:
                err_msg = '%s is neither "ListProperty" nor "MapProperty"' %(
                    obj_field)
raise cfgm_common.exceptions.HttpError(400, err_msg)
req_oper = req_param.get('operation').lower()
field_val = req_param.get('value')
field_pos = str(req_param.get('position'))
prop_type = r_class.prop_field_types[obj_field]['xsd_type']
prop_cls = cfgm_common.utils.str_to_class(prop_type, __name__)
prop_val_type = prop_cls.attr_field_type_vals[prop_cls.attr_fields[0]]['attr_type']
prop_val_cls = cfgm_common.utils.str_to_class(prop_val_type, __name__)
try:
self._validate_complex_type(prop_val_cls, field_val)
except Exception as e:
raise cfgm_common.exceptions.HttpError(400, str(e))
if prop_coll_type == 'list':
if req_oper not in ('add', 'modify', 'delete'):
err_msg = 'Unsupported operation %s in request %s' %(
req_oper, json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
if ((req_oper == 'add') and field_val is None):
                    err_msg = 'Add needs field value in request %s' %(
                        json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
elif ((req_oper == 'modify') and
None in (field_val, field_pos)):
                    err_msg = 'Modify needs field value and position in request %s' %(
                        json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
elif ((req_oper == 'delete') and field_pos is None):
                    err_msg = 'Delete needs field position in request %s' %(
                        json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
elif prop_coll_type == 'map':
if req_oper not in ('set', 'delete'):
err_msg = 'Unsupported operation %s in request %s' %(
req_oper, json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
if ((req_oper == 'set') and field_val is None):
                    err_msg = 'Set needs field value in request %s' %(
                        json.dumps(req_param))
                    raise cfgm_common.exceptions.HttpError(400, err_msg)
                elif ((req_oper == 'delete') and field_pos is None):
                    err_msg = 'Delete needs field position in request %s' %(
                        json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
# Get actual resource from DB
fields = r_class.prop_fields | r_class.ref_fields
try:
ok, result = self._db_conn.dbe_read(obj_type, obj_uuid,
obj_fields=fields)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
except Exception:
ok = False
result = cfgm_common.utils.detailed_traceback()
if not ok:
self.config_object_error(
obj_uuid, None, obj_type, 'prop_collection_update', result[1])
raise cfgm_common.exceptions.HttpError(result[0], result[1])
db_obj_dict = result
# Look if the resource have a pending version, if yes use it as resource
# to update
if hasattr(r_class, 'get_pending_resource'):
ok, result = r_class.get_pending_resource(db_obj_dict, fields)
if ok and isinstance(result, dict):
db_obj_dict = result
obj_uuid = db_obj_dict['uuid']
if not ok and result[0] != 404:
self.config_object_error(obj_uuid, None, obj_type,
'prop_collection_update', result[1])
raise cfgm_common.exceptions.HttpError(result[0], result[1])
self._put_common('prop-collection-update', obj_type, obj_uuid,
db_obj_dict,
req_prop_coll_updates=request_params.get('updates'))
# end prop_collection_http_post
def ref_update_http_post(self):
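        # Handles POST /ref-update. Illustrative request body:
        #   {"type": "virtual-network", "uuid": "<obj-uuid>",
        #    "ref-type": "network-policy", "ref-uuid": "<ref-uuid>",
        #    "operation": "ADD", "attr": {...}}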
# grab fields
type = get_request().json.get('type')
res_type, res_class = self._validate_resource_type(type)
obj_uuid = get_request().json.get('uuid')
ref_type = get_request().json.get('ref-type')
ref_field = '%s_refs' %(ref_type.replace('-', '_'))
ref_res_type, ref_class = self._validate_resource_type(ref_type)
operation = get_request().json.get('operation')
ref_uuid = get_request().json.get('ref-uuid')
ref_fq_name = get_request().json.get('ref-fq-name')
attr = get_request().json.get('attr')
relax_ref_for_delete = get_request().json.get('relax_ref_for_delete', False)
# validate fields
if None in (res_type, obj_uuid, ref_res_type, operation):
err_msg = 'Bad Request: type/uuid/ref-type/operation is null: '
err_msg += '%s, %s, %s, %s.' \
%(res_type, obj_uuid, ref_res_type, operation)
raise cfgm_common.exceptions.HttpError(400, err_msg)
operation = operation.upper()
if operation not in ['ADD', 'DELETE']:
err_msg = 'Bad Request: operation should be add or delete: %s' \
%(operation)
raise cfgm_common.exceptions.HttpError(400, err_msg)
if not ref_uuid and not ref_fq_name:
err_msg = 'Bad Request: ref-uuid or ref-fq-name must be specified'
raise cfgm_common.exceptions.HttpError(400, err_msg)
obj_type = res_class.object_type
ref_obj_type = ref_class.object_type
if not ref_uuid:
try:
ref_uuid = self._db_conn.fq_name_to_uuid(ref_obj_type, ref_fq_name)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Name ' + pformat(ref_fq_name) + ' not found')
elif operation == 'ADD':
# if UUID provided verify existence of the reference being added
try:
ref_fq_name = self._db_conn.uuid_to_fq_name(ref_uuid)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
# To invoke type specific hook and extension manager
fields = res_class.prop_fields | res_class.ref_fields
try:
ok, result = self._db_conn.dbe_read(obj_type, obj_uuid, fields)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
except Exception:
ok = False
result = cfgm_common.utils.detailed_traceback()
if not ok:
self.config_object_error(obj_uuid, None, obj_type, 'ref_update',
result[1])
raise cfgm_common.exceptions.HttpError(result[0], result[1])
db_obj_dict = result
# Look if the resource have a pending version, if yes use it as resource
# to update
if hasattr(res_class, 'get_pending_resource'):
ok, result = res_class.get_pending_resource(db_obj_dict, fields)
if ok and isinstance(result, dict):
db_obj_dict = result
obj_uuid = db_obj_dict['uuid']
if not ok and result[0] != 404:
self.config_object_error(
obj_uuid, None, obj_type, 'ref_update', result[1])
raise cfgm_common.exceptions.HttpError(result[0], result[1])
obj_dict = {'uuid': obj_uuid}
if ref_field in db_obj_dict:
obj_dict[ref_field] = copy.deepcopy(db_obj_dict[ref_field])
if operation == 'ADD':
if ref_obj_type+'_refs' not in obj_dict:
obj_dict[ref_obj_type + '_refs'] = []
for ref in obj_dict.get(ref_obj_type + '_refs', []):
if ref['uuid'] == ref_uuid:
ref['attr'] = attr
break
else:
obj_dict[ref_obj_type+'_refs'].append(
{'to':ref_fq_name, 'uuid': ref_uuid, 'attr':attr})
elif operation == 'DELETE':
for old_ref in obj_dict.get(ref_obj_type+'_refs', []):
if old_ref['to'] == ref_fq_name or old_ref['uuid'] == ref_uuid:
obj_dict[ref_obj_type+'_refs'].remove(old_ref)
break
ref_args = {'ref_obj_type':ref_obj_type, 'ref_uuid': ref_uuid,
'operation': operation, 'data': {'attr': attr},
'relax_ref_for_delete': relax_ref_for_delete}
self._put_common('ref-update', obj_type, obj_uuid, db_obj_dict,
req_obj_dict=obj_dict, ref_args=ref_args)
return {'uuid': obj_uuid}
# end ref_update_http_post
def ref_relax_for_delete_http_post(self):
self._post_common(None, {})
# grab fields
obj_uuid = get_request().json.get('uuid')
ref_uuid = get_request().json.get('ref-uuid')
# validate fields
if None in (obj_uuid, ref_uuid):
err_msg = 'Bad Request: Both uuid and ref-uuid should be specified: '
err_msg += '%s, %s.' %(obj_uuid, ref_uuid)
raise cfgm_common.exceptions.HttpError(400, err_msg)
try:
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
self._db_conn.ref_relax_for_delete(obj_uuid, ref_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'uuid ' + obj_uuid + ' not found')
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
fq_name = self._db_conn.uuid_to_fq_name(obj_uuid)
apiConfig.identifier_name=':'.join(fq_name)
apiConfig.identifier_uuid = obj_uuid
apiConfig.operation = 'ref-relax-for-delete'
try:
body = json.dumps(get_request().json)
        except Exception:
body = str(get_request().json)
apiConfig.body = body
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
return {'uuid': obj_uuid}
# end ref_relax_for_delete_http_post
def fq_name_to_id_http_post(self):
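        # Illustrative request body:
        #   {"type": "virtual-network", "fq_name": ["default-domain", ...]}
        # Returns {"uuid": <uuid>} if the caller is allowed to read the object.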
self._post_common(None, {})
type = get_request().json.get('type')
res_type, r_class = self._validate_resource_type(type)
obj_type = r_class.object_type
fq_name = get_request().json['fq_name']
try:
id = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
except NoIdError:
if obj_type == 'project':
resource_type, r_class = self._validate_resource_type(obj_type)
try:
self._extension_mgrs['resourceApi'].map_method(
'pre_%s_read_fqname' %(obj_type), fq_name)
id = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
except Exception as e:
self.config_log("fq_name_to_id_http_post error: " + str(e),
level=SandeshLevel.SYS_DEBUG)
raise cfgm_common.exceptions.HttpError(
404, 'Name ' + pformat(fq_name) + ' not found')
else:
raise cfgm_common.exceptions.HttpError(
404, 'Name ' + pformat(fq_name) + ' not found')
# ensure user has access to this id
ok, result = self._permissions.check_perms_read(bottle.request, id)
if not ok:
err_code, err_msg = result
raise cfgm_common.exceptions.HttpError(err_code, err_msg)
return {'uuid': id}
# end fq_name_to_id_http_post
def id_to_fq_name_http_post(self):
self._post_common(None, {})
obj_uuid = get_request().json['uuid']
# ensure user has access to this id
ok, result = self._permissions.check_perms_read(get_request(), obj_uuid)
if not ok:
err_code, err_msg = result
raise cfgm_common.exceptions.HttpError(err_code, err_msg)
try:
fq_name = self._db_conn.uuid_to_fq_name(obj_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'UUID ' + obj_uuid + ' not found')
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
res_type = self.get_resource_class(obj_type).resource_type
return {'fq_name': fq_name, 'type': res_type}
# end id_to_fq_name_http_post
# Enables a user-agent to store and retrieve key-val pair
# TODO this should be done only for special/quantum plugin
def useragent_kv_http_post(self):
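        # Illustrative request body:
        #   {"operation": "STORE" | "RETRIEVE" | "DELETE",
        #    "key": "<key>", "value": "<value>"}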
self._post_common(None, {})
request_params = get_request().json
oper = request_params.get('operation')
if oper is None:
err_msg = ("Error: Key/value store API needs 'operation' "
"parameter")
raise cfgm_common.exceptions.HttpError(400, err_msg)
if 'key' not in request_params:
err_msg = ("Error: Key/value store API needs 'key' parameter")
raise cfgm_common.exceptions.HttpError(400, err_msg)
key = request_params.get('key')
val = request_params.get('value', '')
# TODO move values to common
if oper == 'STORE':
self._db_conn.useragent_kv_store(key, val)
elif oper == 'RETRIEVE':
try:
result = self._db_conn.useragent_kv_retrieve(key)
return {'value': result}
except NoUserAgentKey:
raise cfgm_common.exceptions.HttpError(
404, "Unknown User-Agent key " + key)
elif oper == 'DELETE':
result = self._db_conn.useragent_kv_delete(key)
else:
raise cfgm_common.exceptions.HttpError(
404, "Invalid Operation " + oper)
# end useragent_kv_http_post
def db_check(self):
""" Check database for inconsistencies. No update to database """
check_result = self._db_conn.db_check()
return {'results': check_result}
# end db_check
def fetch_records(self):
""" Retrieve and return all records """
result = self._db_conn.db_read()
return {'results': result}
# end fetch_records
def start_profile(self):
#GreenletProfiler.start()
pass
# end start_profile
def stop_profile(self):
pass
#GreenletProfiler.stop()
#stats = GreenletProfiler.get_func_stats()
#self._profile_info = stats.print_all()
#return self._profile_info
# end stop_profile
def get_profile_info(self):
return self._profile_info
# end get_profile_info
def get_resource_class(self, type_str):
try:
return self._resource_classes[type_str]
except KeyError:
raise TypeError('Invalid Contrail resource type: %s' % type_str)
def list_bulk_collection_http_post(self):
""" List collection when requested ids don't fit in query params."""
type = get_request().json.get('type') # e.g. virtual-network
resource_type, r_class = self._validate_resource_type(type)
try:
parent_uuids = get_request().json['parent_id'].split(',')
except KeyError:
parent_uuids = None
try:
back_ref_uuids = get_request().json['back_ref_id'].split(',')
except KeyError:
back_ref_uuids = None
try:
obj_uuids = get_request().json['obj_uuids'].split(',')
except KeyError:
obj_uuids = None
is_count = get_request().json.get('count', False)
is_detail = get_request().json.get('detail', False)
include_shared = get_request().json.get('shared', False)
try:
filters = utils.get_filters(get_request().json.get('filters'))
except Exception as e:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid filter ' + get_request().json.get('filters'))
req_fields = get_request().json.get('fields', [])
if req_fields:
req_fields = req_fields.split(',')
exclude_hrefs = get_request().json.get('exclude_hrefs', False)
pagination = {}
if 'page_marker' in get_request().json:
pagination['marker'] = self._validate_page_marker(
get_request().json['page_marker'])
if 'page_limit' in get_request().json:
pagination['limit'] = self._validate_page_limit(
get_request().json['page_limit'])
return self._list_collection(r_class.object_type, parent_uuids,
back_ref_uuids, obj_uuids, is_count,
is_detail, filters, req_fields,
include_shared, exclude_hrefs,
pagination)
# end list_bulk_collection_http_post
# Get hbs
def hbs_get(self):
self._post_common(None, {})
# Get hbs fq_name from request
req_json = get_request().json
# valid json data required
if not req_json:
raise cfgm_common.exceptions.HttpError(
                400, 'HBS fq_name or uuid (hbs_fq_name/hbs_uuid) required for hbs daemonset get')
hbs_fq_name = req_json.get('hbs_fq_name', '')
hbs_uuid = req_json.get('hbs_uuid', '')
# hbs fq_name or uuid required
if not hbs_fq_name and not hbs_uuid:
raise cfgm_common.exceptions.HttpError(
                400, 'HBS fq_name or uuid (hbs_fq_name/hbs_uuid) required for hbs daemonset get')
# get hbs uuid, if not given for rbac checks
if not hbs_uuid:
try:
hbs_uuid = self._db_conn.fq_name_to_uuid('host_based_service', hbs_fq_name)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Name ' + pformat(hbs_fq_name) + ' not found')
# rbac check for hbs object operations
ok, result = self._permissions.check_perms_read(get_request(), hbs_uuid)
if not ok:
raise cfgm_common.exceptions.HttpError(result[0], result[1])
hbs_class = self.get_resource_class('host-based-service')
# get hbs info
hbs_info = hbs_class.get_hbs_info(hbs_fq_name, hbs_uuid)
namespace_template = hbs_class.get_hbs_namespace(hbs_info['namespace'])
ds_template = hbs_class.get_hbs_ds(hbs_info)
left_vn_template = hbs_class.get_hbs_network(hbs_info['vnleft']['uuid'],
'left',
hbs_info['namespace'])
right_vn_template = hbs_class.get_hbs_network(hbs_info['vnright']['uuid'],
'right',
hbs_info['namespace'])
hbs = {'hbs': [left_vn_template, right_vn_template, ds_template]}
return hbs
# end hbs_get
# Private Methods
def _parse_args(self, args_str):
self._args, _ = utils.parse_args(args_str)
# end _parse_args
# sigchld handler is currently not engaged. See comment @sigchld
def sigchld_handler(self):
# DB interface initialization
self._db_connect(reset_config=False)
self._db_init_entries()
# end sigchld_handler
def sigterm_handler(self, exit_arg=None):
if exit_arg:
self.config_log(exit_arg, level=SandeshLevel.SYS_ERR)
exit(exit_arg)
# sighup handler for applying new configs
def sighup_handler(self):
if self._args.conf_file:
config = SafeConfigParser()
config.read(self._args.conf_file)
if 'DEFAULTS' in config.sections():
try:
collectors = config.get('DEFAULTS', 'collectors')
if isinstance(collectors, string_types):
collectors = collectors.split()
new_chksum = hashlib.md5("".join(collectors)).hexdigest()
if new_chksum != self._chksum:
self._chksum = new_chksum
self._random_collectors = random.sample(collectors, len(collectors))
# Reconnect to achieve load-balance irrespective of list
self._sandesh.reconfig_collectors(self._random_collectors)
except NoOptionError as e:
pass
# end sighup_handler
def _load_extensions(self):
try:
conf_sections = self._args.config_sections
hostname = socket.getfqdn(self._args.listen_ip_addr)
self._extension_mgrs['resourceApi'] = ExtensionManager(
'vnc_cfg_api.resourceApi',
propagate_map_exceptions=True,
api_server_ip=hostname,
api_server_port=self._args.listen_port,
conf_sections=conf_sections, sandesh=self._sandesh)
            if (self._args.auth != 'no-auth' and
                    self._args.auth != 'noauth'):
self._extension_mgrs['resync'] = ExtensionManager(
'vnc_cfg_api.resync', api_server_ip=hostname,
api_server_port=self._args.listen_port,
conf_sections=conf_sections, sandesh=self._sandesh)
self._extension_mgrs['resourceApi'].map_method(
'set_resync_extension_manager', self._extension_mgrs['resync'])
self._extension_mgrs['neutronApi'] = ExtensionManager(
'vnc_cfg_api.neutronApi',
api_server_ip=hostname,
api_server_port=self._args.listen_port,
conf_sections=conf_sections, sandesh=self._sandesh,
api_server_obj=self)
except Exception as e:
err_msg = cfgm_common.utils.detailed_traceback()
self.config_log("Exception in extension load: %s" %(err_msg),
level=SandeshLevel.SYS_ERR)
# end _load_extensions
def _db_connect(self, reset_config):
cass_server_list = self._args.cassandra_server_list
redis_server_ip = self._args.redis_server_ip
redis_server_port = self._args.redis_server_port
zk_server = self._args.zk_server_ip
rabbit_servers = self._args.rabbit_server
rabbit_port = self._args.rabbit_port
rabbit_user = self._args.rabbit_user
rabbit_password = self._args.rabbit_password
rabbit_vhost = self._args.rabbit_vhost
rabbit_ha_mode = self._args.rabbit_ha_mode
cassandra_user = self._args.cassandra_user
cassandra_password = self._args.cassandra_password
cassandra_use_ssl = self._args.cassandra_use_ssl
cassandra_ca_certs = self._args.cassandra_ca_certs
obj_cache_entries = int(self._args.object_cache_entries)
obj_cache_exclude_types = \
[t.replace('-', '_').strip() for t in
self._args.object_cache_exclude_types.split(',')]
debug_obj_cache_types = \
[t.replace('-', '_').strip() for t in
self._args.debug_object_cache_types.split(',')]
db_engine = self._args.db_engine
self._db_engine = db_engine
cred = None
db_server_list = None
if db_engine == 'cassandra':
if cassandra_user is not None and cassandra_password is not None:
cred = {'username':cassandra_user,'password':cassandra_password}
db_server_list = cass_server_list
self._db_conn = VncDbClient(
self, db_server_list, rabbit_servers, rabbit_port, rabbit_user,
rabbit_password, rabbit_vhost, rabbit_ha_mode, self._args.listen_ip_addr,
reset_config, zk_server, self._args.cluster_id, db_credential=cred,
db_engine=db_engine, rabbit_use_ssl=self._args.rabbit_use_ssl,
kombu_ssl_version=self._args.kombu_ssl_version,
kombu_ssl_keyfile= self._args.kombu_ssl_keyfile,
kombu_ssl_certfile=self._args.kombu_ssl_certfile,
kombu_ssl_ca_certs=self._args.kombu_ssl_ca_certs,
obj_cache_entries=obj_cache_entries,
obj_cache_exclude_types=obj_cache_exclude_types,
debug_obj_cache_types=debug_obj_cache_types,
cassandra_use_ssl=self._args.cassandra_use_ssl,
cassandra_ca_certs=self._args.cassandra_ca_certs)
# TODO: refactor db connection management.
self._addr_mgmt._get_db_conn()
# end _db_connect
def _ensure_id_perms_present(self, obj_uuid, obj_dict):
"""
Called at resource creation to ensure that id_perms is present in obj
"""
# retrieve object and permissions
id_perms = self._get_default_id_perms()
if (('id_perms' not in obj_dict) or
(obj_dict['id_perms'] is None)):
# Resource creation
if obj_uuid is None:
obj_dict['id_perms'] = id_perms
return
return
# retrieve the previous version of the id_perms
# from the database and update the id_perms with
# them.
if obj_uuid is not None:
try:
old_id_perms = self._db_conn.uuid_to_obj_perms(obj_uuid)
for field, value in list(old_id_perms.items()):
if value is not None:
id_perms[field] = value
except NoIdError:
pass
# not all fields can be updated
if obj_uuid:
field_list = ['enable', 'description']
else:
field_list = ['enable', 'description', 'user_visible', 'creator']
# Start from default and update from obj_dict
req_id_perms = obj_dict['id_perms']
for key in field_list:
if key in req_id_perms:
id_perms[key] = req_id_perms[key]
# TODO handle perms present in req_id_perms
obj_dict['id_perms'] = id_perms
# end _ensure_id_perms_present
def _get_default_id_perms(self, **kwargs):
id_perms = copy.deepcopy(Provision.defaults.perms)
id_perms_json = json.dumps(id_perms, default=lambda o: dict((k, v)
for k, v in o.__dict__.items()))
id_perms_dict = json.loads(id_perms_json)
id_perms_dict.update(kwargs)
return id_perms_dict
# end _get_default_id_perms
def _ensure_perms2_present(self, obj_type, obj_uuid, obj_dict,
project_id=None):
"""
Called at resource creation to ensure that perms2 is present in obj
"""
# retrieve object and permissions
perms2 = self._get_default_perms2()
# set ownership of object to creator tenant
if obj_type == 'project' and 'uuid' in obj_dict:
perms2['owner'] = str(obj_dict['uuid']).replace('-', '')
elif obj_dict.get('perms2') and obj_dict['perms2'].get('owner'):
perms2['owner'] = obj_dict['perms2']['owner']
elif 'fq_name' in obj_dict and obj_dict['fq_name'][:-1]:
if 'parent_type' in obj_dict:
parent_type = obj_dict['parent_type'].replace('-', '_')
else:
r_class = self.get_resource_class(obj_type)
if (len(r_class.parent_types) != 1):
msg = ("Ambiguous parent to ensure permissiosn of %s, "
"please choose one parent type: %s" %
(obj_type, pformat(r_class.parent_types)))
raise cfgm_common.exceptions.HttpError(400, msg)
parent_type = r_class.parent_types[0].replace('-', '_')
if parent_type == 'domain':
if project_id:
perms2['owner'] = project_id
else:
perms2['owner'] = 'cloud-admin'
else:
parent_fq_name = obj_dict['fq_name'][:-1]
parent_uuid = obj_dict.get('parent_uuid')
try:
if parent_uuid is None:
try:
parent_uuid = self._db_conn.fq_name_to_uuid(
parent_type, parent_fq_name)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Name ' + pformat(parent_fq_name) + ' not found')
ok, parent_obj_dict = self._db_conn.dbe_read(
parent_type, parent_uuid, obj_fields=['perms2'])
except NoIdError as e:
msg = "Parent %s cannot be found: %s" % (parent_type, str(e))
raise cfgm_common.exceptions.HttpError(404, msg)
perms2['owner'] = parent_obj_dict['perms2']['owner']
elif project_id:
perms2['owner'] = project_id
else:
perms2['owner'] = 'cloud-admin'
if obj_dict.get('perms2') is None:
# Resource creation
if obj_uuid is None:
obj_dict['perms2'] = perms2
return
# Resource already exists
try:
obj_dict['perms2'] = self._db_conn.uuid_to_obj_perms2(obj_uuid)
except NoIdError:
obj_dict['perms2'] = perms2
return
# retrieve the previous version of the perms2
# from the database and update the perms2 with
# them.
if obj_uuid is not None:
try:
old_perms2 = self._db_conn.uuid_to_obj_perms2(obj_uuid)
for field, value in list(old_perms2.items()):
if value is not None:
perms2[field] = value
except NoIdError:
pass
# Start from default and update from obj_dict
req_perms2 = obj_dict['perms2']
for key in req_perms2:
perms2[key] = req_perms2[key]
# TODO handle perms2 present in req_perms2
obj_dict['perms2'] = perms2
# ensure is_shared and global_access are consistent
shared = obj_dict.get('is_shared', None)
gaccess = obj_dict['perms2'].get('global_access', None)
if (gaccess is not None and shared is not None and
shared != (gaccess != 0)):
msg = ("Inconsistent is_shared (%s a) and global_access (%s)" %
(shared, gaccess))
# NOTE(ethuleau): ignore the exception for the moment as it breaks the
# Neutron use case where external networks have global access but
# their property 'is_shared' is False https://review.opencontrail.org/#/q/Id6a0c1a509d7663da8e5bc86f2c7c91c73d420a2
# Before patch https://review.opencontrail.org/#q,I9f53c0f21983bf191b4c51318745eb348d48dd86,n,z
# the error was also ignored, as none of the errors returned by that
# method were taken into account
# raise cfgm_common.exceptions.HttpError(400, msg)
def _get_default_perms2(self):
perms2 = copy.deepcopy(Provision.defaults.perms2)
perms2_json = json.dumps(perms2, default=lambda o: dict((k, v)
for k, v in o.__dict__.items()))
perms2_dict = json.loads(perms2_json)
return perms2_dict
# end _get_default_perms2
def _db_init_entries(self):
# create singleton defaults if they don't exist already in db
gsc = self.create_singleton_entry(GlobalSystemConfig(
autonomous_system=64512, config_version=CONFIG_VERSION))
self._gsc_uuid = gsc.uuid
gvc = self.create_singleton_entry(GlobalVrouterConfig(
parent_obj=gsc))
domain = self.create_singleton_entry(Domain())
self._default_domain = domain.serialize_to_json()
project = self.create_singleton_entry(Project(parent_obj=domain))
self._default_project = project.serialize_to_json()
self.create_singleton_entry(Fabric())
# Global and default policy resources
pm = self.create_singleton_entry(PolicyManagement())
self._global_pm_uuid = pm.uuid
aps = self.create_singleton_entry(ApplicationPolicySet(
parent_obj=pm, all_applications=True))
ok, result = self._db_conn.ref_update(
ApplicationPolicySet.object_type,
aps.uuid,
GlobalVrouterConfig.object_type,
gvc.uuid,
{'attr': None},
'ADD',
None,
)
if not ok:
msg = ("Error while referencing global vrouter config %s with the "
"default global application policy set %s: %s" %
(gvc.uuid, aps.uuid, result[1]))
self.config_log(msg, level=SandeshLevel.SYS_ERR)
ip_fab_vn = self.create_singleton_entry(
VirtualNetwork(cfgm_common.IP_FABRIC_VN_FQ_NAME[-1],
is_provider_network=True))
self.create_singleton_entry(
RoutingInstance(cfgm_common.IP_FABRIC_VN_FQ_NAME[-1], ip_fab_vn,
routing_instance_is_default=True))
self.create_singleton_entry(
RoutingInstance('__default__', ip_fab_vn))
link_local_vn = self.create_singleton_entry(
VirtualNetwork(cfgm_common.LINK_LOCAL_VN_FQ_NAME[-1]))
self.create_singleton_entry(
RoutingInstance('__link_local__', link_local_vn,
routing_instance_is_default=True))
# dc network
dci_vn = self.create_singleton_entry(
VirtualNetwork(cfgm_common.DCI_VN_FQ_NAME[-1]))
self.create_singleton_entry(
RoutingInstance(cfgm_common.DCI_VN_FQ_NAME[-1], dci_vn,
routing_instance_is_default=True))
self.create_singleton_entry(
RoutingInstance('__default__', dci_vn))
# specifying alarm kwargs like contrail_alarm.py
alarm_kwargs = {"alarm_rules":
{"or_list" : [
{"and_list": [
{ "operand1": "UveConfigReq.err_info.*.",
"operation": "==",
"operand2": {"json_value": "True"}
} ]
} ]
},
"alarm_severity": 1,
"fq_name": [
"default-global-system-config",
"system-defined-bottle-request-size-limit"
],
"id_perms": {
"description": "Bottle request size limit exceeded."
},
"parent_type": "global-system-config",
"uve_keys": {
"uve_key": [
"config-node"
]
}
}
self.create_singleton_entry(Alarm(**alarm_kwargs))
try:
self.create_singleton_entry(
RoutingInstance('default-virtual-network',
routing_instance_is_default=True))
except Exception as e:
self.config_log('error while creating primary routing instance for '
'default-virtual-network: ' + str(e),
level=SandeshLevel.SYS_NOTICE)
# Create singleton SG __no_rule__ object for openstack
sg_rules = PolicyEntriesType()
id_perms = IdPermsType(enable=True,
description="Security group with no rules",
user_visible=True)
perms2 = PermType2(owner='cloud-admin')
perms2.set_global_access(PERMS_RX)
# Creating SG without SG-ID which will then
# be populated and attached to this SG during
# singleton create
sg_obj = SecurityGroup(
name=SG_NO_RULE_FQ_NAME[-1],
parent_obj=project,
security_group_entries=sg_rules.exportDict(''),
id_perms=id_perms.exportDict(''),
perms2=perms2.exportDict(''))
self.create_singleton_entry(sg_obj)
self.create_singleton_entry(DiscoveryServiceAssignment())
self.create_singleton_entry(GlobalQosConfig())
sc_ipam_subnet_v4 = IpamSubnetType(subnet=SubnetType('0.0.0.0', 8))
sc_ipam_subnet_v6 = IpamSubnetType(subnet=SubnetType('::ffff', 104))
sc_ipam_subnets = IpamSubnets([sc_ipam_subnet_v4, sc_ipam_subnet_v6])
sc_ipam_obj = NetworkIpam('service-chain-flat-ipam',
ipam_subnet_method="flat-subnet", ipam_subnets=sc_ipam_subnets)
self.create_singleton_entry(sc_ipam_obj)
# Create pre-defined tag-type
for type_str, type_id in list(TagTypeNameToId.items()):
type_id_hex = "0x{:04x}".format(type_id)
tag = TagType(name=type_str, tag_type_id=type_id_hex)
tag.display_name = type_str
self.create_singleton_entry(tag, user_visible=False)
if int(self._args.worker_id) == 0:
self._db_conn.db_resync()
# make default ipam available across tenants for backward compatibility
obj_type = 'network_ipam'
fq_name = ['default-domain', 'default-project', 'default-network-ipam']
obj_uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
(ok, obj_dict) = self._db_conn.dbe_read(obj_type, obj_uuid,
obj_fields=['perms2'])
obj_dict['perms2']['global_access'] = PERMS_RX
self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict)
# end _db_init_entries
# generate default rbac group rule
def _create_default_rbac_rule(self):
# allow full access to cloud admin
rbac_rules = [
{
'rule_object':'fqname-to-id',
'rule_field': '',
'rule_perms': [{'role_name':'*', 'role_crud':'CRUD'}]
},
{
'rule_object':'id-to-fqname',
'rule_field': '',
'rule_perms': [{'role_name':'*', 'role_crud':'CRUD'}]
},
{
'rule_object':'useragent-kv',
'rule_field': '',
'rule_perms': [{'role_name':'*', 'role_crud':'CRUD'}]
},
{
'rule_object':'documentation',
'rule_field': '',
'rule_perms': [{'role_name':'*', 'role_crud':'R'}]
},
{
'rule_object':'/',
'rule_field': '',
'rule_perms': [{'role_name':'*', 'role_crud':'R'}]
},
]
obj_type = 'api_access_list'
fq_name = ['default-global-system-config', 'default-api-access-list']
try:
# ensure global list is not missing any default rules (bug 1642464)
id = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
(ok, obj_dict) = self._db_conn.dbe_read(obj_type, id)
update_obj = False
cur_rbac_rules = copy.deepcopy(obj_dict['api_access_list_entries']['rbac_rule'])
for rule in rbac_rules:
present = False
for existing_rule in cur_rbac_rules:
if rule == existing_rule:
present = True
cur_rbac_rules.remove(existing_rule)
break
if not present:
obj_dict['api_access_list_entries']['rbac_rule'].append(rule)
update_obj = True
if update_obj:
self._db_conn.dbe_update(obj_type, id, obj_dict)
return
except NoIdError:
pass
rge = RbacRuleEntriesType([])
for rule in rbac_rules:
rule_perms = [RbacPermType(role_name=p['role_name'], role_crud=p['role_crud']) for p in rule['rule_perms']]
rbac_rule = RbacRuleType(rule_object=rule['rule_object'],
rule_field=rule['rule_field'], rule_perms=rule_perms)
rge.add_rbac_rule(rbac_rule)
rge_dict = rge.exportDict('')
glb_rbac_cfg = ApiAccessList(parent_type='global-system-config',
fq_name=fq_name, api_access_list_entries = rge_dict)
try:
self.create_singleton_entry(glb_rbac_cfg)
except Exception as e:
err_msg = 'Error creating default api access list object'
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
# end _create_default_rbac_rule
def _resync_domains_projects(self, ext):
if hasattr(ext.obj, 'resync_domains_projects'):
ext.obj.resync_domains_projects()
# end _resync_domains_projects
def create_singleton_entry(self, singleton_obj, user_visible=True):
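"""
Ensure a singleton object exists in the DB: reuse its uuid if the
fq_name is already known, otherwise allocate ids where applicable
(virtual network id, tag id, security group id), create the object
and its default children, and return it with uuid populated.
"""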
s_obj = singleton_obj
obj_type = s_obj.object_type
fq_name = s_obj.get_fq_name()
# TODO remove backward compat create mapping in zk
# for singleton START
try:
cass_uuid = self._db_conn._object_db.fq_name_to_uuid(obj_type, fq_name)
try:
zk_uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
except NoIdError:
# doesn't exist in zookeeper but does so in cassandra,
# migrate this info to zookeeper
self._db_conn._zk_db.create_fq_name_to_uuid_mapping(obj_type, fq_name, str(cass_uuid))
except NoIdError:
# doesn't exist in cassandra as well as zookeeper, proceed normal
pass
# TODO backward compat END
# create if it doesn't exist yet
try:
s_obj.uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
except NoIdError:
obj_json = json.dumps(s_obj, default=_obj_serializer_all)
obj_dict = json.loads(obj_json)
if s_obj.get_id_perms():
obj_dict['id_perms'] = s_obj.get_id_perms()
else:
obj_dict['id_perms'] = self._get_default_id_perms(
user_visible=user_visible)
if s_obj.get_perms2():
obj_dict['perms2'] = s_obj.get_perms2()
else:
obj_dict['perms2'] = self._get_default_perms2()
(ok, result) = self._db_conn.dbe_alloc(obj_type, obj_dict)
if not ok:
(code, msg) = result
raise cfgm_common.exceptions.HttpError(code, msg)
obj_id = result
s_obj.uuid = obj_id
# For virtual networks, allocate an ID
if obj_type == 'virtual_network':
vn_id = self.alloc_vn_id(s_obj.get_fq_name_str())
obj_dict['virtual_network_network_id'] = vn_id
if obj_type == 'tag':
obj_dict = self._allocate_tag_id(obj_dict)
if obj_type == 'security_group':
sg_id = self.alloc_security_group_id(s_obj.get_fq_name_str())
obj_dict['security_group_id'] = sg_id
self._db_conn.dbe_create(obj_type, obj_id, obj_dict)
self.create_default_children(obj_type, s_obj)
return s_obj
# end create_singleton_entry
# allocate tag id for tag object
def _allocate_tag_id(self, obj_dict):
type_str = obj_dict['tag_type_name']
value_str = obj_dict['tag_value']
ok, result = self.get_resource_class('tag_type').locate(
[type_str], id_perms=IdPermsType(user_visible=False))
tag_type = result
obj_dict['tag_type_refs'] = [
{
'uuid': tag_type['uuid'],
'to': tag_type['fq_name'],
},
]
# Allocate ID for tag value. Use the full fq_name to distinguish
# identical tag values between global and scoped tags
value_id = self.get_resource_class(
'tag').vnc_zk_client.alloc_tag_value_id(
type_str, ':'.join(obj_dict['fq_name']))
# Compose Tag ID with the type ID and value ID
obj_dict['tag_id'] = "{}{:04x}".format(tag_type['tag_type_id'],
value_id)
return obj_dict
# end _allocate_tag_id
def _validate_page_marker(self, req_page_marker):
# query params always appear as strings
if req_page_marker and req_page_marker.lower() != 'none':
try:
req_page_marker_uuid = req_page_marker.split(':')[-1]
_ = str(uuid.UUID(req_page_marker_uuid))
except Exception as e:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid page_marker %s: %s' %(
req_page_marker, e))
else:
req_page_marker = None
return req_page_marker
# end _validate_page_marker
def _validate_page_limit(self, req_page_limit):
try:
val = int(req_page_limit)
if val <= 0:
raise Exception("page_limit has to be greater than zero")
except Exception as e:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid page_limit %s: %s' %(
req_page_limit, e))
return int(req_page_limit)
# end _validate_page_limit
def _list_collection(self, obj_type, parent_uuids=None,
back_ref_uuids=None, obj_uuids=None,
is_count=False, is_detail=False, filters=None,
req_fields=None, include_shared=False,
exclude_hrefs=False, pagination=None):
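"""
Common list handler: runs the pre/post dbe_list hooks, pages through
dbe_list results, applies visibility and RBAC read checks for
non-admin requests, and returns either a count or the collection
(with a 'marker' key when pagination is requested).
"""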
resource_type, r_class = self._validate_resource_type(obj_type)
is_admin = self.is_admin_request()
if is_admin:
field_names = req_fields
else:
field_names = [u'id_perms'] + (req_fields or [])
if is_count and is_admin:
ret_result = 0
else:
ret_result = []
page_filled = False
if 'marker' in pagination:
# if marker is None, start scanning from uuid 0
page_start = pagination['marker'] or '0'
if 'limit' in pagination:
page_count = pagination['limit']
else:
page_count = self._args.paginate_count
else:
page_start = None # cookie to start next search
page_count = None # remainder count to finish page
(ok, result) = r_class.pre_dbe_list(obj_uuids, self._db_conn)
if not ok:
(code, msg) = result
raise cfgm_common.exceptions.HttpError(code, msg)
while not page_filled:
(ok, result, ret_marker) = self._db_conn.dbe_list(obj_type,
parent_uuids, back_ref_uuids, obj_uuids, is_count and is_admin,
filters, is_detail=is_detail, field_names=field_names,
include_shared=include_shared,
paginate_start=page_start,
paginate_count=page_count)
if not ok:
self.config_object_error(None, None, '%ss' %(obj_type),
'dbe_list', result)
raise cfgm_common.exceptions.HttpError(404, result)
# If only counting, return early
if is_count and is_admin:
ret_result += result
return {'%ss' %(resource_type): {'count': ret_result}}
allowed_fields = set(['uuid', 'href', 'fq_name'])
allowed_fields |= set(req_fields or [])
obj_dicts = []
if is_admin:
for obj_result in result:
if not exclude_hrefs:
obj_result['href'] = self.generate_url(
resource_type, obj_result['uuid'])
if is_detail:
obj_result['name'] = obj_result['fq_name'][-1]
obj_dicts.append({resource_type: obj_result})
else:
obj_dicts.append(obj_result)
else:
# fetch all perms of child/ref/back_ref of listed resources in
# one DB call for performance reasons
if is_detail:
ref_uuids = {ref['uuid'] for link in r_class.obj_links
for o in result for ref in o.get(link, [])}
if self.is_rbac_enabled():
fields = ['perms2']
else:
fields = ['id_perms']
ref_dicts = self._db_conn._object_db.object_raw_read(
resource_type, list(ref_uuids), fields)
ref_perms = {obj_dict['uuid']: obj_dict
for obj_dict in ref_dicts}
for obj_result in result:
id_perms = obj_result.get('id_perms')
if not id_perms:
# It is possible that the object was deleted, but received
# an update after that. We need to ignore it for now. In
# future, we should clean up such stale objects
continue
if not id_perms.get('user_visible', True):
# skip items not authorized
continue
ok, status = self._permissions.check_perms_read(
get_request(), obj_result['uuid'], obj_result)
if not ok and status[0] == 403:
continue
obj_result['name'] = obj_result['fq_name'][-1]
if is_detail:
self.obj_view(resource_type, obj_result, ref_perms)
obj_dicts.append({resource_type: obj_result})
else:
for field in set(obj_result.keys()) - allowed_fields:
del obj_result[field]
obj_dicts.append(obj_result)
if not exclude_hrefs:
obj_result['href'] = self.generate_url(
resource_type, obj_result['uuid'])
# end obj_result in result
# end not admin req
ret_result.extend(obj_dicts)
if 'marker' not in pagination:
page_filled = True
elif ret_marker is None: # pagination request and done
page_filled = True
else: # pagination request and partially filled
page_start = ret_marker
page_count -= len(result)
if page_count <= 0:
page_filled = True
# end while not page_filled
(ok, err_msg) = r_class.post_dbe_list(ret_result, self._db_conn)
if not ok:
(code, msg) = err_msg
raise cfgm_common.exceptions.HttpError(code, msg)
if 'marker' in pagination: # send next marker along with results
if is_count:
return {'%ss' %(resource_type): {'count': len(ret_result)},
'marker': ret_marker}
else:
return {'%ss' %(resource_type): ret_result,
'marker': ret_marker}
if is_count:
return {'%ss' %(resource_type): {'count': len(ret_result)}}
else:
return {'%ss' %(resource_type): ret_result}
# end _list_collection
def get_db_connection(self):
return self._db_conn
# end get_db_connection
def generate_url(self, resource_type, obj_uuid):
try:
url_parts = get_request().urlparts
netloc = url_parts.netloc.replace('<script>', '<!--script>')
netloc = netloc.replace('</script>', '</script-->')
return '%s://%s/%s/%s'\
% (url_parts.scheme, netloc, resource_type, obj_uuid)
except Exception as e:
return '%s/%s/%s' % (self._base_url, resource_type, obj_uuid)
# end generate_url
def generate_hrefs(self, resource_type, obj_dict):
# add href keys to obj_dict for:
# self, parent, children, refs, backrefs
r_class = self.get_resource_class(resource_type)
obj_dict['href'] = self.generate_url(resource_type, obj_dict['uuid'])
try:
obj_dict['parent_href'] = self.generate_url(
obj_dict['parent_type'], obj_dict['parent_uuid'])
except KeyError:
# No parent
pass
for field, field_info in itertools.chain(
list(r_class.children_field_types.items()),
list(r_class.ref_field_types.items()),
list(r_class.backref_field_types.items()),
):
try:
link_type = field_info[0]
for link in obj_dict[field]:
link['href'] = self.generate_url(link_type, link['uuid'])
except KeyError:
pass
def config_object_error(self, id, fq_name_str, obj_type,
operation, err_str):
apiConfig = VncApiCommon()
if obj_type is not None:
apiConfig.object_type = obj_type
apiConfig.identifier_name = fq_name_str
apiConfig.identifier_uuid = id
apiConfig.operation = operation
if err_str:
apiConfig.error = "%s:%s" % (obj_type, err_str)
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
# end config_object_error
def config_log(self, msg_str, level=SandeshLevel.SYS_INFO):
errcls = {
SandeshLevel.SYS_DEBUG: VncApiDebug,
SandeshLevel.SYS_INFO: VncApiInfo,
SandeshLevel.SYS_NOTICE: VncApiNotice,
SandeshLevel.SYS_ERR: VncApiError,
}
errcls.get(level, VncApiError)(
api_msg=msg_str, level=level, sandesh=self._sandesh).send(
sandesh=self._sandesh)
# end config_log
def _set_api_audit_info(self, apiConfig):
apiConfig.url = get_request().url
apiConfig.remote_ip = get_request().headers.get('X-Requestor-IP')
if not apiConfig.remote_ip:
# If the X-Requestor-IP header was not sent, the request probably
# did not come from node.js. In that case, determine the remote IP as:
# 1. The first IP address of HTTP_X_FORWARDED_FOR, if present.
# 2. Else REMOTE_ADDR, if present.
# 3. Else the Host header.
if 'HTTP_X_FORWARDED_FOR' in get_request().environ:
addr = get_request().environ.get('HTTP_X_FORWARDED_FOR').split(',')
apiConfig.remote_ip = addr[0]
elif 'REMOTE_ADDR' in get_request().environ:
apiConfig.remote_ip = get_request().environ.get('REMOTE_ADDR')
else:
apiConfig.remote_ip = get_request().headers.get('Host')
useragent = get_request().headers.get('X-Contrail-Useragent')
if not useragent:
useragent = get_request().headers.get('User-Agent')
apiConfig.useragent = useragent
apiConfig.user = get_request().headers.get('X-User-Name')
apiConfig.project = get_request().headers.get('X-Project-Name')
apiConfig.domain = get_request().headers.get('X-Domain-Name', 'None')
if apiConfig.domain.lower() == 'none':
apiConfig.domain = 'default-domain'
if int(get_request().headers.get('Content-Length', 0)) > 0:
try:
body = json.dumps(get_request().json)
except:
body = str(get_request().json)
apiConfig.body = body
# end _set_api_audit_info
# for collections, uuid is the parent's uuid
def _get_common(self, request, uuid=None):
# TODO check api + resource perms etc.
if self.is_auth_needed() and uuid:
if isinstance(uuid, list):
for u_id in uuid:
ok, result = self._permissions.check_perms_read(request,
u_id)
if not ok:
return ok, result
else:
return self._permissions.check_perms_read(request, uuid)
return (True, '')
# end _get_common
def _put_common(
self, api_name, obj_type, obj_uuid, db_obj_dict, req_obj_dict=None,
req_prop_coll_updates=None, ref_args=None):
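"""
Common update handler: invokes the pre/post update extensions, checks
visibility and write permissions, validates properties/refs/tags, then
applies the change (ref_update, dbe_update or prop_collection_update)
and rolls back quota counters on failure.
"""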
obj_fq_name = db_obj_dict.get('fq_name', 'missing-fq-name')
# ZK and rabbitmq should be functional
self._ensure_services_conn(
api_name, obj_type, obj_uuid, obj_fq_name)
resource_type, r_class = self._validate_resource_type(obj_type)
try:
self._extension_mgrs['resourceApi'].map_method(
'pre_%s_update' %(obj_type), obj_uuid, req_obj_dict)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = 'In pre_%s_update an extension had error for %s' \
%(obj_type, req_obj_dict)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
db_conn = self._db_conn
# check visibility
if (not db_obj_dict['id_perms'].get('user_visible', True) and
not self.is_admin_request()):
result = 'This object is not visible by users: %s' % obj_uuid
self.config_object_error(obj_uuid, None, obj_type, api_name, result)
raise cfgm_common.exceptions.HttpError(404, result)
# properties validator (for collections validation in caller)
if req_obj_dict is not None:
ok, result = self._validate_props_in_request(r_class,
req_obj_dict, operation='UPDATE')
if not ok:
result = 'Bad property in %s: %s' %(api_name, result)
raise cfgm_common.exceptions.HttpError(400, result)
# references validator
if req_obj_dict is not None:
ok, result = self._validate_refs_in_request(r_class, req_obj_dict)
if not ok:
result = 'Bad reference in %s: %s' %(api_name, result)
raise cfgm_common.exceptions.HttpError(400, result)
# common handling for all resource put
request = get_request()
fq_name_str = ":".join(obj_fq_name or [])
if req_obj_dict:
if ('id_perms' in req_obj_dict and
req_obj_dict['id_perms'].get('uuid')):
if not self._db_conn.match_uuid(req_obj_dict, obj_uuid):
msg = (
"UUID mismatch from %s:%s" %
(request.environ.get('REMOTE_ADDR',
"Remote address not found"),
request.environ.get('HTTP_USER_AGENT',
"User agent not found"))
)
self.config_object_error(
obj_uuid, fq_name_str, obj_type, 'put', msg)
self._db_conn.set_uuid(obj_type, req_obj_dict,
uuid.UUID(obj_uuid),
do_lock=False)
# Ensure object has at least default permissions set
self._ensure_id_perms_present(obj_uuid, req_obj_dict)
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
apiConfig.identifier_name = fq_name_str
apiConfig.identifier_uuid = obj_uuid
apiConfig.operation = api_name
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig,
sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
if self.is_auth_needed():
ok, result = self._permissions.check_perms_write(request, obj_uuid)
if not ok:
(code, msg) = result
self.config_object_error(
obj_uuid, fq_name_str, obj_type, api_name, msg)
raise cfgm_common.exceptions.HttpError(code, msg)
# no ref to a pending deleted resource
ok, result = r_class.no_pending_deleted_resource_in_refs(req_obj_dict)
if not ok:
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
# Validate perms on references
if req_obj_dict is not None:
try:
self._validate_perms_in_request(
r_class, obj_type, req_obj_dict)
except NoIdError:
raise cfgm_common.exceptions.HttpError(400,
'Unknown reference in resource update %s %s.'
%(obj_type, req_obj_dict))
# State modification starts from here. Ensure that cleanup is done for all state changes
cleanup_on_failure = []
if req_obj_dict is not None:
req_obj_dict['uuid'] = obj_uuid
# Permit aborting the resource update and return a 202 status code
get_context().set_state('PENDING_DBE_UPDATE')
ok, result = r_class.pending_dbe_update(db_obj_dict, req_obj_dict,
req_prop_coll_updates)
if not ok:
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
if ok and isinstance(result, tuple) and result[0] == 202:
# Modifications accepted but not yet applied; a pending update
# returns HTTP 202 so clients are aware
bottle.response.status = 202
return True, ''
def stateful_update():
get_context().set_state('PRE_DBE_UPDATE')
# type-specific hook
_req_obj_dict = {}
if req_obj_dict:
_req_obj_dict = req_obj_dict
(ok, result) = r_class.pre_dbe_update(
obj_uuid, obj_fq_name, _req_obj_dict, self._db_conn,
prop_collection_updates=req_prop_coll_updates)
if not ok:
return (ok, result)
attr_to_publish = None
if isinstance(result, dict):
attr_to_publish = result
# All resource types can have tag refs, but there are some constraints.
# Done after PRE_DBE_UPDATE as tag refs can be modified in that hook
ok, result = self._validate_tag_refs(obj_type, req_obj_dict)
if not ok:
return False, result
get_context().set_state('DBE_UPDATE')
if api_name == 'ref-update':
# read ref_update args
ref_obj_type = ref_args.get('ref_obj_type')
ref_uuid = ref_args.get('ref_uuid')
ref_data = ref_args.get('data')
operation = ref_args.get('operation')
relax_ref_for_delete = ref_args.get('relax_ref_for_delete', False)
(ok, result) = db_conn.ref_update(
obj_type,
obj_uuid,
ref_obj_type,
ref_uuid,
ref_data,
operation,
db_obj_dict['id_perms'],
attr_to_publish=attr_to_publish,
relax_ref_for_delete=relax_ref_for_delete
)
elif req_obj_dict:
(ok, result) = db_conn.dbe_update(
obj_type,
obj_uuid,
req_obj_dict,
attr_to_publish=attr_to_publish,
)
# Update quota counter
if resource_type == 'project' and 'quota' in req_obj_dict:
path_prefix = self._path_prefix + obj_uuid
try:
QuotaHelper._zk_quota_counter_update(
path_prefix,
req_obj_dict,
obj_uuid,
db_conn,
self.quota_counter)
except NoIdError:
msg = "Error in initializing quota "\
"Internal error : Failed to read resource count"
self.config_log(msg, level=SandeshLevel.SYS_ERR)
elif req_prop_coll_updates:
(ok, result) = db_conn.prop_collection_update(
obj_type,
obj_uuid,
req_prop_coll_updates,
attr_to_publish=attr_to_publish,
)
if not ok:
return (ok, result)
get_context().set_state('POST_DBE_UPDATE')
# type-specific hook
(ok, result) = r_class.post_dbe_update(
obj_uuid, obj_fq_name, _req_obj_dict, self._db_conn,
prop_collection_updates=req_prop_coll_updates)
if not ok:
return (ok, result)
return (ok, result)
# end stateful_update
try:
ok, result = stateful_update()
except Exception as e:
ok = False
err_msg = cfgm_common.utils.detailed_traceback()
result = (500, traceback_err_msg(err_msg))
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
if not ok:
self.undo(result, obj_type, id=obj_uuid)
# Revert changes made to quota counter by using DB quota dict
if resource_type == 'project' and req_obj_dict and 'quota' in req_obj_dict:
path_prefix = self._path_prefix + obj_uuid
try:
QuotaHelper._zk_quota_counter_update(
path_prefix,
db_obj_dict,
obj_uuid,
self._db_conn,
self.quota_counter)
except NoIdError:
err_msg = "Error in rolling back quota count on undo "\
"Internal error : Failed to read resource count"
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
try:
self._extension_mgrs['resourceApi'].map_method(
'post_%s_update' %(obj_type), obj_uuid,
req_obj_dict, db_obj_dict)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = 'In post_%s_update an extension had error for %s' \
%(obj_type, req_obj_dict)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# end _put_common
# parent_type needed for perms check. None for derived objects (eg.
# routing-instance)
def _delete_common(self, request, obj_type, uuid, parent_uuid):
# If not connected to zookeeper do not allow operations that
# causes the state change
if not self._db_conn._zk_db.is_connected():
return (False,
(503, "Not connected to zookeeper. Not able to perform requested action"))
# If there are too many pending updates to rabbit, do not allow
# operations that cause state change
npending = self._db_conn.dbe_oper_publish_pending()
if (npending >= int(self._args.rabbit_max_pending_updates)):
err_str = str(MaxRabbitPendingError(npending))
return (False, (500, err_str))
fq_name = self._db_conn.uuid_to_fq_name(uuid)
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
apiConfig.identifier_name=':'.join(fq_name)
apiConfig.identifier_uuid = uuid
apiConfig.operation = 'delete'
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
# TODO check api + resource perms etc.
if not self.is_auth_needed() or not parent_uuid:
return (True, '')
"""
Validate parent allows write access. Implicitly trust
parent info in the object since it comes from our DB.
"""
return self._permissions.check_perms_delete(request, obj_type, uuid,
parent_uuid)
# end _http_delete_common
def _post_validate(self, obj_type=None, obj_dict=None):
if not obj_dict:
return
def _check_field_present(fname):
fval = obj_dict.get(fname)
if not fval:
raise cfgm_common.exceptions.HttpError(
400, "Bad Request, no %s in POST body" %(fname))
return fval
fq_name = _check_field_present('fq_name')
# well-formed name checks
if illegal_xml_chars_RE.search(fq_name[-1]):
raise cfgm_common.exceptions.HttpError(400,
"Bad Request, name has illegal xml characters")
if obj_type == 'route_target':
invalid_chars = self._INVALID_NAME_CHARS - set(':')
else:
invalid_chars = self._INVALID_NAME_CHARS
if any((c in invalid_chars) for c in fq_name[-1]):
raise cfgm_common.exceptions.HttpError(400,
"Bad Request, name has one of invalid chars %s"
%(invalid_chars))
# end _post_validate
def validate_parent_type(self, obj_type, obj_dict):
parent_type = obj_dict.get('parent_type')
r_class = self.get_resource_class(obj_type)
allowed_parent_types = r_class.parent_types
if parent_type:
if parent_type not in allowed_parent_types:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid parent type: %s. Allowed types: %s' % (
parent_type, allowed_parent_types))
elif (len(allowed_parent_types) > 1 and
'config-root' not in allowed_parent_types):
raise cfgm_common.exceptions.HttpError(
400, 'Missing parent type: %s. Allowed types: %s' % (
parent_type, allowed_parent_types))
elif len(allowed_parent_types) == 1:
parent_type = allowed_parent_types[0]
if parent_type in ('config-root', None):
if len(obj_dict['fq_name']) != 1:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid fq-name of an object with no parent: %s' % (
obj_dict['fq_name']))
elif len(obj_dict['fq_name']) < 2:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid fq-name for object with parent_type %s: %s' % (
parent_type, obj_dict['fq_name']))
# end validate_parent_type
def _post_common(self, obj_type, obj_dict):
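"""
Common create handler: verifies the fq_name (and uuid, if supplied) is
not already in use, validates the parent type, fills in default
id_perms/perms2 and display_name, and logs the API config trace.
"""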
self._ensure_services_conn(
'http_post', obj_type, obj_fq_name=obj_dict.get('fq_name'))
if not obj_dict:
# TODO check api + resource perms etc.
return (True, None)
# Fail if object exists already
try:
obj_uuid = self._db_conn.fq_name_to_uuid(
obj_type, obj_dict['fq_name'])
raise cfgm_common.exceptions.HttpError(
409, '' + pformat(obj_dict['fq_name']) +
' already exists with uuid: ' + obj_uuid)
except NoIdError:
pass
self.validate_parent_type(obj_type, obj_dict)
# Ensure object has at least default permissions set
self._ensure_id_perms_present(None, obj_dict)
self._ensure_perms2_present(obj_type, None, obj_dict,
get_request().headers.environ.get('HTTP_X_PROJECT_ID', None))
# TODO check api + resource perms etc.
uuid_in_req = obj_dict.get('uuid', None)
# Set the display name
if (('display_name' not in obj_dict) or
(obj_dict['display_name'] is None)):
obj_dict['display_name'] = obj_dict['fq_name'][-1]
fq_name_str = ":".join(obj_dict['fq_name'])
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
apiConfig.identifier_name=fq_name_str
apiConfig.identifier_uuid = uuid_in_req
apiConfig.operation = 'post'
try:
body = json.dumps(get_request().json)
except:
body = str(get_request().json)
apiConfig.body = body
if uuid_in_req:
if uuid_in_req != str(uuid.UUID(uuid_in_req)):
bottle.abort(400, 'Invalid UUID format: ' + uuid_in_req)
try:
fq_name = self._db_conn.uuid_to_fq_name(uuid_in_req)
raise cfgm_common.exceptions.HttpError(
409, uuid_in_req + ' already exists with fq_name: ' +
pformat(fq_name))
except NoIdError:
pass
apiConfig.identifier_uuid = uuid_in_req
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
return (True, uuid_in_req)
# end _post_common
def reset(self):
# cleanup internal state/in-flight operations
if self._amqp_client is not None:
self._amqp_client.stop()
if self._db_conn:
self._db_conn.reset()
# end reset
# allocate block of IP addresses from VN. Subnet info expected in request
# body
def vn_ip_alloc_http_post(self, id):
try:
vn_fq_name = self._db_conn.uuid_to_fq_name(id)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Virtual Network ' + id + ' not found!')
# expected format {"subnet_list" : "2.1.1.0/24", "count" : 4}
req_dict = get_request().json
count = req_dict.get('count', 1)
subnet = req_dict.get('subnet')
family = req_dict.get('family')
try:
result = self.get_resource_class('virtual_network').ip_alloc(
vn_fq_name, subnet, count, family)
except vnc_addr_mgmt.AddrMgmtSubnetUndefined as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
except vnc_addr_mgmt.AddrMgmtSubnetExhausted as e:
raise cfgm_common.exceptions.HttpError(409, str(e))
return result
# end vn_ip_alloc_http_post
# free block of ip addresses to subnet
def vn_ip_free_http_post(self, id):
try:
vn_fq_name = self._db_conn.uuid_to_fq_name(id)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Virtual Network ' + id + ' not found!')
"""
{
"subnet" : "2.1.1.0/24",
"ip_addr": [ "2.1.1.239", "2.1.1.238", "2.1.1.237", "2.1.1.236" ]
}
"""
req_dict = get_request().json
ip_list = req_dict['ip_addr'] if 'ip_addr' in req_dict else []
result = self.get_resource_class('virtual_network').ip_free(
vn_fq_name, ip_list)
return result
# end vn_ip_free_http_post
# return no. of IP addresses from VN/Subnet
def vn_subnet_ip_count_http_post(self, id):
try:
vn_fq_name = self._db_conn.uuid_to_fq_name(id)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Virtual Network ' + id + ' not found!')
# expected format {"subnet_list" : ["2.1.1.0/24", "1.1.1.0/24"]
req_dict = get_request().json
try:
(ok, result) = self._db_conn.dbe_read('virtual_network', id)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
except Exception as e:
ok = False
result = cfgm_common.utils.detailed_traceback()
self.config_log(result, level=SandeshLevel.SYS_ERR)
if not ok:
raise cfgm_common.exceptions.HttpError(
500, traceback_err_msg(result))
obj_dict = result
subnet_list = req_dict[
'subnet_list'] if 'subnet_list' in req_dict else []
result = self.get_resource_class('virtual_network').subnet_ip_count(
vn_fq_name, subnet_list)
return result
# end vn_subnet_ip_count_http_post
# check if token validation is needed
def is_auth_needed(self):
return self.aaa_mode != 'no-auth'
def is_rbac_enabled(self):
return self.aaa_mode == 'rbac'
@property
def aaa_mode(self):
return self._args.aaa_mode
@aaa_mode.setter
def aaa_mode(self, mode):
self._args.aaa_mode = mode
# indicates whether multi-tenancy with rbac is enabled or disabled
def aaa_mode_http_get(self):
return {'aaa-mode': self.aaa_mode}
def aaa_mode_http_put(self):
aaa_mode = get_request().json['aaa-mode']
if aaa_mode not in AAA_MODE_VALID_VALUES:
raise ValueError('Invalid aaa-mode %s' % aaa_mode)
ok, result = self._auth_svc.validate_user_token()
if not ok:
code, msg = result
self.config_object_error(None, None, None, 'aaa_mode_http_put',
msg)
raise cfgm_common.exceptions.HttpError(code, msg)
if not self.is_admin_request():
raise cfgm_common.exceptions.HttpError(403, " Permission denied")
self.aaa_mode = aaa_mode
if self.is_rbac_enabled():
self._create_default_rbac_rule()
return {'aaa-mode': self.aaa_mode}
# end
@property
def cloud_admin_role(self):
return self._args.cloud_admin_role
@property
def global_read_only_role(self):
return self._args.global_read_only_role
def set_tag(self):
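"""
Set/unset tag references on a resource. The request body carries
'obj_type', 'obj_uuid' and, per tag type, either a 'value' (with an
optional 'is_global' flag) or 'add_values'/'delete_values' lists.
Illustrative body: {"obj_type": "virtual_network", "obj_uuid": "<uuid>",
"site": {"value": "paris"}, "label": {"add_values": ["blue"]}}.
A null body for a tag type removes all references of that type.
"""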
self._post_common(None, {})
req_dict = get_request().json
obj_type = req_dict.pop('obj_type', None)
obj_uuid = req_dict.pop('obj_uuid', None)
need_update = False
if obj_type is None or obj_uuid is None:
msg = "Object type and UUID must be specified"
raise cfgm_common.exceptions.HttpError(400, msg)
try:
r_class = self.get_resource_class(obj_type)
except TypeError as e:
raise cfgm_common.exceptions.HttpError(400, str(e))
try:
ok, result = self._db_conn.dbe_read(
r_class.object_type,
obj_uuid,
obj_fields=['parent_type', 'perms2', 'tag_refs'],
)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
if not ok:
raise cfgm_common.exceptions.HttpError(*result)
obj_dict = result
def _locate_tag(type, value, is_global=False):
name = type + "=" + value
# unless global, inherit project id from caller
if is_global:
fq_name = [name]
else:
fq_name = copy.deepcopy(obj_dict['fq_name'])
if obj_type == 'project':
fq_name.append(name)
elif ('parent_type' in obj_dict and
obj_dict['parent_type'] == 'project'):
fq_name[-1] = name
elif ('perms2' in obj_dict and
is_uuid_like(obj_dict['perms2']['owner'])):
parent_uuid = str(uuid.UUID(obj_dict['perms2']['owner']))
try:
fq_name = self._db_conn.uuid_to_fq_name(parent_uuid)
except NoIdError:
msg = ("Cannot find %s %s owner" %
(obj_type, obj_dict['uuid']))
raise cfgm_common.exceptions.HttpError(404, msg)
fq_name.append(name)
else:
msg = ("Not able to determine the scope of the tag '%s'" %
name)
raise cfgm_common.exceptions.HttpError(404, msg)
# lookup (validate) tag
try:
tag_uuid = self._db_conn.fq_name_to_uuid('tag', fq_name)
except NoIdError:
msg = "Tag with FQName %s not found" % pformat(fq_name)
raise cfgm_common.exceptions.HttpError(404, msg)
return fq_name, tag_uuid
refs_per_type = {}
for ref in obj_dict.get('tag_refs', []):
ref_type = ref['to'][-1].partition('=')[0]
refs_per_type.setdefault(ref_type, []).append(ref)
for tag_type, attrs in list(req_dict.items()):
tag_type = tag_type.lower()
# If the body of a Tag type is None, all references to that Tag
# type are removed from the resource
if attrs is None:
for ref in refs_per_type.get(tag_type, []):
need_update = True
obj_dict['tag_refs'].remove(ref)
refs_per_type[tag_type] = []
continue
# Else get defined values and update Tag references on the resource
is_global = attrs.get('is_global', False)
value = attrs.get('value')
add_values = set(attrs.get('add_values', []))
delete_values = set(attrs.get('delete_values', []))
# A tag type is unique per object, unless it is listed in
# TAG_TYPE_NOT_UNIQUE_PER_OBJECT
if tag_type not in TAG_TYPE_NOT_UNIQUE_PER_OBJECT:
if add_values or delete_values:
msg = ("Tag type %s cannot be set multiple times on a "
"same object." % tag_type)
raise cfgm_common.exceptions.HttpError(400, msg)
# address-group object can only be associated with label
if (obj_type == 'address_group' and
tag_type not in TAG_TYPE_AUTHORIZED_ON_ADDRESS_GROUP):
msg = ("Invalid tag type %s for object type %s" %
(tag_type, obj_type))
raise cfgm_common.exceptions.HttpError(400, msg)
refs_per_values = {}
if tag_type in refs_per_type:
refs_per_values = {ref['to'][-1].partition('=')[2]: ref for ref
in refs_per_type[tag_type]}
if tag_type not in TAG_TYPE_NOT_UNIQUE_PER_OBJECT:
if value is None or isinstance(value, list):
msg = "No valid value provided for tag type %s" % tag_type
raise cfgm_common.exceptions.HttpError(400, msg)
# don't need to update if tag type with same value already
# referenced
if value in refs_per_values:
continue
for ref in list(refs_per_values.values()):
need_update = True
# the object already has a reference to that tag type with a
# different value, remove it
obj_dict['tag_refs'].remove(ref)
# finally, reference the tag type with the new value
tag_fq_name, tag_uuid = _locate_tag(tag_type, value, is_global)
obj_dict.setdefault('tag_refs', []).append({
'uuid': tag_uuid,
'to': tag_fq_name,
'attr': None,
})
need_update = True
else:
# Add the 'value' attribute to the 'add_values' list if not null
if value is not None:
add_values.add(value)
for add_value in add_values - set(refs_per_values.keys()):
need_update = True
tag_fq_name, tag_uuid = _locate_tag(tag_type, add_value,
is_global)
obj_dict.setdefault('tag_refs', []).append({
'uuid': tag_uuid,
'to': tag_fq_name,
'attr': None,
})
for del_value in delete_values & set(refs_per_values.keys()):
need_update = True
obj_dict['tag_refs'].remove(refs_per_values[del_value])
if need_update:
self.internal_request_update(obj_type, obj_uuid, obj_dict)
return {}
def security_policy_draft(self):
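"""
Commit or discard pending (draft) security resources for a scope. The
request body carries 'scope_uuid' and 'action' ('commit' or 'discard');
the work is serialized with a ZooKeeper write lock on the scope.
"""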
self._post_common(None, {})
req_dict = get_request().json
scope_uuid = req_dict.pop('scope_uuid')
action = req_dict.pop('action')
pm_class = self.get_resource_class('policy-management')
try:
scope_type = self._db_conn.uuid_to_obj_type(scope_uuid)
except NoIdError as e:
msg = ("Cannot find scope where pending security resource are "
"own: %s" % str(e))
scope_class = self.get_resource_class(scope_type)
scope_fq_name = self._db_conn.uuid_to_fq_name(scope_uuid)
pm_fq_name = [POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT]
if (scope_type == GlobalSystemConfig.object_type and
scope_fq_name == GlobalSystemConfig().fq_name):
parent_type = PolicyManagement.resource_type
parent_fq_name = PolicyManagement().fq_name
parent_uuid = self._global_pm_uuid
else:
pm_fq_name = scope_fq_name + pm_fq_name
parent_type = scope_class.resource_type
parent_fq_name = scope_fq_name
parent_uuid = scope_uuid
ok, result = pm_class.locate(
fq_name=pm_fq_name,
create_it=False,
fields=['%ss' % type for type in SECURITY_OBJECT_TYPES],
)
if not ok and result[0] == 404:
# The dedicated draft policy management does not exist, so draft
# mode is not enabled on the scope
msg = ("Security draft mode is not enabled on the %s %s (%s)" %
(scope_type.replace('_', ' ').title(), scope_fq_name,
scope_uuid))
raise cfgm_common.exceptions.HttpError(400, msg)
if not ok:
raise cfgm_common.exceptions.HttpError(result[0], result[1])
pm = result
scope_lock = self._db_conn._zk_db._zk_client.write_lock(
'%s/%s/%s' % (
self.security_lock_prefix, scope_type,
':'.join(scope_fq_name)
),
'api-server-%s %s' % (socket.getfqdn(self._args.listen_ip_addr), action),
)
try:
acquired_lock = scope_lock.acquire(timeout=1)
except LockTimeout:
acquired_lock = False
if acquired_lock:
try:
if action == 'commit':
self._security_commit_resources(scope_type, parent_type,
parent_fq_name,
parent_uuid, pm)
elif action == 'discard':
self._security_discard_resources(pm)
else:
msg = "Only 'commit' or 'discard' actions are supported"
raise cfgm_common.exceptions.HttpError(400, msg)
finally:
scope_lock.release()
else:
contenders = scope_lock.contenders()
action_in_progress = '<unknown action>'
if len(contenders) > 0 and contenders[0]:
_, _, action_in_progress = contenders[0].partition(' ')
msg = ("Security resource modifications or commit/discard action "
"on %s '%s' (%s) scope is under progress. Try again later."
% (scope_type.replace('_', ' ').title(),
':'.join(scope_fq_name), scope_uuid))
raise cfgm_common.exceptions.HttpError(400, msg)
# TODO(ethuleau): we could return some stats or the type/uuid of
# resources acted on during commit or discard?
return {}
def _security_commit_resources(self, scope_type, parent_type,
parent_fq_name, parent_uuid, pm):
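"""
Promote draft security resources owned by the draft policy-management
object 'pm' into their parent scope: new drafts are created, updated
drafts overwrite the originals (reusing their UUID), and drafts marked
deleted purge the originals; creates/updates are applied leaf first and
deletes last.
"""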
updates = []
deletes = []
held_refs = []
for type_name in SECURITY_OBJECT_TYPES:
r_class = self.get_resource_class(type_name)
for child in pm.get('%ss' % r_class.object_type, []):
ok, result = r_class.locate(child['to'], child['uuid'],
create_it=False)
if not ok:
continue
draft = result
fq_name = parent_fq_name + [child['to'][-1]]
try:
uuid = self._db_conn.fq_name_to_uuid(r_class.object_type,
fq_name)
except NoIdError:
# No original version found, new resource created
uuid = None
self._holding_backrefs(updates, held_refs, scope_type,
r_class.object_type, fq_name, draft)
# Purge pending resource as we re-use the same UUID
self.internal_request_delete(r_class.object_type,
child['uuid'])
if uuid and draft['draft_mode_state'] == 'deleted':
# The resource is removed, we can purge original resource
deletes.append((r_class.object_type, uuid))
elif uuid and draft['draft_mode_state'] == 'updated':
# Update original resource with pending resource
draft.pop('fq_name', None)
draft.pop('uuid', None)
draft.pop('draft_mode_state', None)
if 'id_perms' in draft:
draft['id_perms'].pop('uuid', None)
draft['parent_type'] = parent_type
draft['parent_uuid'] = parent_uuid
# if a ref type was purged while draft mode was enabled,
# set the ref to an empty list to ensure all refs will be
# removed when the resource is updated/committed
for ref_type in r_class.ref_fields:
if ref_type not in draft:
draft[ref_type] = []
self._update_fq_name_security_refs(
parent_fq_name, pm['fq_name'], type_name, draft)
updates.append(('update', (r_class.resource_type, uuid,
copy.deepcopy(draft))))
elif not uuid and draft['draft_mode_state'] == 'created':
# Create new resource with pending values (re-use UUID)
draft.pop('id_perms', None)
draft.pop('perms2', None)
draft.pop('draft_mode_state', None)
draft['fq_name'] = fq_name
draft['parent_type'] = parent_type
draft['parent_uuid'] = parent_uuid
self._update_fq_name_security_refs(
parent_fq_name, pm['fq_name'], type_name, draft)
updates.append(('create', (r_class.resource_type,
copy.deepcopy(draft))))
else:
msg = (
"Try to commit a security resource %s (%s) with "
"invalid state '%s'. Ignore it." %
(':'.join(draft.get('fq_name', ['FQ name unknown'])),
draft.get('uuid', 'UUID unknown'),
draft.get('draft_mode_state', 'No draft mode state'))
)
self.config_log(msg, level=SandeshLevel.SYS_WARN)
# Need to create/update leaf resources first as they could be
# referenced by another created/updated resource (e.g.: FP -> FP)
updates.reverse() # order is: AG, SG, FR, FP and APS
for action, args in updates:
getattr(self, 'internal_request_%s' % action)(*args)
# Postpone deletes to be sure deleted resources are no longer
# referenced, and delete resources with refs before resources with backrefs
for args in deletes: # order is: APS, FP, FR, SG and AG
self.internal_request_delete(*args)
for args, kwargs in held_refs:
self.internal_request_ref_update(*args, **kwargs)
@staticmethod
def _update_fq_name_security_refs(parent_fq_name, pm_fq_name, res_type,
draft):
for ref_type in SECURITY_OBJECT_TYPES:
for ref in draft.get('%s_refs' % ref_type, []):
if ref['to'][:-1] == pm_fq_name:
ref['to'] = parent_fq_name + [ref['to'][-1]]
if res_type == 'firewall_rule':
for ep in [draft.get('endpoint_1', {}),
draft.get('endpoint_2', {})]:
ag_fq_name = ep.get('address_group', [])
if ag_fq_name and ag_fq_name.split(':')[:-1] == pm_fq_name:
ep['address_group'] = ':'.join(parent_fq_name + [
ag_fq_name.split(':')[-1]])
def _holding_backrefs(self, updates, held_refs, scope_type, obj_type,
fq_name, obj_dict):
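"""
For global-scope drafts that are not being deleted, temporarily drop
back-references from other security resources so the draft can be
committed: Firewall Rule endpoint address-group references are queued
as updates, other refs are queued in held_refs to be re-added after
the commit.
"""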
backref_fields = {'%s_back_refs' % t for t in SECURITY_OBJECT_TYPES}
if (scope_type == GlobalSystemConfig().object_type and
obj_dict['draft_mode_state'] != 'deleted'):
for backref_field in set(obj_dict.keys()) & backref_fields:
backref_type = backref_field[:-10]
for backref in copy.deepcopy(obj_dict.get(backref_field, [])):
# if it's a backref to a global resource, leave it as is
if backref['to'][0] in [PolicyManagement().name,
POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT]:
continue
self.internal_request_ref_update(
backref_type,
backref['uuid'],
'DELETE',
obj_type,
ref_uuid=obj_dict['uuid'],
)
if obj_type == AddressGroup.object_type:
# A Firewall Rule is not allowed to directly reference an
# Address Group; use its endpoints' address-group property
# instead
backref_class = self.get_resource_class(backref_type)
ok, result = backref_class.locate(
backref['to'],
backref['uuid'],
create_it=False,
fields=['endpoint_1', 'endpoint_2'])
if not ok:
msg = ("Cannot read Firewall Rule %s (%s)" %
(backref['to'], backref['uuid']))
raise cfgm_common.exceptions.HttpError(400, msg)
fr = result
for ep_type in ['endpoint_1', 'endpoint_2']:
if (ep_type in fr and
fr[ep_type].get('address_group', '') ==\
':'.join(obj_dict['fq_name'])):
ept = FirewallRuleEndpointType(
address_group=':'.join(fq_name))
updates.append(
('update',
(FirewallRule.resource_type, fr['uuid'],
{ep_type: vars(ept)})))
else:
held_refs.append(
((backref_type, backref['uuid'], 'ADD', obj_type),
{
'ref_fq_name': fq_name,
'attr': backref.get('attr')
}
)
)
obj_dict[backref_field].remove(backref)
def _security_discard_resources(self, pm):
for type_name in SECURITY_OBJECT_TYPES:
r_class = self.get_resource_class(type_name)
for child in pm.get('%ss' % r_class.object_type, []):
self.internal_request_delete(r_class.object_type,
child['uuid'])
# end VncApiServer
def main(args_str=None, server=None):
vnc_api_server = server
pipe_start_app = vnc_api_server.get_pipe_start_app()
server_ip = vnc_api_server.get_listen_ip()
server_port = vnc_api_server.get_server_port()
enable_ssl = vnc_api_server.get_enable_ssl()
if enable_ssl:
certfile=vnc_api_server.get_certfile()
keyfile=vnc_api_server.get_keyfile()
ca_cert=vnc_api_server.get_ca_cert()
""" @sigchld
Disable handling of SIG_CHLD for now as every keystone request to validate
token sends SIG_CHLD signal to API server.
"""
#hub.signal(signal.SIGCHLD, vnc_api_server.sigchld_handler)
hub.signal(signal.SIGTERM, vnc_api_server.sigterm_handler)
hub.signal(signal.SIGHUP, vnc_api_server.sighup_handler)
if pipe_start_app is None:
pipe_start_app = vnc_api_server.api_bottle
try:
vnc_args = vnc_api_server.get_args()
if enable_ssl:
if not (certfile and keyfile and ca_cert):
msg = "SSL is enabled but one or more of these options " \
"config_api_ssl_keyfile, config_api_ssl_certfile, " \
"config_api_ssl_ca_cert not specified"
raise cfgm_common.exceptions.VncError(msg)
bottle.run(app=pipe_start_app, host=server_ip, port=server_port,
ca_certs=ca_cert, keyfile=keyfile, certfile=certfile,
server=get_bottle_server(server._args.max_requests,
tcp_keepalive_interval=vnc_args.tcp_keepalive_interval,
tcp_keepalive_idle_time=vnc_args.tcp_keepalive_idle_time,
tcp_keepalive_probes=vnc_args.tcp_keepalive_probes),
ssl_version=ssl.PROTOCOL_TLSv1_2)
else:
bottle.run(app=pipe_start_app, host=server_ip, port=server_port,
server=get_bottle_server(server._args.max_requests,
tcp_keepalive_interval=vnc_args.tcp_keepalive_interval,
tcp_keepalive_idle_time=vnc_args.tcp_keepalive_idle_time,
tcp_keepalive_probes=vnc_args.tcp_keepalive_probes))
except KeyboardInterrupt:
# quietly handle Ctrl-C
pass
finally:
# always cleanup gracefully
vnc_api_server.reset()
# end main
def server_main(args_str=None):
vnc_cgitb.enable(format='text')
main(args_str, VncApiServer(args_str))
# end server_main
if __name__ == "__main__":
server_main()
| []
| []
| [
"CONTRAIL_VERSION"
]
| [] | ["CONTRAIL_VERSION"] | python | 1 | 0 | |
vendor/golang.org/x/sys/unix/mksyscall.go | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
This program reads a file containing function prototypes
(like syscall_darwin.go) and generates system call bodies.
The prototypes are marked by lines beginning with "//sys"
and read like func declarations if //sys is replaced by func, but:
* The parameter lists must give a name for each argument.
This includes return parameters.
* The parameter lists must give a type for each argument:
the (x, y, z int) shorthand is not allowed.
* If the return parameter is an error number, it must be named errno.
A line beginning with //sysnb is like //sys, except that the
goroutine will not be suspended during the execution of the system
call. This must only be used for system calls which can never
block, as otherwise the system call could cause all goroutines to
hang.
*/
package main
import (
"bufio"
"flag"
"fmt"
"os"
"regexp"
"strings"
)
var (
b32 = flag.Bool("b32", false, "32bit big-endian")
l32 = flag.Bool("l32", false, "32bit little-endian")
plan9 = flag.Bool("plan9", false, "plan9")
openbsd = flag.Bool("openbsd", false, "openbsd")
netbsd = flag.Bool("netbsd", false, "netbsd")
dragonfly = flag.Bool("dragonfly", false, "dragonfly")
arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair
tags = flag.String("tags", "", "build tags")
filename = flag.String("output", "", "output file name (standard output if omitted)")
)
// cmdLine returns this program's command-line arguments
func cmdLine() string {
return "go run mksyscall.go " + strings.Join(os.Args[1:], " ")
}
// buildTags returns build tags
func buildTags() string {
return *tags
}
// Param is function parameter
type Param struct {
Name string
Type string
}
// usage prints the program usage
func usage() {
fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n")
os.Exit(1)
}
// parseParamList parses parameter list and returns a slice of parameters
func parseParamList(list string) []string {
list = strings.TrimSpace(list)
if list == "" {
return []string{}
}
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
}
// parseParam splits a parameter into name and type
func parseParam(p string) Param {
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
if ps == nil {
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
os.Exit(1)
}
return Param{ps[1], ps[2]}
}
func main() {
goos := os.Getenv("GOOS_TARGET")
if goos == "" {
goos = os.Getenv("GOOS")
}
if goos == "" {
fmt.Fprintln(os.Stderr, "GOOS not defined in environment")
os.Exit(1)
}
// Check that we are using the Docker-based build system if we should
if goos == "linux" {
if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n")
fmt.Fprintf(os.Stderr, "See README.md\n")
os.Exit(1)
}
}
flag.Usage = usage
flag.Parse()
if len(flag.Args()) <= 0 {
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
usage()
}
endianness := ""
if *b32 {
endianness = "big-endian"
} else if *l32 {
endianness = "little-endian"
}
libc := false
if goos == "darwin" {
libc = true
}
trampolines := map[string]bool{}
text := ""
for _, path := range flag.Args() {
file, err := os.Open(path)
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
s := bufio.NewScanner(file)
for s.Scan() {
t := s.Text()
t = strings.TrimSpace(t)
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
continue
}
// Line must be of the form
// func Open(path string, mode int, perm int) (fd int, errno error)
// Split into name, in params, out params.
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t)
if f == nil {
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
os.Exit(1)
}
funct, inps, outps, sysname := f[2], f[3], f[4], f[5]
// Split argument lists on comma.
in := parseParamList(inps)
out := parseParamList(outps)
// Try in vain to keep people from editing this file.
// The theory is that they jump into the middle of the file
// without reading the header.
text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
// Go function header.
outDecl := ""
if len(out) > 0 {
outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", "))
}
text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl)
// Check if err return available
errvar := ""
for _, param := range out {
p := parseParam(param)
if p.Type == "error" {
errvar = p.Name
break
}
}
// Prepare arguments to Syscall.
var args []string
n := 0
for _, param := range in {
p := parseParam(param)
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
} else if p.Type == "string" && errvar != "" {
text += fmt.Sprintf("\tvar _p%d *byte\n", n)
text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name)
text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
n++
} else if p.Type == "string" {
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
text += fmt.Sprintf("\tvar _p%d *byte\n", n)
text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name)
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
n++
} else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
// Convert slice into pointer, length.
// Have to be careful not to take address of &a[0] if len == 0:
// pass dummy pointer in that case.
// Used to pass nil, but some OSes or simulators reject write(fd, nil, 0).
text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n)
text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name)
text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n)
args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
n++
} else if p.Type == "int64" && (*openbsd || *netbsd) {
args = append(args, "0")
if endianness == "big-endian" {
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
} else if endianness == "little-endian" {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
}
} else if p.Type == "int64" && *dragonfly {
if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil {
args = append(args, "0")
}
if endianness == "big-endian" {
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
} else if endianness == "little-endian" {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
}
} else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" {
if len(args)%2 == 1 && *arm {
// arm abi specifies 64-bit argument uses
// (even, odd) pair
args = append(args, "0")
}
if endianness == "big-endian" {
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
}
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
}
}
// Determine which form to use; pad args with zeros.
asm := "Syscall"
if nonblock != nil {
if errvar == "" && goos == "linux" {
asm = "RawSyscallNoError"
} else {
asm = "RawSyscall"
}
} else {
if errvar == "" && goos == "linux" {
asm = "SyscallNoError"
}
}
if len(args) <= 3 {
for len(args) < 3 {
args = append(args, "0")
}
} else if len(args) <= 6 {
asm += "6"
for len(args) < 6 {
args = append(args, "0")
}
} else if len(args) <= 9 {
asm += "9"
for len(args) < 9 {
args = append(args, "0")
}
} else {
fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct)
}
// System call number.
if sysname == "" {
sysname = "SYS_" + funct
sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
sysname = strings.ToUpper(sysname)
}
var libcFn string
if libc {
asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call
sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_
sysname = strings.ToLower(sysname) // lowercase
libcFn = sysname
sysname = "funcPC(libc_" + sysname + "_trampoline)"
}
// Actual call.
arglist := strings.Join(args, ", ")
call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist)
// Assign return values.
body := ""
ret := []string{"_", "_", "_"}
doErrno := false
for i := 0; i < len(out); i++ {
p := parseParam(out[i])
reg := ""
if p.Name == "err" && !*plan9 {
reg = "e1"
ret[2] = reg
doErrno = true
} else if p.Name == "err" && *plan9 {
ret[0] = "r0"
ret[2] = "e1"
break
} else {
reg = fmt.Sprintf("r%d", i)
ret[i] = reg
}
if p.Type == "bool" {
reg = fmt.Sprintf("%s != 0", reg)
}
if p.Type == "int64" && endianness != "" {
// 64-bit number in r1:r0 or r0:r1.
if i+2 > len(out) {
fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct)
}
if endianness == "big-endian" {
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
} else {
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
}
ret[i] = fmt.Sprintf("r%d", i)
ret[i+1] = fmt.Sprintf("r%d", i+1)
}
if reg != "e1" || *plan9 {
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
}
}
if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
text += fmt.Sprintf("\t%s\n", call)
} else {
if errvar == "" && goos == "linux" {
// raw syscall without error on Linux, see golang.org/issue/22924
text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call)
} else {
text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
}
}
text += body
if *plan9 && ret[2] == "e1" {
text += "\tif int32(r0) == -1 {\n"
text += "\t\terr = e1\n"
text += "\t}\n"
} else if doErrno {
text += "\tif e1 != 0 {\n"
text += "\t\terr = errnoErr(e1)\n"
text += "\t}\n"
}
text += "\treturn\n"
text += "}\n\n"
if libc && !trampolines[libcFn] {
// some system calls share a trampoline, like read and readlen.
trampolines[libcFn] = true
// Declare assembly trampoline.
text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn)
// Assembly trampoline calls the libc_* function, which this magic
// redirects to use the function from libSystem.
text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn)
text += "\n"
}
}
if err := s.Err(); err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
file.Close()
}
fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
}
const srcTemplate = `// %s
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s
package unix
import (
"syscall"
"unsafe"
)
var _ syscall.Errno
%s
`
| [
"\"GOOS_TARGET\"",
"\"GOOS\"",
"\"GOLANG_SYS_BUILD\""
]
| []
| [
"GOLANG_SYS_BUILD",
"GOOS",
"GOOS_TARGET"
]
| [] | ["GOLANG_SYS_BUILD", "GOOS", "GOOS_TARGET"] | go | 3 | 0 | |
improver/cli/between_thresholds.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Script to calculate probabilities of occurrence between thresholds."""
from improver import cli
@cli.clizefy
@cli.with_output
def process(
cube: cli.inputcube, *, threshold_ranges: cli.inputjson, threshold_units=None
):
"""
Calculate the probabilities of occurrence between thresholds
Args:
cube (iris.cube.Cube):
Cube containing input probabilities above or below threshold
threshold_ranges (list):
List of 2-item iterables specifying thresholds between which
probabilities should be calculated
threshold_units (str):
Units in which the thresholds are specified. If None, defaults
to the units of the threshold coordinate on the input cube.
Returns:
iris.cube.Cube:
Cube containing probabilities of occurrence between the thresholds
specified
"""
from improver.between_thresholds import OccurrenceBetweenThresholds
from improver.metadata.probabilistic import find_threshold_coordinate
if threshold_units is None:
threshold_units = str(find_threshold_coordinate(cube).units)
plugin = OccurrenceBetweenThresholds(threshold_ranges, threshold_units)
return plugin(cube)
| []
| []
| []
| [] | [] | python | null | null | null |
doc.go | /*
Package lokalise provides functions to access the Lokalise web API.
Usage:
import "github.com/lokalise/go-lokalise-api/v2" // with go modules enabled (GO111MODULE=on or outside GOPATH)
import "github.com/lokalise/go-lokalise-api" // with go modules disabled
Initializing the client
token := os.Getenv("lokalise_token")
client, err := lokalise.New(token)
General options
You can set global API parameters with the ClientOption functions during the initialization. The following functions are available:
* WithBaseURL
* WithRetryCount
* WithRetryTimeout
* WithConnectionTimeout
* WithDebug
* WithPageLimit
Usage:
Api, err := lokalise.New(
"token-string",
lokalise.WithDebug(true),
...
)
Objects and models
Individual objects are represented as instances of the corresponding structs. In most cases, separate objects are used for creating and for updating.
Here are some object types (a brief sketch follows the list):
* Create/Update request objects, e.g. NewKey, NewContributor
* Response objects: single/multiple, e.g. KeyResponse/KeysResponse, plus special ones, e.g. DeleteKeyResponse. There is no separate ErrorResponse - errors are returned alongside the concrete method's response.
* List options, used for passing request options and pagination, e.g. KeyListOptions.
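A minimal sketch tying these together (hedged illustration; the response
fields Keys and KeyID used below are assumptions, not confirmed by this file):
resp, err := client.Keys().WithListOptions(lokalise.KeyListOptions{Limit: 100}).List("{PROJECT_ID}")
if err != nil {
// handle the error returned by the method
}
for _, key := range resp.Keys {
fmt.Println(key.KeyID)
}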
Request options and pagination
Some resources, such as Projects, Keys, Files, Tasks, Screenshots, and Translations, accept optional parameters for their List method (Keys also supports options for Retrieve). These parameters should be set before calling.
All request options can be set either inline or separately:
// separately:
keys := client.Keys()
keys.SetListOptions(lokalise.KeyListOptions{
IncludeTranslations: 1,
IncludeComments: 1,
})
resp, err := keys.List("{PROJECT_ID}")
// inline:
client.Keys().WithListOptions(lokalise.KeyListOptions{Limit: 3}).List("{PROJECT_ID}")
There are two parameters used for pagination: Limit and Page.
t := Api.Teams()
t.SetPageOptions(lokalise.PageOptions{
Page: 2,
Limit: 10,
})
resp, err := t.List()
*/
package lokalise
| [
"\"lokalise_token\""
]
| []
| [
"lokalise_token"
]
| [] | ["lokalise_token"] | go | 1 | 0 |