filename (stringlengths 4–198) | content (stringlengths 25–939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses, 1 value) | constargjson (stringlengths 2–3.9k) | lang (stringclasses, 3 values) | constargcount (float64, 0–129, ⌀) | variableargcount (float64, 0–0, ⌀) | sentence (stringclasses, 1 value)
---|---|---|---|---|---|---|---|---|---|---|
teku/src/main/java/tech/pegasys/teku/Teku.java | /*
* Copyright ConsenSys Software Inc., 2022
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package tech.pegasys.teku;
import java.io.PrintWriter;
import java.nio.charset.Charset;
import java.security.Security;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicReference;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import tech.pegasys.teku.bls.impl.blst.BlstLoader;
import tech.pegasys.teku.cli.BeaconNodeCommand;
import tech.pegasys.teku.cli.BeaconNodeCommand.StartAction;
import tech.pegasys.teku.config.TekuConfiguration;
import tech.pegasys.teku.infrastructure.logging.LoggingConfigurator;
public final class Teku {
static {
// Disable libsodium in tuweni Hash because the check for its presence can be very slow.
System.setProperty("org.apache.tuweni.crypto.useSodium", "false");
Security.addProvider(new BouncyCastleProvider());
}
public static void main(String[] args) {
Thread.setDefaultUncaughtExceptionHandler(new TekuDefaultExceptionHandler());
try {
Optional<Node> maybeNode = Teku.startFromCLIArgs(args);
maybeNode.ifPresent(
node ->
// Detect SIGTERM
Runtime.getRuntime()
.addShutdownHook(
new Thread(
() -> {
System.out.println("Teku is shutting down");
node.stop();
})));
} catch (CLIException e) {
System.exit(e.getResultCode());
}
}
private static int start(StartAction startAction, final String... args) {
final PrintWriter outputWriter = new PrintWriter(System.out, true, Charset.defaultCharset());
final PrintWriter errorWriter = new PrintWriter(System.err, true, Charset.defaultCharset());
final LoggingConfigurator loggingConfigurator = new LoggingConfigurator();
return new BeaconNodeCommand(
outputWriter, errorWriter, System.getenv(), startAction, loggingConfigurator)
.parse(args);
}
private static Node start(final TekuConfiguration config, final boolean validatorOnly) {
final Node node;
if (validatorOnly) {
node = new ValidatorNode(config);
} else {
node = new BeaconNode(config);
}
// Check that BLS is available before starting to ensure we get a nice error message if it's not
if (BlstLoader.INSTANCE.isEmpty()) {
throw new UnsupportedOperationException("BLS native library unavailable for this platform");
}
node.start();
return node;
}
static Optional<Node> startFromCLIArgs(String[] cliArgs) throws CLIException {
AtomicReference<Node> nodeRef = new AtomicReference<>();
int result =
start((config, validatorClient) -> nodeRef.set(start(config, validatorClient)), cliArgs);
if (result != 0) {
throw new CLIException(result);
}
return Optional.ofNullable(nodeRef.get());
}
static BeaconNode startBeaconNode(TekuConfiguration config) {
return (BeaconNode) start(config, false);
}
static ValidatorNode startValidatorNode(TekuConfiguration config) {
return (ValidatorNode) start(config, true);
}
private static class CLIException extends RuntimeException {
private final int resultCode;
public CLIException(int resultCode) {
super("Unable to start Teku. Exit code: " + resultCode);
this.resultCode = resultCode;
}
public int getResultCode() {
return resultCode;
}
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
app/core/web_sockets/send_massage.py | import logging
import os
import sys
import django
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
""" for local usage -> """
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
path = os.path.expanduser(BASE_DIR)
if path not in sys.path:
sys.path.insert(0, path)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")
django.setup()
""" <- for local usage """
logger = logging.getLogger(__name__)
class SendMassageWS:
@classmethod
def send_ws_msg(cls,
chat_name: str,
title: str,
msg: str) -> None:
try:
channel_layer = get_channel_layer()
async_to_sync(channel_layer.group_send)(
chat_name,
{
'type': 'chat.message',
'title': title,
'message': msg,
}
)
except Exception as ex:
logger.error(f"send_msg(): {ex}")
return None
if __name__ == "__main__":
SendMassageWS.send_ws_msg(
chat_name='lobby',
title='hello',
msg='world'
)
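# A minimal matching consumer sketch (hypothetical; the group and class names
# are illustrative, not part of this module). Channels dispatches the
# {'type': 'chat.message'} payload sent above to a method named chat_message
# on every consumer subscribed to the group:
#
#   from channels.generic.websocket import AsyncJsonWebsocketConsumer
#
#   class ChatConsumer(AsyncJsonWebsocketConsumer):
#       async def connect(self):
#           await self.channel_layer.group_add('lobby', self.channel_name)
#           await self.accept()
#
#       async def chat_message(self, event):
#           await self.send_json({'title': event['title'], 'message': event['message']})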
| [] | [] | [] | [] | [] | python | 0 | 0 | |
deepface/commons/functions.py | import os
import numpy as np
import pandas as pd
import cv2
import base64
from pathlib import Path
from PIL import Image
import requests
from deepface.detectors import FaceDetector
import tensorflow as tf
tf_version = tf.__version__
tf_major_version = int(tf_version.split(".")[0])
tf_minor_version = int(tf_version.split(".")[1])
if tf_major_version == 1:
import keras
from keras.preprocessing.image import load_img, save_img, img_to_array
from keras.applications.imagenet_utils import preprocess_input
from keras.preprocessing import image
elif tf_major_version == 2:
from tensorflow import keras
from tensorflow.keras.preprocessing.image import load_img, save_img, img_to_array
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras.preprocessing import image
#--------------------------------------------------
def initialize_input(img1_path, img2_path = None):
if type(img1_path) == list:
bulkProcess = True
img_list = img1_path.copy()
else:
bulkProcess = False
if (
(type(img2_path) == str and img2_path != None) #exact image path, base64 image
or (isinstance(img2_path, np.ndarray) and img2_path.any()) #numpy array
):
img_list = [[img1_path, img2_path]]
else: #analyze function passes just img1_path
img_list = [img1_path]
return img_list, bulkProcess
def initialize_folder():
home = get_deepface_home()
if not os.path.exists(home+"/.deepface"):
os.makedirs(home+"/.deepface")
print("Directory ", home, "/.deepface created")
if not os.path.exists(home+"/.deepface/weights"):
os.makedirs(home+"/.deepface/weights")
print("Directory ", home, "/.deepface/weights created")
def get_deepface_home():
return str(os.getenv('DEEPFACE_HOME', default=Path.home()))
def loadBase64Img(uri):
encoded_data = uri.split(',')[1]
nparr = np.frombuffer(base64.b64decode(encoded_data), np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
return img
def load_image(img):
exact_image = False; base64_img = False; url_img = False
if type(img).__module__ == np.__name__:
exact_image = True
elif len(img) > 11 and img[0:11] == "data:image/":
base64_img = True
elif len(img) > 11 and img.startswith("http"):
url_img = True
#---------------------------
if base64_img == True:
img = loadBase64Img(img)
elif url_img:
img = np.array(Image.open(requests.get(img, stream=True).raw))
elif exact_image != True: #image path passed as input
if os.path.isfile(img) != True:
raise ValueError("Confirm that ",img," exists")
img = cv2.imread(img)
return img
def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_detection = True, align = True):
img_region = [0, 0, img.shape[0], img.shape[1]]
#----------------------------------------------
#people would like to skip detection and alignment if they already have pre-processed images
if detector_backend == 'skip':
return img, img_region
#----------------------------------------------
#detector stored in a global variable in FaceDetector object.
#this call should be completed very fast because it will return found in memory
#it will not build face detector model in each call (consider for loops)
face_detector = FaceDetector.build_model(detector_backend)
try:
detected_face, img_region = FaceDetector.detect_face(face_detector, detector_backend, img, align)
except Exception: #if detected face shape is (0, 0) and alignment cannot be performed, this block will be run
detected_face = None
if (isinstance(detected_face, np.ndarray)):
return detected_face, img_region
else:
if detected_face == None:
if enforce_detection != True:
return img, img_region
else:
raise ValueError("Face could not be detected. Please confirm that the picture is a face photo or consider to set enforce_detection param to False.")
def normalize_input(img, normalization = 'base'):
#issue 131 declares that some normalization techniques improves the accuracy
if normalization == 'base':
return img
else:
#@trevorgribble and @davedgd contributed this feature
img *= 255 #restore input in scale of [0, 255] because it was normalized in scale of [0, 1] in preprocess_face
if normalization == 'raw':
pass #return just restored pixels
elif normalization == 'Facenet':
mean, std = img.mean(), img.std()
img = (img - mean) / std
elif(normalization=="Facenet2018"):
# simply / 127.5 - 1 (similar to facenet 2018 model preprocessing step as @iamrishab posted)
img /= 127.5
img -= 1
elif normalization == 'VGGFace':
# mean subtraction based on VGGFace1 training data
img[..., 0] -= 93.5940
img[..., 1] -= 104.7624
img[..., 2] -= 129.1863
elif(normalization == 'VGGFace2'):
# mean subtraction based on VGGFace2 training data
img[..., 0] -= 91.4953
img[..., 1] -= 103.8827
img[..., 2] -= 131.0912
elif(normalization == 'ArcFace'):
#Reference study: The faces are cropped and resized to 112×112,
#and each pixel (ranged between [0, 255]) in RGB images is normalised
#by subtracting 127.5 then divided by 128.
img -= 127.5
img /= 128
#-----------------------------
return img
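# Illustrative sketch of how normalize_input is typically combined with
# preprocess_face below (the image path and normalization choice are hypothetical):
#
#   face = preprocess_face("img.jpg", target_size=(160, 160))
#   face = normalize_input(face, normalization="Facenet")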
def preprocess_face(img, target_size, grayscale = False, enforce_detection = True, detector_backend = 'opencv', return_region = False, align = True):
#img might be path, base64 or numpy array. Convert it to numpy whatever it is.
img = load_image(img)
# cv2.imshow(img)
# cv2.waitKey(0)
base_img = img.copy()
img, region = detect_face(img = img, detector_backend = detector_backend, grayscale = grayscale, enforce_detection = enforce_detection, align = align)
# cv2.imshow("img",img)
# cv2.waitKey(0)
#--------------------------
if img.shape[0] == 0 or img.shape[1] == 0:
if enforce_detection == True:
raise ValueError(f"Detected face shape is {img.shape}. Consider to set enforce_detection argument to False.")
else: #restore base image
img = base_img.copy()
#--------------------------
#post-processing
if grayscale == True:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#---------------------------------------------------
#resize image to expected shape
# img = cv2.resize(img, target_size) #resize causes transformation on base image, adding black pixels to resize will not deform the base image
# print(img.shape)
if img.shape[0] > 0 and img.shape[1] > 0:
factor_0 = target_size[0] / img.shape[0]
factor_1 = target_size[1] / img.shape[1]
factor = min(factor_0, factor_1)
dsize = (int(img.shape[1] * factor), int(img.shape[0] * factor))
img = cv2.resize(img, dsize)
# Then pad the other side to the target size by adding black pixels
diff_0 = target_size[0] - img.shape[0]
diff_1 = target_size[1] - img.shape[1]
if grayscale == False:
# Put the base image in the middle of the padded image
img = np.pad(img, ((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2), (0, 0)), 'constant')
else:
img = np.pad(img, ((diff_0 // 2, diff_0 - diff_0 // 2), (diff_1 // 2, diff_1 - diff_1 // 2)), 'constant')
#------------------------------------------
#double check: if the image is still not the same size as the target, resize it
if img.shape[0:2] != target_size:
img = cv2.resize(img, target_size)
#---------------------------------------------------
#normalizing the image pixels
img_pixels = image.img_to_array(img) #convert to a float array in the format the models expect
img_pixels = np.expand_dims(img_pixels, axis = 0)
img_pixels /= 255 #normalize input in [0, 1]
#---------------------------------------------------
if return_region == True:
return img_pixels, region
else:
return img_pixels
def find_input_shape(model):
#face recognition models have different size of inputs
#my environment returns (None, 224, 224, 3) but some people mentioned that they got [(None, 224, 224, 3)]. I think this is because of version issue.
input_shape = model.layers[0].input_shape
if type(input_shape) == list:
input_shape = input_shape[0][1:3]
else:
input_shape = input_shape[1:3]
#----------------------
#issue 289: it seems that tf 2.5 expects you to resize images with (x, y)
#whereas its older versions expect (y, x)
if tf_major_version == 2 and tf_minor_version >= 5:
x = input_shape[0]; y = input_shape[1]
input_shape = (y, x)
#----------------------
if type(input_shape) == list: #issue 197: some people got array here instead of tuple
input_shape = tuple(input_shape)
return input_shape
if __name__=="__main__":
img1 = preprocess_face("/home/quang/Documents/FACE/deepface/tests/dataset/img1.jpg", (240, 240)) | [] | [] | ["DEEPFACE_HOME"] | [] | ["DEEPFACE_HOME"] | python | 1 | 0 | |
viper.go | // Copyright © 2014 Steve Francia <[email protected]>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// Viper is an application configuration system.
// It believes that applications can be configured in a variety of ways
// via flags, ENVIRONMENT variables, configuration files retrieved
// from the file system, or a remote key/value store.
// Each item takes precedence over the item below it:
// overrides
// flag
// env
// config
// key/value store
// default
package viper
import (
"bytes"
"encoding/csv"
"encoding/json"
"fmt"
"io"
"log"
"os"
"path/filepath"
"reflect"
"strings"
"sync"
"time"
yaml "gopkg.in/yaml.v2"
"github.com/fsnotify/fsnotify"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/printer"
"github.com/magiconair/properties"
"github.com/mitchellh/mapstructure"
toml "github.com/pelletier/go-toml"
"github.com/spf13/afero"
"github.com/spf13/cast"
jww "github.com/spf13/jwalterweatherman"
"github.com/spf13/pflag"
)
// ConfigMarshalError happens when failing to marshal the configuration.
type ConfigMarshalError struct {
err error
}
// Error returns the formatted configuration error.
func (e ConfigMarshalError) Error() string {
return fmt.Sprintf("While marshaling config: %s", e.err.Error())
}
var v *Viper
type RemoteResponse struct {
Value []byte
Error error
}
func init() {
v = New()
}
type remoteConfigFactory interface {
Get(rp RemoteProvider) (io.Reader, error)
Watch(rp RemoteProvider) (io.Reader, error)
WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool)
}
// RemoteConfig is optional, see the remote package
var RemoteConfig remoteConfigFactory
// UnsupportedConfigError denotes encountering an unsupported
// configuration filetype.
type UnsupportedConfigError string
// Error returns the formatted configuration error.
func (str UnsupportedConfigError) Error() string {
return fmt.Sprintf("Unsupported Config Type %q", string(str))
}
// UnsupportedRemoteProviderError denotes encountering an unsupported remote
// provider. Currently only etcd and Consul are supported.
type UnsupportedRemoteProviderError string
// Error returns the formatted remote provider error.
func (str UnsupportedRemoteProviderError) Error() string {
return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str))
}
// RemoteConfigError denotes encountering an error while trying to
// pull the configuration from the remote provider.
type RemoteConfigError string
// Error returns the formatted remote provider error
func (rce RemoteConfigError) Error() string {
return fmt.Sprintf("Remote Configurations Error: %s", string(rce))
}
// ConfigFileNotFoundError denotes failing to find configuration file.
type ConfigFileNotFoundError struct {
name, locations string
}
// Error returns the formatted configuration error.
func (fnfe ConfigFileNotFoundError) Error() string {
return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations)
}
// A DecoderConfigOption can be passed to viper.Unmarshal to configure
// mapstructure.DecoderConfig options
type DecoderConfigOption func(*mapstructure.DecoderConfig)
// DecodeHook returns a DecoderConfigOption which overrides the default
// DecoderConfig.DecodeHook value, the default is:
//
// mapstructure.ComposeDecodeHookFunc(
// mapstructure.StringToTimeDurationHookFunc(),
// mapstructure.StringToSliceHookFunc(","),
// )
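//
// A minimal usage sketch (cfg is a hypothetical target struct; any
// mapstructure.DecodeHookFunc may be supplied in place of the ones shown):
//
//	err := viper.Unmarshal(&cfg, viper.DecodeHook(
//		mapstructure.ComposeDecodeHookFunc(
//			mapstructure.StringToTimeDurationHookFunc(),
//			mapstructure.StringToIPHookFunc(),
//		),
//	))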
func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption {
return func(c *mapstructure.DecoderConfig) {
c.DecodeHook = hook
}
}
// Viper is a prioritized configuration registry. It
// maintains a set of configuration sources, fetches
// values to populate those, and provides them according
// to the source's priority.
// The priority of the sources is the following:
// 1. overrides
// 2. flags
// 3. env. variables
// 4. config file
// 5. key/value store
// 6. defaults
//
// For example, if values from the following sources were loaded:
//
// Defaults : {
// "secret": "",
// "user": "default",
// "endpoint": "https://localhost"
// }
// Config : {
// "user": "root"
// "secret": "defaultsecret"
// }
// Env : {
// "secret": "somesecretkey"
// }
//
// The resulting config will have the following values:
//
// {
// "secret": "somesecretkey",
// "user": "root",
// "endpoint": "https://localhost"
// }
type Viper struct {
// Delimiter that separates a list of keys
// used to access a nested value in one go
keyDelim string
// A set of paths to look for the config file in
configPaths []string
// The filesystem to read config from.
fs afero.Fs
// A set of remote providers to search for the configuration
remoteProviders []*defaultRemoteProvider
// Name of file to look for inside the path
configName string
configFile string
configType string
configPermissions os.FileMode
envPrefix string
automaticEnvApplied bool
envKeyReplacer *strings.Replacer
allowEmptyEnv bool
caseSensitiveKeys bool
config map[string]interface{}
override map[string]interface{}
defaults map[string]interface{}
kvstore map[string]interface{}
pflags map[string]FlagValue
env map[string]string
aliases map[string]string
typeByDefValue bool
// Store read properties on the object so that we can write back in order with comments.
// This will only be used if the configuration read is a properties file.
properties *properties.Properties
onConfigChange func(fsnotify.Event)
}
// New returns an initialized Viper instance.
func New() *Viper {
v := new(Viper)
v.keyDelim = "."
v.configName = "config"
v.configPermissions = os.FileMode(0644)
v.fs = afero.NewOsFs()
v.config = make(map[string]interface{})
v.override = make(map[string]interface{})
v.defaults = make(map[string]interface{})
v.kvstore = make(map[string]interface{})
v.pflags = make(map[string]FlagValue)
v.env = make(map[string]string)
v.aliases = make(map[string]string)
v.typeByDefValue = false
return v
}
// Intended for testing, will reset all to default settings.
// In the public interface for the viper package so applications
// can use it in their testing as well.
func Reset() {
v = New()
SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"}
SupportedRemoteProviders = []string{"etcd", "consul"}
}
type defaultRemoteProvider struct {
provider string
endpoint string
path string
secretKeyring string
}
func (rp defaultRemoteProvider) Provider() string {
return rp.provider
}
func (rp defaultRemoteProvider) Endpoint() string {
return rp.endpoint
}
func (rp defaultRemoteProvider) Path() string {
return rp.path
}
func (rp defaultRemoteProvider) SecretKeyring() string {
return rp.secretKeyring
}
// RemoteProvider stores the configuration necessary
// to connect to a remote key/value store.
// Optional secretKeyring to unencrypt encrypted values
// can be provided.
type RemoteProvider interface {
Provider() string
Endpoint() string
Path() string
SecretKeyring() string
}
// SupportedExts are universally supported extensions.
var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"}
// SupportedRemoteProviders are universally supported remote providers.
var SupportedRemoteProviders = []string{"etcd", "consul"}
func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) }
func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) {
v.onConfigChange = run
}
func WatchConfig() { v.WatchConfig() }
func (v *Viper) WatchConfig() {
initWG := sync.WaitGroup{}
initWG.Add(1)
go func() {
watcher, err := fsnotify.NewWatcher()
if err != nil {
log.Fatal(err)
}
defer watcher.Close()
// we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way
filename, err := v.getConfigFile()
if err != nil {
log.Printf("error: %v\n", err)
return
}
configFile := filepath.Clean(filename)
configDir, _ := filepath.Split(configFile)
realConfigFile, _ := filepath.EvalSymlinks(filename)
eventsWG := sync.WaitGroup{}
eventsWG.Add(1)
go func() {
for {
select {
case event, ok := <-watcher.Events:
if !ok { // 'Events' channel is closed
eventsWG.Done()
return
}
currentConfigFile, _ := filepath.EvalSymlinks(filename)
// we only care about the config file with the following cases:
// 1 - if the config file was modified or created
// 2 - if the real path to the config file changed (eg: k8s ConfigMap replacement)
const writeOrCreateMask = fsnotify.Write | fsnotify.Create
if (filepath.Clean(event.Name) == configFile &&
event.Op&writeOrCreateMask != 0) ||
(currentConfigFile != "" && currentConfigFile != realConfigFile) {
realConfigFile = currentConfigFile
err := v.ReadInConfig()
if err != nil {
log.Printf("error reading config file: %v\n", err)
}
if v.onConfigChange != nil {
v.onConfigChange(event)
}
} else if filepath.Clean(event.Name) == configFile &&
event.Op&fsnotify.Remove&fsnotify.Remove != 0 {
eventsWG.Done()
return
}
case err, ok := <-watcher.Errors:
if ok { // 'Errors' channel is not closed
log.Printf("watcher error: %v\n", err)
}
eventsWG.Done()
return
}
}
}()
watcher.Add(configDir)
initWG.Done() // done initializing the watch in this go routine, so the parent routine can move on...
eventsWG.Wait() // now, wait for event loop to end in this go-routine...
}()
initWG.Wait() // make sure that the go routine above fully ended before returning
}
// SetConfigFile explicitly defines the path, name and extension of the config file.
// Viper will use this and not check any of the config paths.
func SetConfigFile(in string) { v.SetConfigFile(in) }
func (v *Viper) SetConfigFile(in string) {
if in != "" {
v.configFile = in
}
}
// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use.
// E.g. if your prefix is "spf", the env registry will look for env
// variables that start with "SPF_".
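//
// A short usage sketch (the prefix and variable values are only examples):
//
//	viper.SetEnvPrefix("spf")
//	viper.BindEnv("id")       // binds to SPF_ID
//	os.Setenv("SPF_ID", "13") // typically set outside the app
//	id := viper.Get("id")     // 13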
func SetEnvPrefix(in string) { v.SetEnvPrefix(in) }
func (v *Viper) SetEnvPrefix(in string) {
if in != "" {
v.envPrefix = in
}
}
func (v *Viper) mergeWithEnvPrefix(in string) string {
if v.envPrefix != "" {
return strings.ToUpper(v.envPrefix + "_" + in)
}
return strings.ToUpper(in)
}
// AllowEmptyEnv tells Viper to consider set,
// but empty environment variables as valid values instead of falling back.
// For backward compatibility reasons this is false by default.
func AllowEmptyEnv(allowEmptyEnv bool) { v.AllowEmptyEnv(allowEmptyEnv) }
func (v *Viper) AllowEmptyEnv(allowEmptyEnv bool) {
v.allowEmptyEnv = allowEmptyEnv
}
// TODO: should getEnv logic be moved into find(). Can generalize the use of
// rewriting keys many things, Ex: Get('someKey') -> some_key
// (camel case to snake case for JSON keys perhaps)
// getEnv is a wrapper around os.Getenv which replaces characters in the original
// key. This allows env vars which have different keys than the config object
// keys.
func (v *Viper) getEnv(key string) (string, bool) {
if v.envKeyReplacer != nil {
key = v.envKeyReplacer.Replace(key)
}
val, ok := os.LookupEnv(key)
return val, ok && (v.allowEmptyEnv || val != "")
}
// ConfigFileUsed returns the file used to populate the config registry.
func ConfigFileUsed() string { return v.ConfigFileUsed() }
func (v *Viper) ConfigFileUsed() string { return v.configFile }
// AddConfigPath adds a path for Viper to search for the config file in.
// Can be called multiple times to define multiple search paths.
func AddConfigPath(in string) { v.AddConfigPath(in) }
func (v *Viper) AddConfigPath(in string) {
if in != "" {
absin := absPathify(in)
jww.INFO.Println("adding", absin, "to paths to search")
if !stringInSlice(absin, v.configPaths) {
v.configPaths = append(v.configPaths, absin)
}
}
}
// AddRemoteProvider adds a remote configuration source.
// Remote Providers are searched in the order they are added.
// provider is a string value, "etcd" or "consul" are currently supported.
// endpoint is the url. etcd requires http://ip:port, consul requires ip:port
// path is the path in the k/v store to retrieve configuration
// To retrieve a config file called myapp.json from /configs/myapp.json
// you should set path to /configs and set config name (SetConfigName()) to
// "myapp"
func AddRemoteProvider(provider, endpoint, path string) error {
return v.AddRemoteProvider(provider, endpoint, path)
}
func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error {
if !stringInSlice(provider, SupportedRemoteProviders) {
return UnsupportedRemoteProviderError(provider)
}
if provider != "" && endpoint != "" {
jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
rp := &defaultRemoteProvider{
endpoint: endpoint,
provider: provider,
path: path,
}
if !v.providerPathExists(rp) {
v.remoteProviders = append(v.remoteProviders, rp)
}
}
return nil
}
// AddSecureRemoteProvider adds a remote configuration source.
// Secure Remote Providers are searched in the order they are added.
// provider is a string value, "etcd" or "consul" are currently supported.
// endpoint is the url. etcd requires http://ip:port, consul requires ip:port
// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg
// path is the path in the k/v store to retrieve configuration
// To retrieve a config file called myapp.json from /configs/myapp.json
// you should set path to /configs and set config name (SetConfigName()) to
// "myapp"
// Secure Remote Providers are implemented with github.com/xordataexchange/crypt
func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring)
}
func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
if !stringInSlice(provider, SupportedRemoteProviders) {
return UnsupportedRemoteProviderError(provider)
}
if provider != "" && endpoint != "" {
jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
rp := &defaultRemoteProvider{
endpoint: endpoint,
provider: provider,
path: path,
secretKeyring: secretkeyring,
}
if !v.providerPathExists(rp) {
v.remoteProviders = append(v.remoteProviders, rp)
}
}
return nil
}
func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool {
for _, y := range v.remoteProviders {
if reflect.DeepEqual(y, p) {
return true
}
}
return false
}
// searchMap recursively searches for a value for path in source map.
// Returns nil if not found.
func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} {
if len(path) == 0 {
return source
}
next, ok := source[path[0]]
if ok {
// Fast path
if len(path) == 1 {
return next
}
// Nested case
switch next.(type) {
case map[interface{}]interface{}:
return v.searchMap(cast.ToStringMap(next), path[1:])
case map[string]interface{}:
// Type assertion is safe here since it is only reached
// if the type of `next` is the same as the type being asserted
return v.searchMap(next.(map[string]interface{}), path[1:])
default:
// got a value but nested key expected, return "nil" for not found
return nil
}
}
return nil
}
// searchMapWithPathPrefixes recursively searches for a value for path in source map.
//
// While searchMap() considers each path element as a single map key, this
// function searches for, and prioritizes, merged path elements.
// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar"
// is also defined, this latter value is returned for path ["foo", "bar"].
//
// This should be useful only at config level (other maps may not contain dots
// in their keys).
//
// Note: This assumes that the path entries and map keys are lower cased.
func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []string) interface{} {
if len(path) == 0 {
return source
}
// search for path prefixes, starting from the longest one
for i := len(path); i > 0; i-- {
prefixKey := v.caseKey(strings.Join(path[0:i], v.keyDelim))
next, ok := source[prefixKey]
if ok {
// Fast path
if i == len(path) {
return next
}
// Nested case
var val interface{}
switch next.(type) {
case map[interface{}]interface{}:
val = v.searchMapWithPathPrefixes(cast.ToStringMap(next), path[i:])
case map[string]interface{}:
// Type assertion is safe here since it is only reached
// if the type of `next` is the same as the type being asserted
val = v.searchMapWithPathPrefixes(next.(map[string]interface{}), path[i:])
default:
// got a value but nested key expected, do nothing and look for next prefix
}
if val != nil {
return val
}
}
}
// not found
return nil
}
// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere
// on its path in the map.
// e.g., if "foo.bar" has a value in the given map, it “shadows”
// "foo.bar.baz" in a lower-priority map
func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string {
var parentVal interface{}
for i := 1; i < len(path); i++ {
parentVal = v.searchMap(m, path[0:i])
if parentVal == nil {
// not found, no need to add more path elements
return ""
}
switch parentVal.(type) {
case map[interface{}]interface{}:
continue
case map[string]interface{}:
continue
default:
// parentVal is a regular value which shadows "path"
return strings.Join(path[0:i], v.keyDelim)
}
}
return ""
}
// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere
// in a sub-path of the map.
// e.g., if "foo.bar" has a value in the given map, it “shadows”
// "foo.bar.baz" in a lower-priority map
func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string {
// unify input map
var m map[string]interface{}
switch mi.(type) {
case map[string]string, map[string]FlagValue:
m = cast.ToStringMap(mi)
default:
return ""
}
// scan paths
var parentKey string
for i := 1; i < len(path); i++ {
parentKey = strings.Join(path[0:i], v.keyDelim)
if _, ok := m[parentKey]; ok {
return parentKey
}
}
return ""
}
// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere
// in the environment, when automatic env is on.
// e.g., if "foo.bar" has a value in the environment, it “shadows”
// "foo.bar.baz" in a lower-priority map
func (v *Viper) isPathShadowedInAutoEnv(path []string) string {
var parentKey string
for i := 1; i < len(path); i++ {
parentKey = strings.Join(path[0:i], v.keyDelim)
if _, ok := v.getEnv(v.mergeWithEnvPrefix(parentKey)); ok {
return parentKey
}
}
return ""
}
// SetTypeByDefaultValue enables or disables the inference of a key value's
// type when the Get function is used based upon a key's default value as
// opposed to the value returned based on the normal fetch logic.
//
// For example, if a key has a default value of []string{} and the same key
// is set via an environment variable to "a b c", a call to the Get function
// would return a string slice for the key if the key's type is inferred by
// the default value and the Get function would return:
//
// []string {"a", "b", "c"}
//
// Otherwise the Get function would return:
//
// "a b c"
func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) }
func (v *Viper) SetTypeByDefaultValue(enable bool) {
v.typeByDefValue = enable
}
// GetViper gets the global Viper instance.
func GetViper() *Viper {
return v
}
// Get can retrieve any value given the key to use.
// Get's case-sensitivity for key is determined by viper.keyCaseSensitivity.
// Get has the behavior of returning the value associated with the first
// place from where it is set. Viper will check in the following order:
// override, flag, env, config file, key/value store, default
//
// Get returns an interface. For a specific value use one of the Get____ methods.
func Get(key string) interface{} { return v.Get(key) }
func (v *Viper) Get(key string) interface{} {
casedKey := v.caseKey(key)
val := v.find(casedKey)
if val == nil {
return nil
}
if v.typeByDefValue {
// TODO(bep) this branch isn't covered by a single test.
valType := val
path := strings.Split(casedKey, v.keyDelim)
defVal := v.searchMap(v.defaults, path)
if defVal != nil {
valType = defVal
}
switch valType.(type) {
case bool:
return cast.ToBool(val)
case string:
return cast.ToString(val)
case int32, int16, int8, int:
return cast.ToInt(val)
case int64:
return cast.ToInt64(val)
case float64, float32:
return cast.ToFloat64(val)
case time.Time:
return cast.ToTime(val)
case time.Duration:
return cast.ToDuration(val)
case []string:
return cast.ToStringSlice(val)
}
}
return val
}
// Sub returns new Viper instance representing a sub tree of this instance.
// Sub's case-sensitivity for key is determined by viper.keyCaseSensitivity.
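//
// For example, given this hypothetical YAML config:
//
//	cache:
//	  max-items: 100
//	  item-size: 64
//
// the following returns a Viper scoped to the "cache" subtree:
//
//	cacheCfg := viper.Sub("cache")
//	cacheCfg.GetInt("max-items") // 100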
func Sub(key string) *Viper { return v.Sub(key) }
func (v *Viper) Sub(key string) *Viper {
subv := New()
data := v.Get(key)
if data == nil {
return nil
}
if reflect.TypeOf(data).Kind() == reflect.Map {
subv.config = cast.ToStringMap(data)
return subv
}
return nil
}
// GetString returns the value associated with the key as a string.
func GetString(key string) string { return v.GetString(key) }
func (v *Viper) GetString(key string) string {
return cast.ToString(v.Get(key))
}
// GetBool returns the value associated with the key as a boolean.
func GetBool(key string) bool { return v.GetBool(key) }
func (v *Viper) GetBool(key string) bool {
return cast.ToBool(v.Get(key))
}
// GetInt returns the value associated with the key as an integer.
func GetInt(key string) int { return v.GetInt(key) }
func (v *Viper) GetInt(key string) int {
return cast.ToInt(v.Get(key))
}
// GetInt32 returns the value associated with the key as an integer.
func GetInt32(key string) int32 { return v.GetInt32(key) }
func (v *Viper) GetInt32(key string) int32 {
return cast.ToInt32(v.Get(key))
}
// GetInt64 returns the value associated with the key as an integer.
func GetInt64(key string) int64 { return v.GetInt64(key) }
func (v *Viper) GetInt64(key string) int64 {
return cast.ToInt64(v.Get(key))
}
// GetFloat64 returns the value associated with the key as a float64.
func GetFloat64(key string) float64 { return v.GetFloat64(key) }
func (v *Viper) GetFloat64(key string) float64 {
return cast.ToFloat64(v.Get(key))
}
// GetTime returns the value associated with the key as time.
func GetTime(key string) time.Time { return v.GetTime(key) }
func (v *Viper) GetTime(key string) time.Time {
return cast.ToTime(v.Get(key))
}
// GetDuration returns the value associated with the key as a duration.
func GetDuration(key string) time.Duration { return v.GetDuration(key) }
func (v *Viper) GetDuration(key string) time.Duration {
return cast.ToDuration(v.Get(key))
}
// GetStringSlice returns the value associated with the key as a slice of strings.
func GetStringSlice(key string) []string { return v.GetStringSlice(key) }
func (v *Viper) GetStringSlice(key string) []string {
return cast.ToStringSlice(v.Get(key))
}
// GetStringMap returns the value associated with the key as a map of interfaces.
func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) }
func (v *Viper) GetStringMap(key string) map[string]interface{} {
return cast.ToStringMap(v.Get(key))
}
// GetStringMapString returns the value associated with the key as a map of strings.
func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) }
func (v *Viper) GetStringMapString(key string) map[string]string {
return cast.ToStringMapString(v.Get(key))
}
// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings.
func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) }
func (v *Viper) GetStringMapStringSlice(key string) map[string][]string {
return cast.ToStringMapStringSlice(v.Get(key))
}
// GetSizeInBytes returns the size of the value associated with the given key
// in bytes.
func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) }
func (v *Viper) GetSizeInBytes(key string) uint {
sizeStr := cast.ToString(v.Get(key))
return parseSizeInBytes(sizeStr)
}
// UnmarshalKey takes a single key and unmarshals it into a Struct.
func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
return v.UnmarshalKey(key, rawVal, opts...)
}
func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
err := decode(v.Get(key), defaultDecoderConfig(rawVal, opts...))
if err != nil {
return err
}
return nil
}
// Unmarshal unmarshals the config into a Struct. Make sure that the tags
// on the fields of the structure are properly set.
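//
// A minimal sketch (the struct and keys are illustrative only):
//
//	type serverConfig struct {
//		Port int    `mapstructure:"port"`
//		Host string `mapstructure:"host"`
//	}
//
//	var c serverConfig
//	if err := viper.Unmarshal(&c); err != nil {
//		// handle decode error
//	}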
func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
return v.Unmarshal(rawVal, opts...)
}
func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
err := decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...))
if err != nil {
return err
}
return nil
}
// defaultDecoderConfig returns default mapstructure.DecoderConfig with support
// of time.Duration values & string slices
func defaultDecoderConfig(output interface{}, opts ...DecoderConfigOption) *mapstructure.DecoderConfig {
c := &mapstructure.DecoderConfig{
Metadata: nil,
Result: output,
WeaklyTypedInput: true,
DecodeHook: mapstructure.ComposeDecodeHookFunc(
mapstructure.StringToTimeDurationHookFunc(),
mapstructure.StringToSliceHookFunc(","),
),
}
for _, opt := range opts {
opt(c)
}
return c
}
// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality
func decode(input interface{}, config *mapstructure.DecoderConfig) error {
decoder, err := mapstructure.NewDecoder(config)
if err != nil {
return err
}
return decoder.Decode(input)
}
// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent
// in the destination struct.
func (v *Viper) UnmarshalExact(rawVal interface{}) error {
config := defaultDecoderConfig(rawVal)
config.ErrorUnused = true
return decode(v.AllSettings(), config)
}
// BindPFlags binds a full flag set to the configuration, using each flag's long
// name as the config key.
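//
// A usage sketch (the flag name and default are illustrative):
//
//	pflag.Int("flagname", 1234, "help message for flagname")
//	pflag.Parse()
//	viper.BindPFlags(pflag.CommandLine)
//	i := viper.GetInt("flagname") // retrieves the value set via the flag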
func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) }
func (v *Viper) BindPFlags(flags *pflag.FlagSet) error {
return v.BindFlagValues(pflagValueSet{flags})
}
// BindPFlag binds a specific key to a pflag (as used by cobra).
// Example (where serverCmd is a Cobra instance):
//
// serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
//
func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) }
func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error {
return v.BindFlagValue(key, pflagValue{flag})
}
// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long
// name as the config key.
func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) }
func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) {
flags.VisitAll(func(flag FlagValue) {
if err = v.BindFlagValue(flag.Name(), flag); err != nil {
return
}
})
return nil
}
// BindFlagValue binds a specific key to a FlagValue.
// Example (where serverCmd is a Cobra instance):
//
// serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
// Viper.BindFlagValue("port", serverCmd.Flags().Lookup("port"))
//
func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) }
func (v *Viper) BindFlagValue(key string, flag FlagValue) error {
if flag == nil {
return fmt.Errorf("flag for %q is nil", key)
}
v.pflags[v.caseKey(key)] = flag
return nil
}
// BindEnv binds a Viper key to an ENV variable.
// ENV variables are case sensitive.
// If only a key is provided, it will use the env key matching the key, uppercased.
// EnvPrefix will be used, when set, if the env name is not provided.
func BindEnv(input ...string) error { return v.BindEnv(input...) }
func (v *Viper) BindEnv(input ...string) error {
var key, envkey string
if len(input) == 0 {
return fmt.Errorf("BindEnv missing key to bind to")
}
key = v.caseKey(input[0])
if len(input) == 1 {
envkey = v.mergeWithEnvPrefix(key)
} else {
envkey = input[1]
}
v.env[key] = envkey
return nil
}
// Given a key, find the value.
// Viper will check in the following order:
// flag, env, config file, key/value store, default.
// Viper will check to see if an alias exists first.
// Note: By default, this assumes that a lowercase key is given.
// This behavior can be modified with viper.SetKeysCaseSensitive.
func (v *Viper) find(key string) interface{} {
var (
val interface{}
exists bool
path = strings.Split(key, v.keyDelim)
nested = len(path) > 1
)
// compute the path through the nested maps to the nested value
if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" {
return nil
}
// if the requested key is an alias, then return the proper key
key = v.realKey(key)
path = strings.Split(key, v.keyDelim)
nested = len(path) > 1
// Set() override first
val = v.searchMap(v.override, path)
if val != nil {
return val
}
if nested && v.isPathShadowedInDeepMap(path, v.override) != "" {
return nil
}
// PFlag override next
flag, exists := v.pflags[key]
if exists && flag.HasChanged() {
switch flag.ValueType() {
case "int", "int8", "int16", "int32", "int64":
return cast.ToInt(flag.ValueString())
case "bool":
return cast.ToBool(flag.ValueString())
case "stringSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return res
default:
return flag.ValueString()
}
}
if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" {
return nil
}
// Env override next
if v.automaticEnvApplied {
// even if it hasn't been registered, if automaticEnv is used,
// check any Get request
if val, ok := v.getEnv(v.mergeWithEnvPrefix(key)); ok {
return val
}
if nested && v.isPathShadowedInAutoEnv(path) != "" {
return nil
}
}
envkey, exists := v.env[key]
if exists {
if val, ok := v.getEnv(envkey); ok {
return val
}
}
if nested && v.isPathShadowedInFlatMap(path, v.env) != "" {
return nil
}
// Config file next
val = v.searchMapWithPathPrefixes(v.config, path)
if val != nil {
return val
}
if nested && v.isPathShadowedInDeepMap(path, v.config) != "" {
return nil
}
// K/V store next
val = v.searchMap(v.kvstore, path)
if val != nil {
return val
}
if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" {
return nil
}
// Default next
val = v.searchMap(v.defaults, path)
if val != nil {
return val
}
if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" {
return nil
}
// last chance: if no other value is returned and a flag does exist for the value,
// get the flag's value even if the flag's value has not changed
if flag, exists := v.pflags[key]; exists {
switch flag.ValueType() {
case "int", "int8", "int16", "int32", "int64":
return cast.ToInt(flag.ValueString())
case "bool":
return cast.ToBool(flag.ValueString())
case "stringSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return res
default:
return flag.ValueString()
}
}
// last item, no need to check shadowing
return nil
}
func readAsCSV(val string) ([]string, error) {
if val == "" {
return []string{}, nil
}
stringReader := strings.NewReader(val)
csvReader := csv.NewReader(stringReader)
return csvReader.Read()
}
// IsSet checks to see if the key has been set in any of the data locations.
// IsSet is case-insensitive for a key. This behavior can be modified
// with viper.SetKeysCaseSensitive.
func IsSet(key string) bool { return v.IsSet(key) }
func (v *Viper) IsSet(key string) bool {
casedKey := v.caseKey(key)
val := v.find(casedKey)
return val != nil
}
// AutomaticEnv has Viper check ENV variables for all
// keys set in config, default & flags.
func AutomaticEnv() { v.AutomaticEnv() }
func (v *Viper) AutomaticEnv() {
v.automaticEnvApplied = true
}
// SetEnvKeyReplacer sets the strings.Replacer on the viper object
// Useful for mapping an environmental variable to a key that does
// not match it.
func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) }
func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) {
v.envKeyReplacer = r
}
// Aliases provide another accessor for the same key.
// This enables one to change a name without breaking the application
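//
// For example (key names are illustrative):
//
//	viper.RegisterAlias("loud", "Verbose")
//	viper.Set("verbose", true) // the next two calls now return the same value
//	viper.GetBool("loud")      // true
//	viper.GetBool("verbose")   // true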
func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) }
func (v *Viper) RegisterAlias(alias string, key string) {
v.registerAlias(alias, v.caseKey(key))
}
func (v *Viper) registerAlias(alias string, key string) {
alias = v.caseKey(alias)
if alias != key && alias != v.realKey(key) {
_, exists := v.aliases[alias]
if !exists {
// if we alias something that exists in one of the maps to another
// name, we'll never be able to get that value using the original
// name, so move the config value to the new realkey.
if val, ok := v.config[alias]; ok {
delete(v.config, alias)
v.config[key] = val
}
if val, ok := v.kvstore[alias]; ok {
delete(v.kvstore, alias)
v.kvstore[key] = val
}
if val, ok := v.defaults[alias]; ok {
delete(v.defaults, alias)
v.defaults[key] = val
}
if val, ok := v.override[alias]; ok {
delete(v.override, alias)
v.override[key] = val
}
v.aliases[alias] = key
}
} else {
jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key))
}
}
func (v *Viper) realKey(key string) string {
newkey, exists := v.aliases[key]
if exists {
jww.DEBUG.Println("Alias", key, "to", newkey)
return v.realKey(newkey)
}
return key
}
// InConfig checks to see if the given key (or an alias) is in the config file.
func InConfig(key string) bool { return v.InConfig(key) }
func (v *Viper) InConfig(key string) bool {
// if the requested key is an alias, then return the proper key
key = v.realKey(key)
_, exists := v.config[key]
return exists
}
// SetDefault sets the default value for this key.
// SetDefault is case-insensitive for a key. This behavior can be modified
// with viper.SetKeysCaseSensitive.
// Default only used when no value is provided by the user via flag, config or ENV.
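//
// For example:
//
//	viper.SetDefault("ContentDir", "content")
//	viper.SetDefault("Taxonomies", map[string]string{"tag": "tags", "category": "categories"})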
func SetDefault(key string, value interface{}) { v.SetDefault(key, value) }
func (v *Viper) SetDefault(key string, value interface{}) {
// If alias passed in, then set the proper default
key = v.realKey(v.caseKey(key))
if !v.caseSensitiveKeys {
value = toCaseInsensitiveValue(value)
}
path := strings.Split(key, v.keyDelim)
lastKey := v.caseKey(path[len(path)-1])
deepestMap := deepSearch(v.defaults, path[0:len(path)-1])
// set innermost value
deepestMap[lastKey] = value
}
// Set sets the value for the key in the override register.
// Set is case-insensitive for a key. This behavior can be modified
// with viper.SetKeysCaseSensitive.
// Will be used instead of values obtained via
// flags, config file, ENV, default, or key/value store.
func Set(key string, value interface{}) { v.Set(key, value) }
func (v *Viper) Set(key string, value interface{}) {
// If alias passed in, then set the proper override
key = v.realKey(v.caseKey(key))
if !v.caseSensitiveKeys {
value = toCaseInsensitiveValue(value)
}
path := strings.Split(key, v.keyDelim)
lastKey := v.caseKey(path[len(path)-1])
deepestMap := deepSearch(v.override, path[0:len(path)-1])
// set innermost value
deepestMap[lastKey] = value
}
// ReadInConfig will discover and load the configuration file from disk
// and key/value stores, searching in one of the defined paths.
func ReadInConfig() error { return v.ReadInConfig() }
func (v *Viper) ReadInConfig() error {
jww.INFO.Println("Attempting to read in config file")
filename, err := v.getConfigFile()
if err != nil {
return err
}
if !stringInSlice(v.getConfigType(), SupportedExts) {
return UnsupportedConfigError(v.getConfigType())
}
jww.DEBUG.Println("Reading file: ", filename)
file, err := afero.ReadFile(v.fs, filename)
if err != nil {
return err
}
config := make(map[string]interface{})
err = v.unmarshalReader(bytes.NewReader(file), config)
if err != nil {
return err
}
v.config = config
return nil
}
// MergeInConfig merges a new configuration with an existing config.
func MergeInConfig() error { return v.MergeInConfig() }
func (v *Viper) MergeInConfig() error {
jww.INFO.Println("Attempting to merge in config file")
filename, err := v.getConfigFile()
if err != nil {
return err
}
if !stringInSlice(v.getConfigType(), SupportedExts) {
return UnsupportedConfigError(v.getConfigType())
}
file, err := afero.ReadFile(v.fs, filename)
if err != nil {
return err
}
return v.MergeConfig(bytes.NewReader(file))
}
// ReadConfig will read a configuration file, setting existing keys to nil if the
// key does not exist in the file.
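//
// A usage sketch reading from an in-memory buffer (the YAML body is illustrative):
//
//	viper.SetConfigType("yaml")
//	var yamlExample = []byte(`name: steve`)
//	viper.ReadConfig(bytes.NewBuffer(yamlExample))
//	viper.Get("name") // "steve"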
func ReadConfig(in io.Reader) error { return v.ReadConfig(in) }
func (v *Viper) ReadConfig(in io.Reader) error {
v.config = make(map[string]interface{})
return v.unmarshalReader(in, v.config)
}
// MergeConfig merges a new configuration with an existing config.
func MergeConfig(in io.Reader) error { return v.MergeConfig(in) }
func (v *Viper) MergeConfig(in io.Reader) error {
cfg := make(map[string]interface{})
if err := v.unmarshalReader(in, cfg); err != nil {
return err
}
return v.MergeConfigMap(cfg)
}
// MergeConfigMap merges the configuration from the map given with an existing config.
// Note that the map given may be modified.
func MergeConfigMap(cfg map[string]interface{}) error { return v.MergeConfigMap(cfg) }
func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error {
if v.config == nil {
v.config = make(map[string]interface{})
}
if !v.caseSensitiveKeys {
insensitiviseMap(cfg)
}
mergeMaps(cfg, v.config, nil)
return nil
}
// WriteConfig writes the current configuration to a file.
func WriteConfig() error { return v.WriteConfig() }
func (v *Viper) WriteConfig() error {
filename, err := v.getConfigFile()
if err != nil {
return err
}
return v.writeConfig(filename, true)
}
// SafeWriteConfig writes current configuration to file only if the file does not exist.
func SafeWriteConfig() error { return v.SafeWriteConfig() }
func (v *Viper) SafeWriteConfig() error {
filename, err := v.getConfigFile()
if err != nil {
return err
}
return v.writeConfig(filename, false)
}
// WriteConfigAs writes current configuration to a given filename.
func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) }
func (v *Viper) WriteConfigAs(filename string) error {
return v.writeConfig(filename, true)
}
// SafeWriteConfigAs writes current configuration to a given filename if it does not exist.
func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) }
func (v *Viper) SafeWriteConfigAs(filename string) error {
return v.writeConfig(filename, false)
}
func writeConfig(filename string, force bool) error { return v.writeConfig(filename, force) }
func (v *Viper) writeConfig(filename string, force bool) error {
jww.INFO.Println("Attempting to write configuration to file.")
ext := filepath.Ext(filename)
if len(ext) <= 1 {
return fmt.Errorf("Filename: %s requires valid extension.", filename)
}
configType := ext[1:]
if !stringInSlice(configType, SupportedExts) {
return UnsupportedConfigError(configType)
}
if v.config == nil {
v.config = make(map[string]interface{})
}
var flags int
if force {
flags = os.O_CREATE | os.O_TRUNC | os.O_WRONLY
} else {
if _, err := os.Stat(filename); os.IsNotExist(err) {
flags = os.O_WRONLY
} else {
return fmt.Errorf("File: %s exists. Use WriteConfig to overwrite.", filename)
}
}
f, err := v.fs.OpenFile(filename, flags, v.configPermissions)
if err != nil {
return err
}
return v.marshalWriter(f, configType)
}
// Unmarshal a Reader into a map.
// Should probably be an unexported function.
func unmarshalReader(in io.Reader, c map[string]interface{}) error {
return v.unmarshalReader(in, c)
}
func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error {
buf := new(bytes.Buffer)
buf.ReadFrom(in)
switch strings.ToLower(v.getConfigType()) {
case "yaml", "yml":
if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil {
return ConfigParseError{err}
}
case "json":
if err := json.Unmarshal(buf.Bytes(), &c); err != nil {
return ConfigParseError{err}
}
case "hcl":
obj, err := hcl.Parse(string(buf.Bytes()))
if err != nil {
return ConfigParseError{err}
}
if err = hcl.DecodeObject(&c, obj); err != nil {
return ConfigParseError{err}
}
case "toml":
tree, err := toml.LoadReader(buf)
if err != nil {
return ConfigParseError{err}
}
tmap := tree.ToMap()
for k, v := range tmap {
c[k] = v
}
case "properties", "props", "prop":
v.properties = properties.NewProperties()
var err error
if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil {
return ConfigParseError{err}
}
for _, key := range v.properties.Keys() {
value, _ := v.properties.Get(key)
// recursively build nested maps
path := strings.Split(key, ".")
lastKey := v.caseKey(path[len(path)-1])
deepestMap := deepSearch(c, path[0:len(path)-1])
// set innermost value
deepestMap[lastKey] = value
}
}
if !v.caseSensitiveKeys {
insensitiviseMap(c)
}
return nil
}
// Marshal a map into Writer.
func marshalWriter(f afero.File, configType string) error {
return v.marshalWriter(f, configType)
}
func (v *Viper) marshalWriter(f afero.File, configType string) error {
c := v.AllSettings()
switch configType {
case "json":
b, err := json.MarshalIndent(c, "", " ")
if err != nil {
return ConfigMarshalError{err}
}
_, err = f.WriteString(string(b))
if err != nil {
return ConfigMarshalError{err}
}
case "hcl":
		b, err := json.Marshal(c)
		if err != nil {
			return ConfigMarshalError{err}
		}
		ast, err := hcl.Parse(string(b))
if err != nil {
return ConfigMarshalError{err}
}
err = printer.Fprint(f, ast.Node)
if err != nil {
return ConfigMarshalError{err}
}
case "prop", "props", "properties":
if v.properties == nil {
v.properties = properties.NewProperties()
}
p := v.properties
for _, key := range v.AllKeys() {
_, _, err := p.Set(key, v.GetString(key))
if err != nil {
return ConfigMarshalError{err}
}
}
_, err := p.WriteComment(f, "#", properties.UTF8)
if err != nil {
return ConfigMarshalError{err}
}
case "toml":
t, err := toml.TreeFromMap(c)
if err != nil {
return ConfigMarshalError{err}
}
s := t.String()
if _, err := f.WriteString(s); err != nil {
return ConfigMarshalError{err}
}
case "yaml", "yml":
b, err := yaml.Marshal(c)
if err != nil {
return ConfigMarshalError{err}
}
if _, err = f.WriteString(string(b)); err != nil {
return ConfigMarshalError{err}
}
}
return nil
}
func keyExists(k string, m map[string]interface{}) string {
ck := v.caseKey(k)
for mk := range m {
if mk == ck {
return mk
}
}
return ""
}
func castToMapStringInterface(
src map[interface{}]interface{}) map[string]interface{} {
tgt := map[string]interface{}{}
for k, v := range src {
tgt[fmt.Sprintf("%v", k)] = v
}
return tgt
}
func castMapStringToMapInterface(src map[string]string) map[string]interface{} {
tgt := map[string]interface{}{}
for k, v := range src {
tgt[k] = v
}
return tgt
}
func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} {
tgt := map[string]interface{}{}
for k, v := range src {
tgt[k] = v
}
return tgt
}
// mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's
// insistence on parsing nested structures as `map[interface{}]interface{}`
// instead of using a `string` as the key for nested structures beyond one level
// deep. Both map types are supported as there is a go-yaml fork that uses
// `map[string]interface{}` instead.
func mergeMaps(
src, tgt map[string]interface{}, itgt map[interface{}]interface{}) {
for sk, sv := range src {
tk := keyExists(sk, tgt)
if tk == "" {
jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv)
tgt[sk] = sv
if itgt != nil {
itgt[sk] = sv
}
continue
}
tv, ok := tgt[tk]
if !ok {
jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv)
tgt[sk] = sv
if itgt != nil {
itgt[sk] = sv
}
continue
}
svType := reflect.TypeOf(sv)
tvType := reflect.TypeOf(tv)
if svType != tvType {
jww.ERROR.Printf(
"svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v",
sk, svType, tvType, sv, tv)
continue
}
jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v",
sk, svType, tvType, sv, tv)
switch ttv := tv.(type) {
case map[interface{}]interface{}:
jww.TRACE.Printf("merging maps (must convert)")
tsv := sv.(map[interface{}]interface{})
ssv := castToMapStringInterface(tsv)
stv := castToMapStringInterface(ttv)
mergeMaps(ssv, stv, ttv)
case map[string]interface{}:
jww.TRACE.Printf("merging maps")
mergeMaps(sv.(map[string]interface{}), ttv, nil)
default:
jww.TRACE.Printf("setting value")
tgt[tk] = sv
if itgt != nil {
itgt[tk] = sv
}
}
}
}
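// exampleMergeMaps is an illustrative sketch added for this edit, not part of
// the original API. It shows the layering rule implemented above: scalar keys
// from src win, keys present only in tgt survive, and nested maps are merged
// recursively.
func exampleMergeMaps() map[string]interface{} {
	src := map[string]interface{}{"a": 1, "nested": map[string]interface{}{"x": "new"}}
	tgt := map[string]interface{}{"b": 2, "nested": map[string]interface{}{"x": "old", "y": true}}
	mergeMaps(src, tgt, nil)
	// tgt is now {"a": 1, "b": 2, "nested": {"x": "new", "y": true}}
	return tgt
}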
// ReadRemoteConfig attempts to get configuration from a remote source
// and read it in the remote configuration registry.
func ReadRemoteConfig() error { return v.ReadRemoteConfig() }
func (v *Viper) ReadRemoteConfig() error {
return v.getKeyValueConfig()
}
func WatchRemoteConfig() error { return v.WatchRemoteConfig() }
func (v *Viper) WatchRemoteConfig() error {
return v.watchKeyValueConfig()
}
func (v *Viper) WatchRemoteConfigOnChannel() error {
return v.watchKeyValueConfigOnChannel()
}
// Retrieve the first found remote configuration.
func (v *Viper) getKeyValueConfig() error {
if RemoteConfig == nil {
return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'")
}
for _, rp := range v.remoteProviders {
val, err := v.getRemoteConfig(rp)
if err != nil {
continue
}
v.kvstore = val
return nil
}
return RemoteConfigError("No Files Found")
}
func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
reader, err := RemoteConfig.Get(provider)
if err != nil {
return nil, err
}
err = v.unmarshalReader(reader, v.kvstore)
return v.kvstore, err
}
// Retrieve the first found remote configuration.
func (v *Viper) watchKeyValueConfigOnChannel() error {
for _, rp := range v.remoteProviders {
respc, _ := RemoteConfig.WatchChannel(rp)
			// TODO: add quit channel
go func(rc <-chan *RemoteResponse) {
for {
b := <-rc
reader := bytes.NewReader(b.Value)
v.unmarshalReader(reader, v.kvstore)
}
}(respc)
return nil
}
return RemoteConfigError("No Files Found")
}
// Retrieve the first found remote configuration.
func (v *Viper) watchKeyValueConfig() error {
for _, rp := range v.remoteProviders {
val, err := v.watchRemoteConfig(rp)
if err != nil {
continue
}
v.kvstore = val
return nil
}
return RemoteConfigError("No Files Found")
}
func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
reader, err := RemoteConfig.Watch(provider)
if err != nil {
return nil, err
}
err = v.unmarshalReader(reader, v.kvstore)
return v.kvstore, err
}
// AllKeys returns all keys holding a value, regardless of where they are set.
// Nested keys are returned with a v.keyDelim (= ".") separator
func AllKeys() []string { return v.AllKeys() }
func (v *Viper) AllKeys() []string {
m := map[string]bool{}
// add all paths, by order of descending priority to ensure correct shadowing
m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "")
m = v.flattenAndMergeMap(m, v.override, "")
m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags))
m = v.mergeFlatMap(m, castMapStringToMapInterface(v.env))
m = v.flattenAndMergeMap(m, v.config, "")
m = v.flattenAndMergeMap(m, v.kvstore, "")
m = v.flattenAndMergeMap(m, v.defaults, "")
// convert set of paths to list
a := []string{}
for x := range m {
a = append(a, x)
}
return a
}
// flattenAndMergeMap recursively flattens the given map into a map[string]bool
// of key paths (used as a set, easier to manipulate than a []string):
// - each path is merged into a single key string, delimited with v.keyDelim (= ".")
// - if a path is shadowed by an earlier value in the initial shadow map,
// it is skipped.
// The resulting set of paths is merged to the given shadow set at the same time.
func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool {
if shadow != nil && prefix != "" && shadow[prefix] {
// prefix is shadowed => nothing more to flatten
return shadow
}
if shadow == nil {
shadow = make(map[string]bool)
}
var m2 map[string]interface{}
if prefix != "" {
prefix += v.keyDelim
}
for k, val := range m {
fullKey := prefix + k
switch val.(type) {
case map[string]interface{}:
m2 = val.(map[string]interface{})
case map[interface{}]interface{}:
m2 = cast.ToStringMap(val)
default:
// immediate value
shadow[v.caseKey(fullKey)] = true
continue
}
// recursively merge to shadow map
shadow = v.flattenAndMergeMap(shadow, m2, fullKey)
}
return shadow
}
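// exampleFlattenAndMergeMap is an illustrative sketch added for this edit, not
// part of the original API. It shows how a nested map collapses into a set of
// keyDelim-joined paths; with default settings the keys come back lowercased.
func exampleFlattenAndMergeMap(v *Viper) map[string]bool {
	return v.flattenAndMergeMap(nil, map[string]interface{}{
		"port": 8080,
		"db":   map[string]interface{}{"host": "localhost"},
	}, "")
	// result: {"port": true, "db.host": true}
}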
// mergeFlatMap merges the given maps, excluding values of the second map
// shadowed by values from the first map.
func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool {
// scan keys
outer:
	for k := range m {
path := strings.Split(k, v.keyDelim)
// scan intermediate paths
var parentKey string
for i := 1; i < len(path); i++ {
parentKey = strings.Join(path[0:i], v.keyDelim)
if shadow[parentKey] {
// path is shadowed, continue
continue outer
}
}
// add key
shadow[v.caseKey(k)] = true
}
return shadow
}
// AllSettings merges all settings and returns them as a map[string]interface{}.
func AllSettings() map[string]interface{} { return v.AllSettings() }
func (v *Viper) AllSettings() map[string]interface{} {
m := map[string]interface{}{}
// start from the list of keys, and construct the map one value at a time
for _, k := range v.AllKeys() {
value := v.Get(k)
if value == nil {
// should not happen, since AllKeys() returns only keys holding a value,
// check just in case anything changes
continue
}
path := strings.Split(k, v.keyDelim)
lastKey := v.caseKey(path[len(path)-1])
deepestMap := deepSearch(m, path[0:len(path)-1])
// set innermost value
deepestMap[lastKey] = value
}
return m
}
// SetFs sets the filesystem to use to read configuration.
func SetFs(fs afero.Fs) { v.SetFs(fs) }
func (v *Viper) SetFs(fs afero.Fs) {
v.fs = fs
}
// SetConfigName sets name for the config file.
// Does not include extension.
func SetConfigName(in string) { v.SetConfigName(in) }
func (v *Viper) SetConfigName(in string) {
if in != "" {
v.configName = in
v.configFile = ""
}
}
// SetConfigType sets the type of the configuration returned by the
// remote source, e.g. "json".
func SetConfigType(in string) { v.SetConfigType(in) }
func (v *Viper) SetConfigType(in string) {
if in != "" {
v.configType = in
}
}
// SetKeysCaseSensitive disables the default behaviour of
// case-insensitivising (lowercasing) keys and preserves the key casing
// as it is found in the config files. It is important
// to note that operations such as set and merge performed while
// case sensitivity is 'on' are incompatible with operations performed
// after it is turned 'off': a key that is set while case sensitivity
// is 'on' may not be retrievable once case sensitivity is turned 'off',
// as the original casing is permanently lost in the former mode.
// Ideally, this should be invoked only once, during initialisation,
// and all subsequent usage must adhere to the same case sensitivity.
func SetKeysCaseSensitive(on bool) { v.SetKeysCaseSensitive(on) }
func (v *Viper) SetKeysCaseSensitive(on bool) {
v.caseSensitiveKeys = on
}
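// exampleKeyCasing is an illustrative sketch added for this edit, not part of
// the original API. It assumes the package's New constructor and Set/GetString
// methods (as in upstream viper). Key casing must be chosen once, before any
// values are set, and every later lookup must use the same casing mode.
func exampleKeyCasing() string {
	v := New()
	v.SetKeysCaseSensitive(true) // decide casing behaviour first
	v.Set("Database.Host", "localhost")
	return v.GetString("Database.Host") // exact casing required from now on
}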
func (v *Viper) getConfigType() string {
if v.configType != "" {
return v.configType
}
cf, err := v.getConfigFile()
if err != nil {
return ""
}
ext := filepath.Ext(cf)
if len(ext) > 1 {
return ext[1:]
}
return ""
}
func (v *Viper) getConfigFile() (string, error) {
if v.configFile == "" {
cf, err := v.findConfigFile()
if err != nil {
return "", err
}
v.configFile = cf
}
return v.configFile, nil
}
func (v *Viper) searchInPath(in string) (filename string) {
jww.DEBUG.Println("Searching for config in ", in)
for _, ext := range SupportedExts {
jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext))
if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b {
jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext))
return filepath.Join(in, v.configName+"."+ext)
}
}
return ""
}
// caseKey cases (preserves sensitivity or lowercases) a
// given key based on the keyCaseSensitivity config.
func (v *Viper) caseKey(in string) string {
if v.caseSensitiveKeys {
return in
}
return strings.ToLower(in)
}
// Search all configPaths for any config file.
// Returns the first path that exists (and is a config file).
func (v *Viper) findConfigFile() (string, error) {
jww.INFO.Println("Searching for config in ", v.configPaths)
for _, cp := range v.configPaths {
file := v.searchInPath(cp)
if file != "" {
return file, nil
}
}
return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)}
}
// Debug prints all configuration registries for debugging
// purposes.
func Debug() { v.Debug() }
func (v *Viper) Debug() {
fmt.Printf("Aliases:\n%#v\n", v.aliases)
fmt.Printf("Override:\n%#v\n", v.override)
fmt.Printf("PFlags:\n%#v\n", v.pflags)
fmt.Printf("Env:\n%#v\n", v.env)
fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore)
fmt.Printf("Config:\n%#v\n", v.config)
fmt.Printf("Defaults:\n%#v\n", v.defaults)
}
| []
| []
| []
| [] | [] | go | 0 | 0 | |
vendor/github.com/docker/docker/integration-cli/docker_test_vars.go | package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"github.com/docker/docker/pkg/reexec"
)
var (
// the docker client binary to use
dockerBinary = "docker"
// the docker daemon binary to use
dockerdBinary = "dockerd"
// path to containerd's ctr binary
ctrBinary = "docker-containerd-ctr"
// the private registry image to use for tests involving the registry
registryImageName = "registry"
// the private registry to use for tests
privateRegistryURL = "127.0.0.1:5000"
// TODO Windows CI. These are incorrect and need fixing into
// platform specific pieces.
runtimePath = "/var/run/docker"
workingDirectory string
// isLocalDaemon is true if the daemon under test is on the same
// host as the CLI.
isLocalDaemon bool
// daemonPlatform is held globally so that tests can make intelligent
// decisions on how to configure themselves according to the platform
// of the daemon. This is initialized in docker_utils by sending
// a version call to the daemon and examining the response header.
daemonPlatform string
// windowsDaemonKV is used on Windows to distinguish between different
// versions. This is necessary to enable certain tests based on whether
// the platform supports it. For example, Windows Server 2016 TP3 did
// not support volumes, but TP4 did.
windowsDaemonKV int
// daemonDefaultImage is the name of the default image to use when running
// tests. This is platform dependent.
daemonDefaultImage string
// For a local daemon on Linux, these values will be used for testing
// user namespace support as the standard graph path(s) will be
// appended with the root remapped uid.gid prefix
dockerBasePath string
volumesConfigPath string
containerStoragePath string
// daemonStorageDriver is held globally so that tests can know the storage
// driver of the daemon. This is initialized in docker_utils by sending
// a version call to the daemon and examining the response header.
daemonStorageDriver string
// WindowsBaseImage is the name of the base image for Windows testing
// Environment variable WINDOWS_BASE_IMAGE can override this
WindowsBaseImage = "windowsservercore"
// daemonPid is the pid of the main test daemon
daemonPid int
)
const (
// DefaultImage is the name of the base image for the majority of tests that
// are run across suites
DefaultImage = "busybox"
)
func init() {
reexec.Init()
if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" {
dockerBinary = dockerBin
}
var err error
dockerBinary, err = exec.LookPath(dockerBinary)
if err != nil {
fmt.Printf("ERROR: couldn't resolve full path to the Docker binary (%v)\n", err)
os.Exit(1)
}
if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" {
registryImageName = registryImage
}
if registry := os.Getenv("REGISTRY_URL"); registry != "" {
privateRegistryURL = registry
}
workingDirectory, _ = os.Getwd()
// Deterministically working out the environment in which CI is running
// to evaluate whether the daemon is local or remote is not possible through
// a build tag.
//
// For example Windows to Linux CI under Jenkins tests the 64-bit
// Windows binary build with the daemon build tag, but calls a remote
// Linux daemon.
//
// We can't just say if Windows then assume the daemon is local as at
// some point, we will be testing the Windows CLI against a Windows daemon.
//
// Similarly, it will be perfectly valid to also run CLI tests from
// a Linux CLI (built with the daemon tag) against a Windows daemon.
if len(os.Getenv("DOCKER_REMOTE_DAEMON")) > 0 {
isLocalDaemon = false
} else {
isLocalDaemon = true
}
	// TODO Windows CI. These are incorrect and need fixing into
// platform specific pieces.
	// This is only used for tests where the daemon is local (Linux-only today)
// default is "/var/lib/docker", but we'll try and ask the
// /info endpoint for the specific root dir
dockerBasePath = "/var/lib/docker"
type Info struct {
DockerRootDir string
}
var i Info
status, b, err := sockRequest("GET", "/info", nil)
if err == nil && status == 200 {
if err = json.Unmarshal(b, &i); err == nil {
dockerBasePath = i.DockerRootDir
}
}
volumesConfigPath = dockerBasePath + "/volumes"
containerStoragePath = dockerBasePath + "/containers"
if len(os.Getenv("WINDOWS_BASE_IMAGE")) > 0 {
WindowsBaseImage = os.Getenv("WINDOWS_BASE_IMAGE")
fmt.Println("INFO: Windows Base image is ", WindowsBaseImage)
}
dest := os.Getenv("DEST")
b, err = ioutil.ReadFile(filepath.Join(dest, "docker.pid"))
if err == nil {
if p, err := strconv.ParseInt(string(b), 10, 32); err == nil {
daemonPid = int(p)
}
}
}
| [
"\"DOCKER_BINARY\"",
"\"REGISTRY_IMAGE\"",
"\"REGISTRY_URL\"",
"\"DOCKER_REMOTE_DAEMON\"",
"\"WINDOWS_BASE_IMAGE\"",
"\"WINDOWS_BASE_IMAGE\"",
"\"DEST\""
]
| []
| [
"DOCKER_BINARY",
"REGISTRY_URL",
"REGISTRY_IMAGE",
"DOCKER_REMOTE_DAEMON",
"DEST",
"WINDOWS_BASE_IMAGE"
]
| [] | ["DOCKER_BINARY", "REGISTRY_URL", "REGISTRY_IMAGE", "DOCKER_REMOTE_DAEMON", "DEST", "WINDOWS_BASE_IMAGE"] | go | 6 | 0 | |
sdk-py/global_sign_out.py | import os
import boto3
from getpass import getpass
from dotenv import load_dotenv
dotenv_path = os.path.join(os.path.dirname(__file__), ".env")
load_dotenv(dotenv_path)
REGION_NAME = os.getenv("REGION_NAME")
CLIENT_ID = os.getenv("CLIENT_ID")
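# Illustrative sketch of the .env file this script expects; the values below
# are placeholders, not real credentials:
#   REGION_NAME=us-east-1
#   CLIENT_ID=1example23456789abcdef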
client = boto3.client("cognito-idp", region_name=REGION_NAME)
username = input("[*] Enter Your Email Address: ")
password = getpass("[*] Enter Your Password: ")
response = client.initiate_auth(
ClientId=CLIENT_ID,
AuthFlow="USER_PASSWORD_AUTH",
AuthParameters={"USERNAME": username, "PASSWORD": password},
)
access_token = response["AuthenticationResult"]["AccessToken"]
refresh_token = response["AuthenticationResult"]["RefreshToken"]
print("[*] Issued Access Token and Refresh Token")
result = client.global_sign_out(
AccessToken=access_token,
)
assert result["ResponseMetadata"]["HTTPStatusCode"] == 200
print("[*] Global Sign Out Successful")
try:
client.initiate_auth(
ClientId=CLIENT_ID,
AuthFlow="REFRESH_TOKEN_AUTH",
AuthParameters={"USERNAME": username, "REFRESH_TOKEN": refresh_token},
)
except client.exceptions.NotAuthorizedException as e:
assert "Refresh Token has been revoked" in str(e)
print("[*] Refresh Token has been revoked")
print("[*] All done!")
| []
| []
| [
"REGION_NAME",
"CLIENT_ID"
]
| [] | ["REGION_NAME", "CLIENT_ID"] | python | 2 | 0 | |
libpod/runtime.go | package libpod
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"syscall"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/pkg/sysregistriesv2"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/libpod/events"
"github.com/containers/podman/v2/libpod/image"
"github.com/containers/podman/v2/libpod/lock"
"github.com/containers/podman/v2/pkg/cgroups"
"github.com/containers/podman/v2/pkg/registries"
"github.com/containers/podman/v2/pkg/rootless"
"github.com/containers/podman/v2/pkg/util"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/docker/pkg/namesgenerator"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// A RuntimeOption is a functional option which alters the Runtime created by
// NewRuntime
type RuntimeOption func(*Runtime) error
type storageSet struct {
RunRootSet bool
GraphRootSet bool
StaticDirSet bool
VolumePathSet bool
GraphDriverNameSet bool
TmpDirSet bool
}
// Runtime is the core libpod runtime
type Runtime struct {
config *config.Config
storageConfig storage.StoreOptions
storageSet storageSet
state State
store storage.Store
storageService *storageService
imageContext *types.SystemContext
defaultOCIRuntime OCIRuntime
ociRuntimes map[string]OCIRuntime
netPlugin ocicni.CNIPlugin
conmonPath string
imageRuntime *image.Runtime
lockManager lock.Manager
// doRenumber indicates that the runtime should perform a lock renumber
// during initialization.
// Once the runtime has been initialized and returned, this variable is
// unused.
doRenumber bool
doMigrate bool
// System migrate can move containers to a new runtime.
// We make no promises that these migrated containers work on the new
// runtime, though.
migrateRuntime string
// valid indicates whether the runtime is ready to use.
// valid is set to true when a runtime is returned from GetRuntime(),
// and remains true until the runtime is shut down (rendering its
// storage unusable). When valid is false, the runtime cannot be used.
valid bool
lock sync.RWMutex
	// mechanism to read and write event logs
eventer events.Eventer
// noStore indicates whether we need to interact with a store or not
noStore bool
}
// SetXdgDirs ensures the XDG_RUNTIME_DIR and XDG_CONFIG_HOME environment variables are set.
// containers/image uses XDG_RUNTIME_DIR to locate the auth file; XDG_CONFIG_HOME is
// used for the containers.conf configuration file.
func SetXdgDirs() error {
if !rootless.IsRootless() {
return nil
}
// Setup XDG_RUNTIME_DIR
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir == "" {
var err error
runtimeDir, err = util.GetRuntimeDir()
if err != nil {
return err
}
}
if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
}
if rootless.IsRootless() && os.Getenv("DBUS_SESSION_BUS_ADDRESS") == "" {
sessionAddr := filepath.Join(runtimeDir, "bus")
if _, err := os.Stat(sessionAddr); err == nil {
os.Setenv("DBUS_SESSION_BUS_ADDRESS", fmt.Sprintf("unix:path=%s", sessionAddr))
}
}
// Setup XDG_CONFIG_HOME
if cfgHomeDir := os.Getenv("XDG_CONFIG_HOME"); cfgHomeDir == "" {
cfgHomeDir, err := util.GetRootlessConfigHomeDir()
if err != nil {
return err
}
if err := os.Setenv("XDG_CONFIG_HOME", cfgHomeDir); err != nil {
return errors.Wrapf(err, "cannot set XDG_CONFIG_HOME")
}
}
return nil
}
// NewRuntime creates a new container runtime
// Options can be passed to override the default configuration for the runtime
func NewRuntime(ctx context.Context, options ...RuntimeOption) (*Runtime, error) {
conf, err := config.NewConfig("")
if err != nil {
return nil, err
}
conf.CheckCgroupsAndAdjustConfig()
return newRuntimeFromConfig(ctx, conf, options...)
}
// NewRuntimeFromConfig creates a new container runtime using the given
// configuration file for its default configuration. Passed RuntimeOption
// functions can be used to mutate this configuration further.
// An error will be returned if the configuration file at the given path does
// not exist or cannot be loaded
func NewRuntimeFromConfig(ctx context.Context, userConfig *config.Config, options ...RuntimeOption) (*Runtime, error) {
return newRuntimeFromConfig(ctx, userConfig, options...)
}
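// exampleNewRuntime is an illustrative sketch added for this edit, not part of
// the original API. It shows the intended call order: load (or build) a
// containers.conf configuration first, then construct the runtime from it;
// RuntimeOption functions could be appended to override defaults.
func exampleNewRuntime(ctx context.Context) (*Runtime, error) {
	conf, err := config.NewConfig("")
	if err != nil {
		return nil, err
	}
	return NewRuntimeFromConfig(ctx, conf)
}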
func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...RuntimeOption) (*Runtime, error) {
runtime := new(Runtime)
if conf.Engine.OCIRuntime == "" {
conf.Engine.OCIRuntime = "runc"
// If we're running on cgroups v2, default to using crun.
if onCgroupsv2, _ := cgroups.IsCgroup2UnifiedMode(); onCgroupsv2 {
conf.Engine.OCIRuntime = "crun"
}
}
runtime.config = conf
storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
if err != nil {
return nil, err
}
runtime.storageConfig = storeOpts
// Overwrite config with user-given configuration options
for _, opt := range options {
if err := opt(runtime); err != nil {
return nil, errors.Wrapf(err, "error configuring runtime")
}
}
if err := makeRuntime(ctx, runtime); err != nil {
return nil, err
}
return runtime, nil
}
func getLockManager(runtime *Runtime) (lock.Manager, error) {
var err error
var manager lock.Manager
switch runtime.config.Engine.LockType {
case "file":
lockPath := filepath.Join(runtime.config.Engine.TmpDir, "locks")
manager, err = lock.OpenFileLockManager(lockPath)
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
manager, err = lock.NewFileLockManager(lockPath)
if err != nil {
return nil, errors.Wrapf(err, "failed to get new file lock manager")
}
} else {
return nil, err
}
}
case "", "shm":
lockPath := define.DefaultSHMLockPath
if rootless.IsRootless() {
lockPath = fmt.Sprintf("%s_%d", define.DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
}
// Set up the lock manager
manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
switch {
case os.IsNotExist(errors.Cause(err)):
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
return nil, errors.Wrapf(err, "failed to get new shm lock manager")
}
case errors.Cause(err) == syscall.ERANGE && runtime.doRenumber:
logrus.Debugf("Number of locks does not match - removing old locks")
// ERANGE indicates a lock numbering mismatch.
// Since we're renumbering, this is not fatal.
// Remove the earlier set of locks and recreate.
if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
}
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
if err != nil {
return nil, err
}
default:
return nil, err
}
}
default:
return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.Engine.LockType)
}
return manager, nil
}
// Make a new runtime based on the given configuration
// Sets up containers/storage, state store, OCI runtime
func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) {
// Find a working conmon binary
cPath, err := runtime.config.FindConmon()
if err != nil {
return err
}
runtime.conmonPath = cPath
// Make the static files directory if it does not exist
if err := os.MkdirAll(runtime.config.Engine.StaticDir, 0700); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrapf(err, "error creating runtime static files directory %s",
runtime.config.Engine.StaticDir)
}
}
// Set up the state.
//
// TODO - if we further break out the state implementation into
// libpod/state, the config could take care of the code below. It
	// would further allow us to move the types and consts into a coherent
// package.
switch runtime.config.Engine.StateType {
case config.InMemoryStateStore:
state, err := NewInMemoryState()
if err != nil {
return err
}
runtime.state = state
case config.SQLiteStateStore:
return errors.Wrapf(define.ErrInvalidArg, "SQLite state is currently disabled")
case config.BoltDBStateStore:
dbPath := filepath.Join(runtime.config.Engine.StaticDir, "bolt_state.db")
state, err := NewBoltState(dbPath, runtime)
if err != nil {
return err
}
runtime.state = state
default:
return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed (%v)", runtime.config.Engine.StateType)
}
// Grab config from the database so we can reset some defaults
dbConfig, err := runtime.state.GetDBConfig()
if err != nil {
return errors.Wrapf(err, "error retrieving runtime configuration from database")
}
runtime.mergeDBConfig(dbConfig)
logrus.Debugf("Using graph driver %s", runtime.storageConfig.GraphDriverName)
logrus.Debugf("Using graph root %s", runtime.storageConfig.GraphRoot)
logrus.Debugf("Using run root %s", runtime.storageConfig.RunRoot)
logrus.Debugf("Using static dir %s", runtime.config.Engine.StaticDir)
logrus.Debugf("Using tmp dir %s", runtime.config.Engine.TmpDir)
logrus.Debugf("Using volume path %s", runtime.config.Engine.VolumePath)
// Validate our config against the database, now that we've set our
// final storage configuration
if err := runtime.state.ValidateDBConfig(runtime); err != nil {
return err
}
if err := runtime.state.SetNamespace(runtime.config.Engine.Namespace); err != nil {
return errors.Wrapf(err, "error setting libpod namespace in state")
}
logrus.Debugf("Set libpod namespace to %q", runtime.config.Engine.Namespace)
// Set up containers/storage
var store storage.Store
if os.Geteuid() != 0 {
logrus.Debug("Not configuring container store")
} else if runtime.noStore {
logrus.Debug("No store required. Not opening container store.")
} else if err := runtime.configureStore(); err != nil {
return err
}
defer func() {
if retErr != nil && store != nil {
// Don't forcibly shut down
// We could be opening a store in use by another libpod
if _, err := store.Shutdown(false); err != nil {
logrus.Errorf("Error removing store for partially-created runtime: %s", err)
}
}
}()
// Setup the eventer
eventer, err := runtime.newEventer()
if err != nil {
return err
}
runtime.eventer = eventer
if runtime.imageRuntime != nil {
runtime.imageRuntime.Eventer = eventer
}
// Set up containers/image
if runtime.imageContext == nil {
runtime.imageContext = &types.SystemContext{}
}
runtime.imageContext.SignaturePolicyPath = runtime.config.Engine.SignaturePolicyPath
// Create the tmpDir
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrapf(err, "error creating tmpdir %s", runtime.config.Engine.TmpDir)
}
}
// Create events log dir
if err := os.MkdirAll(filepath.Dir(runtime.config.Engine.EventsLogFilePath), 0700); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrapf(err, "error creating events dirs %s", filepath.Dir(runtime.config.Engine.EventsLogFilePath))
}
}
// Get us at least one working OCI runtime.
runtime.ociRuntimes = make(map[string]OCIRuntime)
// Initialize remaining OCI runtimes
for name, paths := range runtime.config.Engine.OCIRuntimes {
ociRuntime, err := newConmonOCIRuntime(name, paths, runtime.conmonPath, runtime.config)
if err != nil {
// Don't fatally error.
// This will allow us to ship configs including optional
// runtimes that might not be installed (crun, kata).
// Only a warnf so default configs don't spec errors.
logrus.Warnf("Error initializing configured OCI runtime %s: %v", name, err)
continue
}
runtime.ociRuntimes[name] = ociRuntime
}
// Do we have a default OCI runtime?
if runtime.config.Engine.OCIRuntime != "" {
// If the string starts with / it's a path to a runtime
// executable.
if strings.HasPrefix(runtime.config.Engine.OCIRuntime, "/") {
name := filepath.Base(runtime.config.Engine.OCIRuntime)
ociRuntime, err := newConmonOCIRuntime(name, []string{runtime.config.Engine.OCIRuntime}, runtime.conmonPath, runtime.config)
if err != nil {
return err
}
runtime.ociRuntimes[name] = ociRuntime
runtime.defaultOCIRuntime = ociRuntime
} else {
ociRuntime, ok := runtime.ociRuntimes[runtime.config.Engine.OCIRuntime]
if !ok {
return errors.Wrapf(define.ErrInvalidArg, "default OCI runtime %q not found", runtime.config.Engine.OCIRuntime)
}
runtime.defaultOCIRuntime = ociRuntime
}
}
// Do we have at least one valid OCI runtime?
if len(runtime.ociRuntimes) == 0 {
return errors.Wrapf(define.ErrInvalidArg, "no OCI runtime has been configured")
}
// Do we have a default runtime?
if runtime.defaultOCIRuntime == nil {
return errors.Wrapf(define.ErrInvalidArg, "no default OCI runtime was configured")
}
// Make the per-boot files directory if it does not exist
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0755); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrapf(err, "error creating runtime temporary files directory %s",
runtime.config.Engine.TmpDir)
}
}
// Set up the CNI net plugin
if !rootless.IsRootless() {
netPlugin, err := ocicni.InitCNI(runtime.config.Network.DefaultNetwork, runtime.config.Network.NetworkConfigDir, runtime.config.Network.CNIPluginDirs...)
if err != nil {
return errors.Wrapf(err, "error configuring CNI network plugin")
}
runtime.netPlugin = netPlugin
}
// We now need to see if the system has restarted
// We check for the presence of a file in our tmp directory to verify this
// This check must be locked to prevent races
runtimeAliveLock := filepath.Join(runtime.config.Engine.TmpDir, "alive.lck")
runtimeAliveFile := filepath.Join(runtime.config.Engine.TmpDir, "alive")
aliveLock, err := storage.GetLockfile(runtimeAliveLock)
if err != nil {
return errors.Wrapf(err, "error acquiring runtime init lock")
}
// Acquire the lock and hold it until we return
// This ensures that no two processes will be in runtime.refresh at once
// TODO: we can't close the FD in this lock, so we should keep it around
// and use it to lock important operations
aliveLock.Lock()
doRefresh := false
defer func() {
if aliveLock.Locked() {
aliveLock.Unlock()
}
}()
_, err = os.Stat(runtimeAliveFile)
if err != nil {
// If we need to refresh, then it is safe to assume there are
// no containers running. Create immediately a namespace, as
// we will need to access the storage.
if os.Geteuid() != 0 {
aliveLock.Unlock() // Unlock to avoid deadlock as BecomeRootInUserNS will reexec.
pausePid, err := util.GetRootlessPauseProcessPidPath()
if err != nil {
return errors.Wrapf(err, "could not get pause process pid file path")
}
became, ret, err := rootless.BecomeRootInUserNS(pausePid)
if err != nil {
return err
}
if became {
os.Exit(ret)
}
}
// If the file doesn't exist, we need to refresh the state
// This will trigger on first use as well, but refreshing an
// empty state only creates a single file
// As such, it's not really a performance concern
if os.IsNotExist(err) {
doRefresh = true
} else {
return errors.Wrapf(err, "error reading runtime status file %s", runtimeAliveFile)
}
}
runtime.lockManager, err = getLockManager(runtime)
if err != nil {
return err
}
// If we're renumbering locks, do it now.
// It breaks out of normal runtime init, and will not return a valid
// runtime.
if runtime.doRenumber {
if err := runtime.renumberLocks(); err != nil {
return err
}
}
// If we need to refresh the state, do it now - things are guaranteed to
// be set up by now.
if doRefresh {
// Ensure we have a store before refresh occurs
if runtime.store == nil {
if err := runtime.configureStore(); err != nil {
return err
}
}
if err2 := runtime.refresh(runtimeAliveFile); err2 != nil {
return err2
}
}
// Mark the runtime as valid - ready to be used, cannot be modified
// further
runtime.valid = true
if runtime.doMigrate {
if err := runtime.migrate(ctx); err != nil {
return err
}
}
return nil
}
// GetConfig returns a copy of the configuration used by the runtime
func (r *Runtime) GetConfig() (*config.Config, error) {
r.lock.RLock()
defer r.lock.RUnlock()
if !r.valid {
return nil, define.ErrRuntimeStopped
}
config := new(config.Config)
// Copy so the caller won't be able to modify the actual config
if err := JSONDeepCopy(r.config, config); err != nil {
return nil, errors.Wrapf(err, "error copying config")
}
return config, nil
}
// DeferredShutdown shuts down the runtime without exposing any
// errors. This is only meant to be used when the runtime is being
// shutdown within a defer statement; else use Shutdown
func (r *Runtime) DeferredShutdown(force bool) {
_ = r.Shutdown(force)
}
// Shutdown shuts down the runtime and associated containers and storage
// If force is true, containers and mounted storage will be shut down before
// cleaning up; if force is false, an error will be returned if there are
// still containers running or mounted
func (r *Runtime) Shutdown(force bool) error {
r.lock.Lock()
defer r.lock.Unlock()
if !r.valid {
return define.ErrRuntimeStopped
}
r.valid = false
// Shutdown all containers if --force is given
if force {
ctrs, err := r.state.AllContainers()
if err != nil {
logrus.Errorf("Error retrieving containers from database: %v", err)
} else {
for _, ctr := range ctrs {
if err := ctr.StopWithTimeout(r.config.Engine.StopTimeout); err != nil {
logrus.Errorf("Error stopping container %s: %v", ctr.ID(), err)
}
}
}
}
var lastError error
// If no store was requested, it can be nil and there is no need to
// attempt to shut it down
if r.store != nil {
if _, err := r.store.Shutdown(force); err != nil {
lastError = errors.Wrapf(err, "Error shutting down container storage")
}
}
if err := r.state.Close(); err != nil {
if lastError != nil {
logrus.Errorf("%v", lastError)
}
lastError = err
}
return lastError
}
// Reconfigures the runtime after a reboot
// Refreshes the state, recreating temporary files
// Does not check validity as the runtime is not valid until after this has run
func (r *Runtime) refresh(alivePath string) error {
logrus.Debugf("Podman detected system restart - performing state refresh")
// First clear the state in the database
if err := r.state.Refresh(); err != nil {
return err
}
// Next refresh the state of all containers to recreate dirs and
// namespaces, and all the pods to recreate cgroups.
// Containers, pods, and volumes must also reacquire their locks.
ctrs, err := r.state.AllContainers()
if err != nil {
return errors.Wrapf(err, "error retrieving all containers from state")
}
pods, err := r.state.AllPods()
if err != nil {
return errors.Wrapf(err, "error retrieving all pods from state")
}
vols, err := r.state.AllVolumes()
if err != nil {
return errors.Wrapf(err, "error retrieving all volumes from state")
}
// No locks are taken during pod, volume, and container refresh.
// Furthermore, the pod/volume/container refresh() functions are not
// allowed to take locks themselves.
// We cannot assume that any pod/volume/container has a valid lock until
// after this function has returned.
// The runtime alive lock should suffice to provide mutual exclusion
// until this has run.
for _, ctr := range ctrs {
if err := ctr.refresh(); err != nil {
logrus.Errorf("Error refreshing container %s: %v", ctr.ID(), err)
}
}
for _, pod := range pods {
if err := pod.refresh(); err != nil {
logrus.Errorf("Error refreshing pod %s: %v", pod.ID(), err)
}
}
for _, vol := range vols {
if err := vol.refresh(); err != nil {
logrus.Errorf("Error refreshing volume %s: %v", vol.Name(), err)
}
}
// Create a file indicating the runtime is alive and ready
file, err := os.OpenFile(alivePath, os.O_RDONLY|os.O_CREATE, 0644)
if err != nil {
return errors.Wrapf(err, "error creating runtime status file %s", alivePath)
}
defer file.Close()
r.newSystemEvent(events.Refresh)
return nil
}
// Info returns the store and host information
func (r *Runtime) Info() (*define.Info, error) {
return r.info()
}
// generateName generates a unique name for a container or pod.
func (r *Runtime) generateName() (string, error) {
for {
name := namesgenerator.GetRandomName(0)
// Make sure container with this name does not exist
if _, err := r.state.LookupContainer(name); err == nil {
continue
} else if errors.Cause(err) != define.ErrNoSuchCtr {
return "", err
}
// Make sure pod with this name does not exist
if _, err := r.state.LookupPod(name); err == nil {
continue
} else if errors.Cause(err) != define.ErrNoSuchPod {
return "", err
}
return name, nil
}
// The code should never reach here.
}
// Configure store and image runtime
func (r *Runtime) configureStore() error {
store, err := storage.GetStore(r.storageConfig)
if err != nil {
return err
}
r.store = store
is.Transport.SetStore(store)
// Set up a storage service for creating container root filesystems from
// images
r.storageService = getStorageService(r.store)
ir := image.NewImageRuntimeFromStore(r.store)
ir.SignaturePolicyPath = r.config.Engine.SignaturePolicyPath
ir.EventsLogFilePath = r.config.Engine.EventsLogFilePath
ir.EventsLogger = r.config.Engine.EventsLogger
r.imageRuntime = ir
return nil
}
// ImageRuntime returns the imageruntime for image operations.
// If WithNoStore() was used, no image runtime will be available, and this
// function will return nil.
func (r *Runtime) ImageRuntime() *image.Runtime {
return r.imageRuntime
}
// SystemContext returns the imagecontext
func (r *Runtime) SystemContext() *types.SystemContext {
return r.imageContext
}
// GetOCIRuntimePath retrieves the path of the default OCI runtime.
func (r *Runtime) GetOCIRuntimePath() string {
return r.defaultOCIRuntime.Path()
}
// StorageConfig retrieves the storage options for the container runtime
func (r *Runtime) StorageConfig() storage.StoreOptions {
return r.storageConfig
}
// GetStore returns the runtime stores
func (r *Runtime) GetStore() storage.Store {
return r.store
}
// GetName retrieves the name associated with a given full ID.
// This works for both containers and pods, and does not distinguish between the
// two.
// If the given ID does not correspond to any existing Pod or Container,
// ErrNoSuchCtr is returned.
func (r *Runtime) GetName(id string) (string, error) {
r.lock.RLock()
defer r.lock.RUnlock()
if !r.valid {
return "", define.ErrRuntimeStopped
}
return r.state.GetName(id)
}
// DBConfig is a set of Libpod runtime configuration settings that are saved in
// a State when it is first created, and can subsequently be retrieved.
type DBConfig struct {
LibpodRoot string
LibpodTmp string
StorageRoot string
StorageTmp string
GraphDriver string
VolumePath string
}
// mergeDBConfig merges the configuration from the database.
func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) {
c := &r.config.Engine
if !r.storageSet.RunRootSet && dbConfig.StorageTmp != "" {
if r.storageConfig.RunRoot != dbConfig.StorageTmp &&
r.storageConfig.RunRoot != "" {
logrus.Debugf("Overriding run root %q with %q from database",
r.storageConfig.RunRoot, dbConfig.StorageTmp)
}
r.storageConfig.RunRoot = dbConfig.StorageTmp
}
if !r.storageSet.GraphRootSet && dbConfig.StorageRoot != "" {
if r.storageConfig.GraphRoot != dbConfig.StorageRoot &&
r.storageConfig.GraphRoot != "" {
logrus.Debugf("Overriding graph root %q with %q from database",
r.storageConfig.GraphRoot, dbConfig.StorageRoot)
}
r.storageConfig.GraphRoot = dbConfig.StorageRoot
}
if !r.storageSet.GraphDriverNameSet && dbConfig.GraphDriver != "" {
if r.storageConfig.GraphDriverName != dbConfig.GraphDriver &&
r.storageConfig.GraphDriverName != "" {
logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve",
r.storageConfig.GraphDriverName, dbConfig.GraphDriver)
}
r.storageConfig.GraphDriverName = dbConfig.GraphDriver
}
if !r.storageSet.StaticDirSet && dbConfig.LibpodRoot != "" {
if c.StaticDir != dbConfig.LibpodRoot && c.StaticDir != "" {
logrus.Debugf("Overriding static dir %q with %q from database", c.StaticDir, dbConfig.LibpodRoot)
}
c.StaticDir = dbConfig.LibpodRoot
}
if !r.storageSet.TmpDirSet && dbConfig.LibpodTmp != "" {
if c.TmpDir != dbConfig.LibpodTmp && c.TmpDir != "" {
logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp)
}
c.TmpDir = dbConfig.LibpodTmp
c.EventsLogFilePath = filepath.Join(dbConfig.LibpodTmp, "events", "events.log")
}
if !r.storageSet.VolumePathSet && dbConfig.VolumePath != "" {
if c.VolumePath != dbConfig.VolumePath && c.VolumePath != "" {
logrus.Debugf("Overriding volume path %q with %q from database", c.VolumePath, dbConfig.VolumePath)
}
c.VolumePath = dbConfig.VolumePath
}
}
func (r *Runtime) EnableLabeling() bool {
return r.config.Containers.EnableLabeling
}
// Reload reloads the configurations files
func (r *Runtime) Reload() error {
if err := r.reloadContainersConf(); err != nil {
return err
}
if err := r.reloadStorageConf(); err != nil {
return err
}
if err := reloadRegistriesConf(); err != nil {
return err
}
return nil
}
// reloadContainersConf reloads the containers.conf
func (r *Runtime) reloadContainersConf() error {
config, err := config.Reload()
if err != nil {
return err
}
r.config = config
logrus.Infof("applied new containers configuration: %v", config)
return nil
}
// reloadRegistries reloads the registries.conf
func reloadRegistriesConf() error {
sysregistriesv2.InvalidateCache()
registries, err := sysregistriesv2.GetRegistries(&types.SystemContext{SystemRegistriesConfPath: registries.SystemRegistriesConfPath()})
if err != nil {
return err
}
logrus.Infof("applied new registry configuration: %+v", registries)
return nil
}
// reloadStorageConf reloads the storage.conf
func (r *Runtime) reloadStorageConf() error {
configFile, err := storage.DefaultConfigFile(rootless.IsRootless())
if err != nil {
return err
}
storage.ReloadConfigurationFile(configFile, &r.storageConfig)
logrus.Infof("applied new storage configuration: %v", r.storageConfig)
return nil
}
| [
"\"XDG_RUNTIME_DIR\"",
"\"DBUS_SESSION_BUS_ADDRESS\"",
"\"XDG_CONFIG_HOME\""
]
| []
| [
"XDG_RUNTIME_DIR",
"DBUS_SESSION_BUS_ADDRESS",
"XDG_CONFIG_HOME"
]
| [] | ["XDG_RUNTIME_DIR", "DBUS_SESSION_BUS_ADDRESS", "XDG_CONFIG_HOME"] | go | 3 | 0 | |
routes/callback.go | package routes
import (
"database/sql"
"encoding/json"
"net/http"
"os"
"time"
"github.com/code-golf/code-golf/session"
"golang.org/x/oauth2"
"golang.org/x/oauth2/github"
)
var config = oauth2.Config{
ClientID: "7f6709819023e9215205",
ClientSecret: os.Getenv("CLIENT_SECRET"),
Endpoint: github.Endpoint,
}
// /callback/dev exists because GitHub doesn't support multiple URLs.
// CallbackDev serves GET /callback/dev
func CallbackDev(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "https://localhost/callback?"+r.URL.RawQuery, http.StatusSeeOther)
}
// Callback serves GET /callback
func Callback(w http.ResponseWriter, r *http.Request) {
if r.FormValue("code") == "" {
w.WriteHeader(http.StatusBadRequest)
return
}
token, err := config.Exchange(r.Context(), r.FormValue("code"))
if err != nil {
panic(err)
}
req, err := http.NewRequestWithContext(
r.Context(), "GET", "https://api.github.com/user", nil)
if err != nil {
panic(err)
}
req.Header.Add("Authorization", "Bearer "+token.AccessToken)
res, err := http.DefaultClient.Do(req)
if err != nil {
panic(err)
}
defer res.Body.Close()
var user struct {
ID int
Login string
}
if err := json.NewDecoder(res.Body).Decode(&user); err != nil {
panic(err)
}
cookie := http.Cookie{
HttpOnly: true,
Name: "__Host-session",
Path: "/",
SameSite: http.SameSiteLaxMode,
Secure: true,
}
var timeZone sql.NullString
if tz, _ := time.LoadLocation(r.FormValue("time_zone")); tz != nil {
timeZone = sql.NullString{
String: tz.String(),
Valid: tz != time.Local && tz != time.UTC,
}
}
// Replace default 'UTC' time zone but don't overwrite a chosen time zone.
if err := session.Database(r).QueryRow(
`WITH golfer AS (
INSERT INTO users (id, login, time_zone) VALUES ($1, $2, $3)
ON CONFLICT (id)
DO UPDATE SET login = excluded.login,
time_zone = COALESCE(users.time_zone, excluded.time_zone)
RETURNING id
) INSERT INTO sessions (user_id) SELECT * FROM golfer RETURNING id`,
user.ID, user.Login, timeZone,
).Scan(&cookie.Value); err != nil {
panic(err)
}
http.SetCookie(w, &cookie)
uri := r.FormValue("redirect_uri")
if uri == "" {
uri = "/"
}
http.Redirect(w, r, uri, http.StatusSeeOther)
}
| [
"\"CLIENT_SECRET\""
]
| []
| [
"CLIENT_SECRET"
]
| [] | ["CLIENT_SECRET"] | go | 1 | 0 | |
examples/src/main/java/com/twitter/clientlib/auth/OAuth20AppOnlyGetAccessToken.java | /*
Copyright 2020 Twitter, Inc.
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
https://openapi-generator.tech
Do not edit the class manually.
*/
package com.twitter.clientlib.auth;
import java.util.HashSet;
import java.util.Set;
import com.github.scribejava.core.model.OAuth2AccessToken;
import com.twitter.clientlib.ApiException;
import com.twitter.clientlib.TwitterCredentialsBearer;
import com.twitter.clientlib.api.TwitterApi;
import com.twitter.clientlib.auth.TwitterOAuth20AppOnlyService;
import com.twitter.clientlib.model.ResourceUnauthorizedProblem;
import com.twitter.clientlib.model.Get2TweetsIdResponse;
/**
 * This is an example of getting an OAuth2 bearer token (app-only) and using it to call an API.
 * The TWITTER_CONSUMER_KEY & TWITTER_CONSUMER_SECRET environment variables are expected to be set; they are passed to TwitterOAuth20AppOnlyService.
*
* Example steps:
* 1. Set the TwitterOAuth20AppOnlyService with TWITTER_CONSUMER_KEY & TWITTER_CONSUMER_SECRET.
* 2. Get the Bearer token by calling the getAccessTokenClientCredentialsGrant().
 * 3. After receiving the access token, set its value into TwitterCredentialsBearer.
* 4. Call the API.
*/
public class OAuth20AppOnlyGetAccessToken {
public static void main(String[] args) {
OAuth2AccessToken accessToken = getAccessToken();
if (accessToken == null) {
return;
}
// Setting the bearer token into TwitterCredentials
TwitterCredentialsBearer credentials = new TwitterCredentialsBearer(accessToken.getAccessToken());
callApi(credentials);
}
public static OAuth2AccessToken getAccessToken() {
TwitterOAuth20AppOnlyService service = new TwitterOAuth20AppOnlyService(
System.getenv("TWITTER_CONSUMER_KEY"),
System.getenv("TWITTER_CONSUMER_SECRET"));
OAuth2AccessToken accessToken = null;
try {
accessToken = service.getAccessTokenClientCredentialsGrant();
System.out.println("Access token: " + accessToken.getAccessToken());
System.out.println("Token type: " + accessToken.getTokenType());
} catch (Exception e) {
System.err.println("Error while getting the access token:\n " + e);
e.printStackTrace();
}
return accessToken;
}
public static void callApi(TwitterCredentialsBearer credentials) {
TwitterApi apiInstance = new TwitterApi(credentials);
Set<String> tweetFields = new HashSet<>();
tweetFields.add("author_id");
tweetFields.add("id");
tweetFields.add("created_at");
try {
// findTweetById
Get2TweetsIdResponse result = apiInstance.tweets().findTweetById("20")
.tweetFields(tweetFields)
.execute();
if (result.getErrors() != null && result.getErrors().size() > 0) {
System.out.println("Error:");
result.getErrors().forEach(e -> {
System.out.println(e.toString());
if (e instanceof ResourceUnauthorizedProblem) {
System.out.println(
((ResourceUnauthorizedProblem) e).getTitle() + " " + ((ResourceUnauthorizedProblem) e).getDetail());
}
});
} else {
System.out.println("findTweetById - Tweet Text: " + result.toString());
}
} catch (ApiException e) {
System.err.println("Status code: " + e.getCode());
System.err.println("Reason: " + e.getResponseBody());
System.err.println("Response headers: " + e.getResponseHeaders());
e.printStackTrace();
} catch (Exception e) {
e.printStackTrace();
}
}
}
| [
"\"TWITTER_CONSUMER_KEY\"",
"\"TWITTER_CONSUMER_SECRET\""
]
| []
| [
"TWITTER_CONSUMER_SECRET",
"TWITTER_CONSUMER_KEY"
]
| [] | ["TWITTER_CONSUMER_SECRET", "TWITTER_CONSUMER_KEY"] | java | 2 | 0 | |
pkg/chaosdaemon/jvm_server.go | // Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package chaosdaemon
import (
"context"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"github.com/chaos-mesh/chaos-mesh/pkg/bpm"
pb "github.com/chaos-mesh/chaos-mesh/pkg/chaosdaemon/pb"
)
const (
bmInstallCommand = "bminstall.sh -b -Dorg.jboss.byteman.transform.all -Dorg.jboss.byteman.verbose -p %d %d"
bmSubmitCommand = "bmsubmit.sh -p %d -%s %s"
)
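// For illustration only (the port and pid values below are hypothetical): with
// port 9091, target pid 1234, and rule file /tmp/rule.btm, the templates above
// render to
//	bminstall.sh -b -Dorg.jboss.byteman.transform.all -Dorg.jboss.byteman.verbose -p 9091 1234
//	bmsubmit.sh -p 9091 -l /tmp/rule.btm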
func (s *DaemonServer) InstallJVMRules(ctx context.Context,
req *pb.InstallJVMRulesRequest) (*empty.Empty, error) {
log.Info("InstallJVMRules", "request", req)
pid, err := s.crClient.GetPidFromContainerID(ctx, req.ContainerId)
if err != nil {
log.Error(err, "GetPidFromContainerID")
return nil, err
}
containerPids := []uint32{pid}
childPids, err := GetChildProcesses(pid)
if err != nil {
log.Error(err, "GetChildProcesses")
}
containerPids = append(containerPids, childPids...)
for _, containerPid := range containerPids {
name, err := ReadCommName(int(containerPid))
if err != nil {
log.Error(err, "ReadCommName")
continue
}
if name == "java\n" {
pid = containerPid
break
}
}
bytemanHome := os.Getenv("BYTEMAN_HOME")
if len(bytemanHome) == 0 {
return nil, errors.New("environment variable BYTEMAN_HOME not set")
}
// copy agent.jar to container's namespace
if req.EnterNS {
processBuilder := bpm.DefaultProcessBuilder("sh", "-c", fmt.Sprintf("mkdir -p %s/lib/", bytemanHome)).SetContext(ctx).SetNS(pid, bpm.MountNS)
output, err := processBuilder.Build().CombinedOutput()
if err != nil {
return nil, err
}
if len(output) > 0 {
log.Info("mkdir", "output", string(output))
}
agentFile, err := os.Open(fmt.Sprintf("%s/lib/byteman.jar", bytemanHome))
if err != nil {
return nil, err
}
processBuilder = bpm.DefaultProcessBuilder("sh", "-c", "cat > /usr/local/byteman/lib/byteman.jar").SetContext(ctx)
processBuilder = processBuilder.SetNS(pid, bpm.MountNS).SetStdin(agentFile)
output, err = processBuilder.Build().CombinedOutput()
if err != nil {
return nil, err
}
if len(output) > 0 {
log.Info("copy agent.jar", "output", string(output))
}
}
bmInstallCmd := fmt.Sprintf(bmInstallCommand, req.Port, pid)
processBuilder := bpm.DefaultProcessBuilder("sh", "-c", bmInstallCmd).SetContext(ctx)
if req.EnterNS {
processBuilder = processBuilder.EnableLocalMnt()
}
cmd := processBuilder.Build()
output, err := cmd.CombinedOutput()
if err != nil {
		// this error occurs when the agent is installed more than once; ignore it and continue to submit the rule
errMsg1 := "Agent JAR loaded but agent failed to initialize"
		// these two errors occur when the java version is less than or equal to 1.8, and it is not clear why,
		// but the agent still installs successfully even with these errors, so just ignore them for now.
		// TODO: Investigate the cause of these two errors
errMsg2 := "Provider sun.tools.attach.LinuxAttachProvider not found"
errMsg3 := "install java.io.IOException: Non-numeric value found"
		// this error is caused by the different attach result codes in different java versions. In fact, the agent has attached successfully, so just ignore it here.
// refer to https://stackoverflow.com/questions/54340438/virtualmachine-attach-throws-com-sun-tools-attach-agentloadexception-0-when-usi/54454418#54454418
errMsg4 := "install com.sun.tools.attach.AgentLoadException"
if !strings.Contains(string(output), errMsg1) && !strings.Contains(string(output), errMsg2) &&
!strings.Contains(string(output), errMsg3) && !strings.Contains(string(output), errMsg4) {
log.Error(err, string(output))
return nil, err
}
		log.Info("exec command", "cmd", cmd.String(), "output", string(output), "error", err.Error())
}
// submit rules
filename, err := writeDataIntoFile(req.Rule, "rule.btm")
if err != nil {
return nil, err
}
bmSubmitCmd := fmt.Sprintf(bmSubmitCommand, req.Port, "l", filename)
processBuilder = bpm.DefaultProcessBuilder("sh", "-c", bmSubmitCmd).SetContext(ctx)
if req.EnterNS {
processBuilder = processBuilder.SetNS(pid, bpm.NetNS)
}
output, err = processBuilder.Build().CombinedOutput()
if err != nil {
log.Error(err, string(output))
return nil, err
}
if len(output) > 0 {
log.Info("submit rules", "output", string(output))
}
return &empty.Empty{}, nil
}
func (s *DaemonServer) UninstallJVMRules(ctx context.Context,
req *pb.UninstallJVMRulesRequest) (*empty.Empty, error) {
	log.Info("UninstallJVMRules", "request", req)
pid, err := s.crClient.GetPidFromContainerID(ctx, req.ContainerId)
if err != nil {
log.Error(err, "GetPidFromContainerID")
return nil, err
}
filename, err := writeDataIntoFile(req.Rule, "rule.btm")
if err != nil {
return nil, err
}
log.Info("create btm file", "file", filename)
bmSubmitCmd := fmt.Sprintf(bmSubmitCommand, req.Port, "u", filename)
processBuilder := bpm.DefaultProcessBuilder("sh", "-c", bmSubmitCmd).SetContext(ctx)
if req.EnterNS {
processBuilder = processBuilder.SetNS(pid, bpm.NetNS)
}
output, err := processBuilder.Build().CombinedOutput()
if err != nil {
log.Error(err, string(output))
if strings.Contains(string(output), "No rule scripts to remove") {
return &empty.Empty{}, nil
}
return nil, err
}
if len(output) > 0 {
log.Info(string(output))
}
return &empty.Empty{}, nil
}
func writeDataIntoFile(data string, filename string) (string, error) {
tmpfile, err := ioutil.TempFile("", filename)
if err != nil {
return "", err
}
if _, err := tmpfile.WriteString(data); err != nil {
return "", err
}
if err := tmpfile.Close(); err != nil {
return "", err
}
return tmpfile.Name(), err
}
| [
"\"BYTEMAN_HOME\""
]
| []
| [
"BYTEMAN_HOME"
]
| [] | ["BYTEMAN_HOME"] | go | 1 | 0 | |
arcEC.py | import arcpy
import sys
## Version 1.8 (8 functions) '130213/MaHvi
def SetMsg(msg, severity=0): # 0:Message, 1:Warning, 2:Error
#print msg
try:
for string in msg.split('\n'):
string = ":) "+string
if severity == 0:
arcpy.AddMessage(string)
elif severity == 1:
arcpy.AddWarning(string)
elif severity == 2:
arcpy.AddError(string)
except:
pass
def ecMessage(strI,numI=0,severity=0):
""" Neither message number nor severity is mandatory """
if numI == 0:
SetMsg(" Message: "+strI,0)
else:
SetMsg(" Message: "+str(numI)+" : "+strI,0)
def ecWarning(strI,numI,severity=0):
""" Severity is not mandatory """
SetMsg(" ! Warning: "+str(numI)+" : "+strI,1)
def ecError(strI,numI,severity):
""" Severity > 0 causes program termination """
SetMsg("!!!Error: "+str(numI)+" : "+strI,2)
if severity > 0:
sys.exit(numI)
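# Illustrative usage sketch (not part of the original module): the ec* helpers wrap
# SetMsg so a geoprocessing script can report progress at different severities, e.g.
#   ecMessage("Processing started")             # informational message
#   ecWarning("Field is empty", 101)            # warning with a message number
#   ecError("Cannot open workspace", 999, 1)    # severity > 0 terminates the script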
def Describe2String(desIn):
strReport = ""
if hasattr(desIn, "Name"):
strReport +="\n Name: "+desIn.Name
if hasattr(desIn, "baseName"):
strReport +="\n baseName: "+desIn.baseName
if hasattr(desIn, "dataType"):
strReport +="\n dataType: "+desIn.dataType
#if hasattr(desIn, "dataElementType"):
# strReport +="\n dataElementType: "+desIn.dataElementType
if hasattr(desIn, "catalogPath"):
strReport +="\n catalogPath: "+desIn.catalogPath
if hasattr(desIn, "children"):
strReport +="\n children: "+str(len(desIn.children))
if hasattr(desIn, "fields"):
strReport +="\n fields: "+str(len(desIn.fields))
if len(desIn.fields) > 0:
for fldX in desIn.fields:
strReport +="\n field: "+fldX.name
if hasattr(desIn, "pludder"):
strReport +="\n pludder: "+desIn.pludder
return strReport
def Table2Ascii(tblIn):
strReport = ""
desIn = arcpy.Describe(tblIn)
if hasattr(desIn, "dataType"):
if desIn.dataType == "Table":
strReport +="\n Table2Ascii ::"
if hasattr(desIn, "fields"):
strReport +="\n fields: "+str(len(desIn.fields))+"\n"
if len(desIn.fields) > 0:
for fldX in desIn.fields:
strReport +="|"+fldX.name+" <"+fldX.type+">"
rows = arcpy.SearchCursor(tblIn)
numRows = 0
for rowX in rows:
strReport += "\n "
for fldX in desIn.fields:
strReport += "|"+str(rowX.getValue(fldX.name))
numRows += 1
strReport += "\n Row count: "+str(numRows)
else:
strReport +="No Fields in tabel ..."
return strReport
def Table2Ascii_byFields(tblIn):
strReport = ""
desIn = arcpy.Describe(tblIn)
if hasattr(desIn, "dataType"):
if desIn.dataType == "Table":
strReport +="Table2Ascii_ByFields"
if hasattr(desIn, "fields"):
strReport +="\n fields: "+str(len(desIn.fields))
if len(desIn.fields) > 0:
for fldX in desIn.fields:
rows = arcpy.SearchCursor(tblIn)
strReport +="\n field: "+fldX.name+" <"+fldX.type+">"
strReport += "\n "
for rowX in rows:
strReport += "|"+str(rowX.getValue(fldX.name))
rows.reset()
return strReport
def Dict2String(dicIn):
strReport = ""
lstK = dicIn.keys()
lstK.sort()
for K in lstK:
strReport += str(K)+" : "+str(dicIn[K])+"\n"
return strReport
# Music that accompanied the coding of this script:
# Deep Forest - Savana Dance
| []
| []
| []
| [] | [] | python | null | null | null |
testing/petsc2h5.py | ### ====================================================================
### Python-file
### author: Ethan T. Coon
### filename: petsc2h5.py
### version:
### created: 15 November 2011
### on: 10:07:25 MST
### last modified: 15 November 2011
### at: 10:26:02 MST
### URL: http://www.ldeo.columbia.edu/~ecoon/
### email: ecoon _at_ lanl.gov
###
### ====================================================================
import sys,os
sys.path.append(os.path.join(os.environ['LBM_DIR'], 'src', 'testing'))
import solution_reader3
import h5py
import numpy as np
def _create_coords(out, shape):
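    """Create the Coordinates group with X/Y/Z cell-edge datasets, matching pflotran's h5 layout."""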
coords = out.create_group('Coordinates')
coords.create_dataset(name='X [m]', shape=(shape[0]+1,), dtype=np.float, data=np.arange(shape[0]+1))
coords.create_dataset(name='Y [m]', shape=(shape[1]+1,), dtype=np.float, data=np.arange(shape[1]+1))
coords.create_dataset(name='Z [m]', shape=(shape[2]+1,), dtype=np.float, data=np.arange(shape[2]+1))
def petsc2h5(directory, infile='input_data', outfile='soln.h5'):
sr = solution_reader3.SolutionReader(infile)
out = h5py.File(outfile, 'w')
    # make the coordinates, which are the exteriors of cells in pflotran's h5 format
if len(sr._size) < 3:
_create_coords(out, sr._size+(1,))
else:
_create_coords(out, sr._size)
for i in range(int(sr._options['npasses'])/int(sr._options['kwrite'])+1):
if len(sr._size) < 3:
prs = np.array(sr.loadVec('prs%03d.dat'%i)[:,:,0], dtype=np.float)
rho_raw = np.array(sr.loadVec('rho%03d.dat'%i), dtype=np.float)
            rho = [rho_raw[:,:,i] for i in range(rho_raw.shape[2])]
u = np.array(sr.loadVec('u%03d.dat'%i), dtype=np.float)
u_x = u[:,:,0]
u_y = u[:,:,1]
u_z = np.zeros(u_x.shape, u_x.dtype)
shape = sr._size+(1,)
else:
prs = np.array(sr.loadVec('prs%03d.dat'%i)[:,:,:,0], dtype=np.float)
rho_raw = np.array(sr.loadVec('rho%03d.dat'%i), dtype=np.float)
            rho = [rho_raw[:,:,:,i] for i in range(rho_raw.shape[3])]
u = np.array(sr.loadVec('u%03d.dat'%i),dtype=np.float)
u_x = u[:,:,:,0]
u_y = u[:,:,:,1]
u_z = u[:,:,:,2]
shape = sr._size
groupname = 'Time: %d.0000E+00 s'%i
group = out.create_group(groupname)
group.create_dataset(name='Pressure', shape=shape, data=prs)
group.create_dataset(name='Liquid X-Velocity', shape=shape, data=u_x)
group.create_dataset(name='Liquid Y-Velocity', shape=shape, data=u_y)
group.create_dataset(name='Liquid Z-Velocity', shape=shape, data=u_z)
for i,rho in enumerate(rho):
group.create_dataset(name='Component %d Density'%i, shape=shape,
data=rho)
out.close()
if __name__ == '__main__':
directory = sys.argv[-1]
petsc2h5(directory)
| []
| []
| [
"LBM_DIR"
]
| [] | ["LBM_DIR"] | python | 1 | 0 | |
uploader/s3/s3_test.go | package s3_test
import (
"log"
"os"
"testing"
. "github.com/image-server/image-server/test"
"github.com/image-server/image-server/uploader/s3"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/aws"
)
func TestItemToHash(t *testing.T) {
if !hasAwsAuthentication() {
return
}
bucketName := os.Getenv("AWS_BUCKET")
regionName := os.Getenv("AWS_REGION")
sess := session.Must(session.NewSession(&aws.Config{
Region: aws.String(regionName),
}))
Assert(t, sess != nil, "We need AWS access for integration tests")
s3.Initialize(bucketName, regionName)
uploader := s3.Uploader{}
existing, err := uploader.ListDirectory("p/543/47c/442/1c41f9467a3f5afed64943b")
Ok(t, err)
log.Println(existing)
// Equals(t, "6ad5544baa6f5e852e1af26f8c2e45db", image.ToHash())
}
func hasAwsAuthentication() bool {
hasRegion := len(os.Getenv("AWS_REGION")) > 0
hasBucket := len(os.Getenv("AWS_BUCKET")) > 0
return hasRegion && hasBucket
} | [
"\"AWS_BUCKET\"",
"\"AWS_REGION\"",
"\"AWS_REGION\"",
"\"AWS_BUCKET\""
]
| []
| [
"AWS_BUCKET",
"AWS_REGION"
]
| [] | ["AWS_BUCKET", "AWS_REGION"] | go | 2 | 0 | |
HackerNews_app_api/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.config")
os.environ.setdefault("DJANGO_CONFIGURATION", "Local")
try:
from configurations.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'topcoder.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
build/transaction.go | package build
import (
"encoding/hex"
"fmt"
"github.com/diamnet/go/network"
"github.com/diamnet/go/support/errors"
"github.com/diamnet/go/xdr"
)
// Transaction groups the creation of a new TransactionBuilder with a call
// to Mutate.
func Transaction(muts ...TransactionMutator) (*TransactionBuilder, error) {
result := &TransactionBuilder{}
err := result.Mutate(muts...)
if err != nil {
return nil, err
}
err = result.Mutate(Defaults{})
if err != nil {
return nil, err
}
return result, nil
}
// TransactionMutator is an interface that wraps the
// MutateTransaction operation. Types may implement this interface to
// specify how they modify an xdr.Transaction object
type TransactionMutator interface {
MutateTransaction(*TransactionBuilder) error
}
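// As an illustrative sketch (not part of the original API), a custom mutator only
// needs to implement MutateTransaction. For example, a mutator that doubles the
// fee of the transaction being built could look like:
//
//	type DoubleFee struct{}
//
//	func (DoubleFee) MutateTransaction(b *TransactionBuilder) error {
//		b.TX.Fee *= 2
//		return nil
//	}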
// TransactionBuilder represents a Transaction that is being constructed.
// Deprecated use txnbuild.Transaction instead
type TransactionBuilder struct {
TX *xdr.Transaction
NetworkPassphrase string
BaseFee uint64
}
// Mutate applies the provided TransactionMutators to this builder's transaction
func (b *TransactionBuilder) Mutate(muts ...TransactionMutator) error {
if b.TX == nil {
b.TX = &xdr.Transaction{}
}
for i, m := range muts {
err := m.MutateTransaction(b)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("mutator:%d failed", i))
}
}
return nil
}
// Hash returns the hash of this builder's transaction.
func (b *TransactionBuilder) Hash() ([32]byte, error) {
return network.HashTransaction(b.TX, b.NetworkPassphrase)
}
// HashHex returns the hex-encoded hash of this builder's transaction
func (b *TransactionBuilder) HashHex() (string, error) {
hash, err := b.Hash()
if err != nil {
return "", err
}
return hex.EncodeToString(hash[:]), nil
}
// Sign returns an new TransactionEnvelopeBuilder using this builder's
// transaction as the basis and with signatures of that transaction from the
// provided Signers.
func (b *TransactionBuilder) Sign(signers ...string) (TransactionEnvelopeBuilder, error) {
var result TransactionEnvelopeBuilder
err := result.Mutate(b)
if err != nil {
return result, err
}
for _, s := range signers {
err := result.Mutate(Sign{s})
if err != nil {
return result, err
}
}
return result, nil
}
// Envelope returns a new TransactionEnvelopeBuilder using this
// builder's transaction as the basis and with the provided
// mutators applied.
func (b *TransactionBuilder) Envelope(muts ...TransactionEnvelopeMutator) (TransactionEnvelopeBuilder, error) {
var teb TransactionEnvelopeBuilder
err := teb.Mutate(b)
if err != nil {
return teb, err
}
err = teb.Mutate(muts...)
return teb, err
}
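// As a hedged usage sketch (the address and seed below are placeholders, not
// real keys), a transaction is typically assembled from mutators, signed, and
// then encoded for submission:
//
//	tx, err := Transaction(
//		SourceAccount{AddressOrSeed: "GA...ADDRESS"},
//		Sequence{Sequence: 1},
//		MemoText{Value: "test"},
//	)
//	if err != nil {
//		// handle error
//	}
//	txe, err := tx.Sign("SA...SEED")
//	// txe can then be marshalled to base64 and submitted to the network.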
// ------------------------------------------------------------
//
// Mutator implementations
//
// ------------------------------------------------------------
// MutateTransaction for AccountMergeBuilder causes the underlying Destination
// to be added to the operation list for the provided transaction
func (m AccountMergeBuilder) MutateTransaction(o *TransactionBuilder) error {
if m.Err != nil {
return m.Err
}
m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeAccountMerge, m.Destination)
o.TX.Operations = append(o.TX.Operations, m.O)
return m.Err
}
// MutateTransaction for AllowTrustBuilder causes the underlying AllowTrustOp
// to be added to the operation list for the provided transaction
func (m AllowTrustBuilder) MutateTransaction(o *TransactionBuilder) error {
if m.Err != nil {
return m.Err
}
m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeAllowTrust, m.AT)
o.TX.Operations = append(o.TX.Operations, m.O)
return m.Err
}
// MutateTransaction for AutoSequence loads the sequence and sets it on the tx.
// NOTE: this mutator assumes that the source account has already been set on
// the transaction and will error if that has not occurred.
func (m AutoSequence) MutateTransaction(o *TransactionBuilder) error {
source := o.TX.SourceAccount
if source == (xdr.AccountId{}) {
return errors.New("auto sequence used prior to setting source account")
}
seq, err := m.SequenceForAccount(source.Address())
if err != nil {
return errors.Wrap(err, "couldn't load account for auto sequence")
}
o.TX.SeqNum = seq + 1
return nil
}
// MutateTransaction for BumpSequenceBuilder causes the underlying BumpSequenceOp
// to be added to the operation list for the provided transaction
func (m BumpSequenceBuilder) MutateTransaction(o *TransactionBuilder) error {
if m.Err != nil {
return m.Err
}
m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeBumpSequence, m.BS)
o.TX.Operations = append(o.TX.Operations, m.O)
return m.Err
}
// MutateTransaction for ChangeTrustBuilder causes the underlying
// ChangeTrustOp to be added to the operation list for the provided
// transaction
func (m ChangeTrustBuilder) MutateTransaction(o *TransactionBuilder) error {
if m.Err != nil {
return m.Err
}
m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeChangeTrust, m.CT)
o.TX.Operations = append(o.TX.Operations, m.O)
return m.Err
}
// MutateTransaction for CreateAccountBuilder causes the underlying
// CreateAccountOp to be added to the operation list for the provided
// transaction
func (m CreateAccountBuilder) MutateTransaction(o *TransactionBuilder) error {
if m.Err != nil {
return m.Err
}
m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeCreateAccount, m.CA)
o.TX.Operations = append(o.TX.Operations, m.O)
return m.Err
}
// DefaultBaseFee is used to calculate the transaction fee by default
var DefaultBaseFee uint64 = 100
// MutateTransaction for Defaults sets reasonable defaults on the transaction being built
func (m Defaults) MutateTransaction(o *TransactionBuilder) error {
if o.BaseFee == 0 {
o.BaseFee = DefaultBaseFee
}
if o.TX.Fee == 0 {
o.TX.Fee = xdr.Uint32(int(o.BaseFee) * len(o.TX.Operations))
}
if o.NetworkPassphrase == "" {
o.NetworkPassphrase = DefaultNetwork.Passphrase
}
return nil
}
// MutateTransaction for InflationBuilder causes the underlying
// InflationOp to be added to the operation list for the provided
// transaction
func (m InflationBuilder) MutateTransaction(o *TransactionBuilder) error {
if m.Err != nil {
return m.Err
}
m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeInflation, nil)
o.TX.Operations = append(o.TX.Operations, m.O)
return m.Err
}
// MutateTransaction for ManageDataBuilder causes the underlying
// ManageDataOp to be added to the operation list for the provided
// transaction
func (m ManageDataBuilder) MutateTransaction(o *TransactionBuilder) error {
if m.Err != nil {
return m.Err
}
m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeManageData, m.MD)
o.TX.Operations = append(o.TX.Operations, m.O)
return m.Err
}
// MutateTransaction for ManageOfferBuilder causes the underlying
// ManageSellOfferOp (or CreatePassiveSellOfferOp when PassiveOffer is set) to be added to the operation list for the provided
// transaction
func (m ManageOfferBuilder) MutateTransaction(o *TransactionBuilder) error {
if m.Err != nil {
return m.Err
}
if m.PassiveOffer {
m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeCreatePassiveSellOffer, m.PO)
o.TX.Operations = append(o.TX.Operations, m.O)
} else {
m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeManageSellOffer, m.MO)
o.TX.Operations = append(o.TX.Operations, m.O)
}
return m.Err
}
// MutateTransaction for MemoHash sets the memo.
func (m MemoHash) MutateTransaction(o *TransactionBuilder) (err error) {
o.TX.Memo, err = xdr.NewMemo(xdr.MemoTypeMemoHash, m.Value)
return
}
// MutateTransaction for MemoID sets the memo.
func (m MemoID) MutateTransaction(o *TransactionBuilder) (err error) {
o.TX.Memo, err = xdr.NewMemo(xdr.MemoTypeMemoId, xdr.Uint64(m.Value))
return
}
// MutateTransaction for MemoReturn sets the memo.
func (m MemoReturn) MutateTransaction(o *TransactionBuilder) (err error) {
o.TX.Memo, err = xdr.NewMemo(xdr.MemoTypeMemoReturn, m.Value)
return
}
// MutateTransaction for MemoText sets the memo.
func (m MemoText) MutateTransaction(o *TransactionBuilder) (err error) {
if len([]byte(m.Value)) > MemoTextMaxLength {
err = errors.New("Memo too long; over 28 bytes")
return
}
o.TX.Memo, err = xdr.NewMemo(xdr.MemoTypeMemoText, m.Value)
return
}
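// MutateTransaction for Timebounds sets the transaction's time bounds.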
func (m Timebounds) MutateTransaction(o *TransactionBuilder) error {
o.TX.TimeBounds = &xdr.TimeBounds{MinTime: xdr.TimePoint(m.MinTime), MaxTime: xdr.TimePoint(m.MaxTime)}
return nil
}
// MutateTransaction for Network sets the Network ID to use when signing this transaction
func (m Network) MutateTransaction(o *TransactionBuilder) error {
o.NetworkPassphrase = m.Passphrase
return nil
}
// MutateTransaction for PaymentBuilder causes the underlying PaymentOp
// or PathPaymentOp to be added to the operation list for the provided transaction
func (m PaymentBuilder) MutateTransaction(o *TransactionBuilder) error {
if m.Err != nil {
return m.Err
}
if m.PathPayment {
m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypePathPayment, m.PP)
o.TX.Operations = append(o.TX.Operations, m.O)
return m.Err
}
m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypePayment, m.P)
o.TX.Operations = append(o.TX.Operations, m.O)
return m.Err
}
// MutateTransaction for SetOptionsBuilder causes the underlying
// SetOptionsOp to be added to the operation list for the provided
// transaction
func (m SetOptionsBuilder) MutateTransaction(o *TransactionBuilder) error {
if m.Err != nil {
return m.Err
}
m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeSetOptions, m.SO)
o.TX.Operations = append(o.TX.Operations, m.O)
return m.Err
}
// MutateTransaction for Sequence sets the SeqNum on the transaction.
func (m Sequence) MutateTransaction(o *TransactionBuilder) error {
o.TX.SeqNum = xdr.SequenceNumber(m.Sequence)
return nil
}
// MutateTransaction for SourceAccount sets the transaction's SourceAccount
// to the public key for the address provided
func (m SourceAccount) MutateTransaction(o *TransactionBuilder) error {
return setAccountId(m.AddressOrSeed, &o.TX.SourceAccount)
}
// MutateTransaction for BaseFee sets the base fee
func (m BaseFee) MutateTransaction(o *TransactionBuilder) error {
o.BaseFee = m.Amount
return nil
}
| []
| []
| []
| [] | [] | go | null | null | null |
tests/test_client.py | import datetime
import hashlib
import io
import json
import os
import random
import requests
import shutil
import string
import tempfile
import time
import unittest
from utils import generate_rand_string
from OTXv2 import OTXv2, OTXv2Cached, InvalidAPIKey, BadRequest, RetryError, NotFound
import IndicatorTypes
from patch_pulse import PatchPulse
STRP_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'
ALIEN_DEV_SERVER = os.getenv('X_OTX_DEV_SERVER', "")
ALIEN_API_APIKEY = ""
rand = random.randint(0, 1e9)
def create_user(username, password, email):
"""
Create a user, and get the API key
"""
print("creating user {}".format(username))
requests.post(ALIEN_DEV_SERVER + 'otxapi/qatests/setup/', json={"users": [{ "username": username, "password": password, "email": email}]})
r = requests.post(ALIEN_DEV_SERVER + 'auth/login', json={"username": username, "password": password})
j = json.loads(r.text)
r = requests.get(ALIEN_DEV_SERVER + 'otxapi/user/?detailed=true', headers={'Authorization': j['key']})
j = r.json()
return j['api_keys'][0]['api_key']
def delete_user(username):
print("deleting user {}".format(username))
r = requests.post(ALIEN_DEV_SERVER + 'otxapi/qatests/cleanup/', json={"users": [username]})
return r.json()
# Class names should start with "Test"
class TestOTXv2(unittest.TestCase):
"""
Base class configure API Key to use on a per test basis.
"""
def setUp(self, api_key=''):
self.api_key = api_key or ALIEN_API_APIKEY
self.otx = OTXv2(self.api_key, server=ALIEN_DEV_SERVER)
class TestSubscriptionsInvalidKey(TestOTXv2):
"""
Confirm InvalidAPIKey class is raised for API Key failures
"""
def setUp(self, **kwargs):
super(TestSubscriptionsInvalidKey, self).setUp(api_key=generate_rand_string(length=64))
def test_getall(self):
with self.assertRaises(InvalidAPIKey):
r = self.otx.getall(max_page=3, limit=5)
class TestSubscriptions(TestOTXv2):
"""
Confirm that given a valid API Key, we can obtain threat intelligence subscriptions.
"""
def test_getall(self):
pulses = self.otx.getall(max_page=3, limit=5)
self.assertIsNotNone(pulses)
self.assertTrue(len(pulses) > 0)
self.assertTrue(len(pulses) <= 3 * 5)
most_recent = pulses[0]
self.assertIsNotNone(most_recent.get('id', None))
self.assertIsNotNone(most_recent.get('name', None))
self.assertIsNotNone(most_recent.get('description', None))
self.assertIsNotNone(most_recent.get('author_name', None))
self.assertIsNotNone(most_recent.get('indicators', None))
self.assertIsNotNone(most_recent.get('created', None))
self.assertIsNotNone(most_recent.get('modified', None))
self.assertIsNotNone(most_recent.get('id', None))
def test_getall_iter(self):
pulse_gen = self.otx.getall_iter(max_page=3, limit=5)
self.assertIsNotNone(pulse_gen)
for pulse in pulse_gen:
# print(u"test_getall_iter next pulse: {0}".format(pulse.get('name', '')))
self.assertTrue(pulse.get('name', None))
def test_getsince(self):
three_months_dt = (datetime.datetime.now() - datetime.timedelta(days=90))
three_months_timestamp = three_months_dt.isoformat()
pulses = self.otx.getsince(three_months_timestamp, limit=9999, max_page=3)
for pulse in pulses:
# print(u"test_getsince next pulse: {0}".format(pulse.get('name', '')))
pulse_modified = pulse.get('modified', None)
self.assertIsNotNone(pulse_modified)
try:
pulse_modified_dt = datetime.datetime.strptime(pulse_modified, STRP_TIME_FORMAT)
except ValueError:
pulse_modified_dt = datetime.datetime.strptime(pulse_modified, '%Y-%m-%dT%H:%M:%S')
self.assertGreaterEqual(pulse_modified_dt, three_months_dt)
def test_getsince_iter(self):
three_months_dt = (datetime.datetime.now() - datetime.timedelta(days=90))
three_months_timestamp = three_months_dt.isoformat()
pulse_gen = self.otx.getsince_iter(three_months_timestamp, limit=9999, max_page=3)
self.assertIsNotNone(pulse_gen)
for pulse in pulse_gen:
# print(u"test_getsince_iter next pulse: {0}".format(pulse.get('name', '')))
self.assertTrue(pulse.get('name', None))
pulse_modified = pulse.get('modified', None)
self.assertIsNotNone(pulse_modified)
try:
pulse_modified_dt = datetime.datetime.strptime(pulse_modified, STRP_TIME_FORMAT)
except ValueError:
pulse_modified_dt = datetime.datetime.strptime(pulse_modified, '%Y-%m-%dT%H:%M:%S')
self.assertGreaterEqual(pulse_modified_dt, three_months_dt)
def test_author_param(self):
for pulse in self.otx.getall(author_name='AlienVault', max_page=3):
self.assertEqual(pulse['author_name'], 'AlienVault')
three_months_dt = (datetime.datetime.now() - datetime.timedelta(days=90))
for pulse in self.otx.getall(author_name='AlienVault', modified_since=three_months_dt, max_page=3):
self.assertEqual(pulse['author_name'], 'AlienVault')
pulse_modified = pulse.get('modified', None)
try:
pulse_modified_dt = datetime.datetime.strptime(pulse_modified, STRP_TIME_FORMAT)
except ValueError:
pulse_modified_dt = datetime.datetime.strptime(pulse_modified, '%Y-%m-%dT%H:%M:%S')
self.assertGreaterEqual(pulse_modified_dt, three_months_dt)
class TestSearch(TestOTXv2):
def test_search_pulses_simple(self):
res = self.otx.search_pulses("Russian")
pulses = res.get('results')
self.assertTrue(len(pulses) > 0)
self.assertIsNotNone(pulses)
self.assertTrue(len(pulses) > 0)
pulse = pulses[0]
#print(u"test_search_pulses_simple top hit: {0}".format(pulse.get('name', '')))
#print str(pulses[0])
self.assertIsNotNone(pulse.get('modified', None))
self.assertIsNotNone(pulse.get('author_name', None))
self.assertIsNotNone(pulse.get('id', None))
self.assertIsNotNone(pulse.get('tags', None))
self.assertIsNotNone(pulse.get('references', None))
self.assertIsNotNone(res.get('exact_match'))
def test_exact_match_domain(self):
res = self.otx.search_pulses("malware.org")
pulses = res.get('results')
self.assertTrue(isinstance(pulses, list))
self.assertTrue(len(pulses) > 0)
self.assertIsNotNone(pulses)
# print("test_exact_match_domain additional data for malware.org:")
# pprint.pprint(res)
self.assertTrue(res.get('exact_match', -1))
def test_search_users(self):
res = self.otx.search_users("alien")
self.assertTrue('results' in res.keys())
self.assertTrue(isinstance(res.get('results', ''), list))
users = res.get('results')
first_user = users[0]
self.assertTrue(first_user.get('username', '') != '')
self.assertTrue(res.get('count', -1) >= 0)
class TestEvents(TestOTXv2):
def test_getevents_since(self):
three_months_dt = (datetime.datetime.now() - datetime.timedelta(days=90))
three_months_timestamp = three_months_dt.isoformat()
events = self.otx.getevents_since(three_months_timestamp, limit=9999)
self.assertIsNotNone(events)
self.assertTrue(len(events) > 0)
most_recent = events[0]
self.assertIsNotNone(most_recent.get('action', None))
self.assertIsNotNone(most_recent.get('created', None))
self.assertIsNotNone(most_recent.get('id', None))
class TestIndicatorTypes(TestOTXv2):
def test_get_all_indicators(self):
indicator_gen = self.otx.get_all_indicators(max_page=3)
for indicator in indicator_gen:
self.assertIsNotNone(indicator)
self.assertIsNotNone(indicator.get('type', None))
self.assertIsNotNone(indicator.get('indicator', None))
self.assertIsNotNone(indicator.get('description', None))
def test_get_all_ipv4_indicators(self):
ipv4_type_list = [IndicatorTypes.IPv4]
ipv4_indicator_gen = self.otx.get_all_indicators(indicator_types=ipv4_type_list, max_page=3)
for indicator in ipv4_indicator_gen:
self.assertIsNotNone(indicator)
self.assertIsNotNone(indicator.get('type', None))
self.assertIsNotNone(indicator.get('indicator', None))
self.assertIsNotNone(indicator.get('description', None))
self.assertTrue(indicator.get('type', '') == IndicatorTypes.IPv4.name)
class TestPulseDetails(TestOTXv2):
def test_get_pulse_details(self):
# get a pulse from search to use as testcase
res = self.otx.search_pulses("Russian")
pulses = res.get('results')
self.assertTrue(len(pulses) > 0)
pulse = pulses[0]
pulse_id = pulse.get('id', '')
meta_data = self.otx.get_pulse_details(pulse_id=pulse_id)
# pprint.pprint(meta_data)
self.assertIsNotNone(meta_data)
self.assertTrue('author_name' in meta_data.keys())
self.assertTrue('name' in meta_data.keys())
self.assertTrue('references' in meta_data.keys())
self.assertTrue('tags' in meta_data.keys())
self.assertTrue('indicators' in meta_data.keys())
def test_get_pulse_indicators(self):
res = self.otx.search_pulses("Russian")
pulses = res.get('results')
self.assertTrue(len(pulses) > 0)
pulse = pulses[0]
pulse_id = pulse.get('id', '')
indicators = self.otx.get_pulse_indicators(pulse_id=pulse_id)
self.assertIsNotNone(indicators)
# print("Indicators is " + str(indicators))
for indicator in indicators:
# print("next indicator:")
# pprint.pprint(indicator)
self.assertIsNotNone(indicator.get('indicator'))
self.assertIsNotNone(indicator.get('type'))
def test_get_pulse_indicators_bad_pulse_id(self):
with self.assertRaises(NotFound): # not an existing pulse
res = self.otx.get_pulse_indicators("a"*24)
with self.assertRaises(BadRequest): # not enough characters
res = self.otx.get_pulse_indicators("aaaaaa")
with self.assertRaises(BadRequest): # too many characters
res = self.otx.get_pulse_indicators("a"*25)
with self.assertRaises(BadRequest): # not a string
res = self.otx.get_pulse_indicators(1)
class TestIndicatorDetails(TestOTXv2):
def test_get_indicator_details_IPv4_by_section(self):
# print("test_get_indicator_details_IPv4_by_section")
for section in IndicatorTypes.IPv4.sections:
# print("next section: {0}".format(section))
section_details = self.otx.get_indicator_details_by_section(IndicatorTypes.IPv4, "69.73.130.198", section)
# print(u"section: {0}".format(section))
# pprint.pprint(section_details)
self.assertTrue(True)
def test_get_indicator_details_IPv4_full(self):
# print("test_get_indicator_details_IPv4_full")
full_details = self.otx.get_indicator_details_full(IndicatorTypes.IPv4, "69.73.130.198")
self.assertTrue(sorted(full_details.keys()) == sorted(IndicatorTypes.IPv4.sections))
# pprint.pprint(full_details)
def test_get_indicator_details_Email_full(self):
# print("test_get_indicator_details_IPv4_full")
full_details = self.otx.get_indicator_details_full(IndicatorTypes.EMAIL, "[email protected]")
self.assertTrue(sorted(full_details.keys()) == sorted(IndicatorTypes.EMAIL.sections))
# pprint.pprint(full_details)
class TestPulseCreate(TestOTXv2):
def test_create_pulse_simple(self):
name = "Pyclient-simple-unittests-" + generate_rand_string(8, charset=string.hexdigits).lower()
# print("test_create_pulse_simple submitting pulse: " + name)
response = self.otx.create_pulse(name=name,
public=False,
indicators=[{'indicator': "8.8.8.8", 'type': IndicatorTypes.IPv4.name}],
tags=[],
references=[])
self.assertIsNotNone(response)
def test_create_pulse_no_name(self):
"""
Test: pulse without name should raise value error
"""
# print("test_create_pulse_no_name submitting nameless pulse")
with self.assertRaises(ValueError):
self.otx.create_pulse(**{})
def test_create_pulse_name_too_short(self):
"""
Test: pulse without name should raise value error
"""
body = {'name': generate_rand_string(2)}
# print("test_create_pulse_name_too_short submitting pulse: {}\nExpecting BadRequest.".format(body))
with self.assertRaises(BadRequest):
self.otx.create_pulse(**body)
def test_create_pulse_tlp_mismatch(self):
"""
Test: pulse without name should raise value error
"""
name = generate_rand_string(10)
tlps = ['red', 'amber']
for tlp in tlps:
# print("test_create_pulse_tlp_mismatch submitting pulse: {} (tlp: {})".format(name, tlp))
with self.assertRaises(BadRequest):
self.otx.create_pulse(name=name, TLP=tlp, public=True)
def test_create_pulse_with_indicators(self):
"""
Test: pulse with list of indicators
"""
charset = string.ascii_letters
validated_indicator_list = []
indicator_list = [
{'indicator': generate_rand_string(10, charset=charset) + ".com", 'type': IndicatorTypes.DOMAIN},
{'indicator': generate_rand_string(3, charset=charset) + "." + generate_rand_string(10, charset=charset) + ".com", 'type': IndicatorTypes.HOSTNAME},
{'indicator': "69.73.130.198", 'type': IndicatorTypes.IPv4},
{'indicator': "2a00:1450:4001:800::1017", 'type': IndicatorTypes.IPv6},
{'indicator': "spearphish@" + generate_rand_string(10) + ".com", 'type': IndicatorTypes.EMAIL},
{'indicator': "14c04f88dc97aef3e9b516ef208a2bf5", 'type': IndicatorTypes.FILE_HASH_MD5},
{'indicator': "48e04cb52f1077b5f5aab75baff6c27b0ee4ade1", 'type': IndicatorTypes.FILE_HASH_SHA1},
{'indicator': "7522bc3e366c19ab63381bacd0f03eb09980ecb915ada08ae76d8c3e538600de", 'type': IndicatorTypes.FILE_HASH_SHA256},
{'indicator': "a060fe925aa888053010d1e195ef823a", 'type': IndicatorTypes.FILE_HASH_IMPHASH},
{'indicator': "\sonas\share\samples\14\c0\4f\88\14c04f88dc97aef3e9b516ef208a2bf5", 'type': IndicatorTypes.FILE_PATH},
]
name = "Pyclient-indicators-unittests-" + generate_rand_string(8, charset=string.hexdigits).lower()
for indicator in indicator_list:
validated_indicator = self.otx.validate_indicator(indicator.get('type'), indicator.get('indicator', ''))
self.assertTrue('success' in validated_indicator.get('status', ''))
validated_indicator_list.append(validated_indicator)
# print("test_create_pulse_with_indicators: finished validating indicators.\nsubmitting pulse: {}".format({"name": name, "indicators": validated_indicator_list}))
response = self.otx.create_pulse(name=name, public=False, indicators=validated_indicator_list)
self.assertTrue(response.get('name', '') == name)
self.assertTrue(len(response.get('indicators', [])) == len(validated_indicator_list))
return
def test_create_pulse_and_update(self):
"""
Test: create a pulse then replace the indicators
"""
indicator_list = [ {'indicator': "one.com", 'type': 'domain'} ]
new_indicators = [ {'indicator': "two.com", 'type': 'domain'} ]
name = "Pyclient-indicators-unittests-modify-pulse"
response = self.otx.create_pulse(name=name, public=False, indicators=indicator_list)
pulse_id = response['id']
response = self.otx.replace_pulse_indicators(pulse_id, new_indicators)
new_indicators = str(response['indicators']['indicators'])
self.assertTrue('two.com' in new_indicators)
return
def test_create_pulse_and_edit(self):
"""
Test: create a pulse then add indicators via json directly
"""
indicator_list = [ {'indicator': "one.com", 'type': 'domain'} ]
indicators_to_add = [ {'indicator': "added.com", 'type': 'domain'} ]
add_indicators = { 'indicators': { 'add': indicators_to_add } }
name = "Pyclient-indicators-unittests-modify-pulse"
response = self.otx.create_pulse(name=name, public=False, indicators=indicator_list)
pulse_id = response['id']
response = self.otx.edit_pulse(pulse_id, add_indicators)
new_indicators = str(response['indicators']['indicators'])
self.assertTrue('added.com' in new_indicators)
return
def test_create_pulse_and_edit_via_patch_pulse(self):
"""
Test: create a pulse then add indicators via a patch pulse object
"""
indicator_list = [ {'indicator': "one.com", 'type': 'domain'} ]
name = "Pyclient-indicators-unittests-modify-pulse-patch-pulse"
response = self.otx.create_pulse(name=name, public=False, indicators=indicator_list)
pulse_id = response['id']
# Edit the pulse using a patch pulse object
# We could also edit indicators etc. here
pp = PatchPulse(pulse_id)
pp.add("tags", ["addtag1", "addtag2"])
pp.set("description","New Description")
response = self.otx.edit_pulse(pulse_id, pp.getBody())
new_tags = str(response['tags'])
self.assertTrue('addtag1' in new_tags)
return
def test_create_pulse_tlp(self):
"""
Test: pulse with each TLP.
"""
charset = string.ascii_letters
indicator_list = [
{'indicator': generate_rand_string(10, charset=charset) + ".com", 'type': IndicatorTypes.DOMAIN.name, 'description': 'evil domain (unittests)'},
{'indicator': generate_rand_string(3, charset=charset) + "." + generate_rand_string(10, charset=charset) + ".com", 'type': IndicatorTypes.HOSTNAME.name, 'description': 'evil hostname (unittests)'}
]
name = "Pyclient-tlp-unittests-" + generate_rand_string(8, charset=string.hexdigits).lower()
tlps = ['red', 'amber', 'green', 'white']
for tlp in tlps:
# print("test_create_pulse_tlp: submitting pulse: {}".format({"name": name, "tlp": tlp}))
response = self.otx.create_pulse(name=name, public=False, tlp=tlp, indicators=indicator_list)
self.assertTrue(response.get('name', '') == name)
self.assertTrue(response.get('TLP', '') == tlp)
self.assertFalse(response.get('public'))
return
class TestPulseCreateInvalidKey(TestOTXv2):
def setUp(self, **kwargs):
super(TestPulseCreateInvalidKey, self).setUp(**{'api_key': "ALIEN_API_APIKEY"})
def test_create_pulse_invalid_key(self):
name = "Pyclient-unittests-" + generate_rand_string(8, charset=string.hexdigits).lower()
# print("test_create_pulse_simple submitting pulse: " + name)
with self.assertRaises(InvalidAPIKey):
self.otx.create_pulse(name=name,
public=False,
indicators=[],
tags=[],
references=[])
class TestSubscription(unittest.TestCase):
user1 = "qatester-git-sub1-{}".format(rand)
user2 = "qatester-git-sub2-{}".format(rand)
otx = {}
@classmethod
def setUpClass(cls):
for u in [cls.user1, cls.user2]:
cls.otx[u] = OTXv2(create_user(u, "password", u + "@aveng.us"), server=ALIEN_DEV_SERVER)
@classmethod
def tearDownClass(cls):
for u in [cls.user1, cls.user2]:
delete_user(u)
def test_user_subscription(self):
check_cols = ['username', 'request_user_is_me', 'request_user_is_following', 'request_user_is_subscribed']
before_u1 = {k: v for k, v in self.otx[self.user1].get_user(self.user1).items() if k in check_cols}
before_u2 = {k: v for k, v in self.otx[self.user1].get_user(self.user2).items() if k in check_cols}
self.assertDictEqual(before_u1, {
u"username": self.user1, u"request_user_is_me": True, u"request_user_is_subscribed": False, u"request_user_is_following": False,
})
self.assertDictEqual(before_u2, {
u"username": self.user2, u"request_user_is_me": False, u"request_user_is_subscribed": False, u"request_user_is_following": False,
})
# sub to u2
self.otx[self.user1].subscribe_to_user(self.user2)
after_u1 = {k: v for k, v in self.otx[self.user1].get_user(self.user1).items() if k in check_cols}
after_u2 = {k: v for k, v in self.otx[self.user1].get_user(self.user2).items() if k in check_cols}
self.assertDictEqual(after_u1, {
u"username": self.user1, u"request_user_is_me": True, u"request_user_is_subscribed": False, u"request_user_is_following": False,
})
self.assertDictEqual(after_u2, {
u"username": self.user2, u"request_user_is_me": False, u"request_user_is_subscribed": True, u"request_user_is_following": False,
})
# follow u2
self.otx[self.user1].follow_user(self.user2)
after2_u1 = {k: v for k, v in self.otx[self.user1].get_user(self.user1).items() if k in check_cols}
after2_u2 = {k: v for k, v in self.otx[self.user1].get_user(self.user2).items() if k in check_cols}
self.assertDictEqual(after2_u1, {
u"username": self.user1, u"request_user_is_me": True, u"request_user_is_subscribed": False, u"request_user_is_following": False,
})
self.assertDictEqual(after2_u2, {
u"username": self.user2, u"request_user_is_me": False, u"request_user_is_subscribed": True, u"request_user_is_following": True,
})
# unsub u2
self.otx[self.user1].unsubscribe_from_user(self.user2)
after3_u1 = {k: v for k, v in self.otx[self.user1].get_user(self.user1).items() if k in check_cols}
after3_u2 = {k: v for k, v in self.otx[self.user1].get_user(self.user2).items() if k in check_cols}
self.assertDictEqual(after3_u1, {
u"username": self.user1, u"request_user_is_me": True, u"request_user_is_subscribed": False, u"request_user_is_following": False,
})
self.assertDictEqual(after3_u2, {
u"username": self.user2, u"request_user_is_me": False, u"request_user_is_subscribed": False, u"request_user_is_following": True,
})
# unfollow u2
self.otx[self.user1].unfollow_user(self.user2)
after4_u1 = {k: v for k, v in self.otx[self.user1].get_user(self.user1).items() if k in check_cols}
after4_u2 = {k: v for k, v in self.otx[self.user1].get_user(self.user2).items() if k in check_cols}
self.assertDictEqual(after4_u1, {
u"username": self.user1, u"request_user_is_me": True, u"request_user_is_subscribed": False, u"request_user_is_following": False,
})
self.assertDictEqual(after4_u2, {
u"username": self.user2, u"request_user_is_me": False, u"request_user_is_subscribed": False, u"request_user_is_following": False,
})
def test_pulse_subscription(self):
indicator_list = [{'indicator': "one.com", 'type': 'domain'}]
name = "subscription test"
response = self.otx[self.user1].create_pulse(name=name, public=False, indicators=indicator_list)
pulse_id = response['id']
before = self.otx[self.user1].get_pulse_details(pulse_id)
self.assertFalse(before['is_subscribing'])
self.otx[self.user1].subscribe_to_pulse(pulse_id)
after = self.otx[self.user1].get_pulse_details(pulse_id)
self.assertTrue(after['is_subscribing'])
self.otx[self.user1].unsubscribe_from_pulse(pulse_id)
after2 = self.otx[self.user1].get_pulse_details(pulse_id)
self.assertFalse(after2['is_subscribing'])
class TestValidateIndicator(TestOTXv2):
def test_validate_valid_domain(self):
indicator = generate_rand_string(8, charset=string.ascii_letters).lower() + ".com"
indicator_type = IndicatorTypes.DOMAIN
# print("test_validate_valid_domain submitting (valid-ish) indicator: " + indicator)
response = self.otx.validate_indicator(indicator_type=indicator_type, indicator=indicator)
# print("test_validate_valid_domain response: {}".format(response))
self.assertIsNotNone(response)
self.assertTrue('success' in response.get('status', ''))
def test_validate_invalid_domain(self):
indicator = generate_rand_string(8, charset=string.ascii_letters).lower()
indicator_type = IndicatorTypes.DOMAIN
# print("test_validate_invalid_domain submitting indicator: " + indicator)
with self.assertRaises(BadRequest):
self.otx.validate_indicator(indicator_type=indicator_type, indicator=indicator)
class TestRequests(TestOTXv2):
def test_backoff(self):
with self.assertRaises(RetryError):
t1 = time.time()
self.otx.get('error/500/')
diff = time.time() - t1
self.assertTrue(diff > 1+2+4+8+16)
def test_user_agent(self):
o = OTXv2(self.api_key, server=ALIEN_DEV_SERVER, project='foo')
self.assertEqual(o.headers['User-Agent'], 'OTX Python foo/1.5')
o = OTXv2(self.api_key, server=ALIEN_DEV_SERVER, user_agent='foo')
self.assertEqual(o.headers['User-Agent'], 'foo')
class TestSubmissions(TestOTXv2):
rand1 = None
rand2 = None
maxDiff = None
@classmethod
def setUpClass(cls):
cls.rand1 = random.randint(0, 1e12)
cls.rand2 = random.randint(0, 1e12)
def test_submit_file(self):
data = "print('{} {}')".format(self.rand1, self.rand2)
try:
contents = bytes(data, encoding='utf8')
except TypeError:
contents = bytes(data)
filename = 'test{}.py'.format(self.rand1)
r = self.otx.submit_file(filename=filename, file_handle=io.BytesIO(contents))
self.assertDictEqual(r, {
u'result': u'added',
u'sha256': hashlib.sha256(contents).hexdigest(),
u'status': u'ok',
})
r = self.otx.submitted_files()
self.assertEqual(r[0]['file_name'], filename)
def test_submit_url(self):
time.sleep(2)
u = "http://flannelcat.rustybrooks.com/xxx/{}".format(self.rand1)
r = self.otx.submit_url(url=u)
self.assertDictEqual(r, {u'result': u'added', u'status': u'ok'})
r = self.otx.submitted_urls()
        self.assertEqual(r[0]['url'], u)
def test_submit_urls(self):
time.sleep(2)
u1 = "http://flannelcat.rustybrooks.com/yyy/{}".format(self.rand1)
u2 = "http://flannelcat.rustybrooks.com/yyy/{}".format(self.rand2)
r = self.otx.submit_urls(urls=[u1, u2])
r['added'].sort()
self.assertDictEqual(r, {
u'added': sorted([u2, u1]),
u'exists': [],
u'skipped': [],
u'updated': [],
u'invalid': [],
u'status': u'ok',
})
r = self.otx.submitted_urls()
        self.assertEqual(
sorted([x['url'] for x in r[:2]]),
sorted([u1, u2])
)
class TestOTXv2Cached(unittest.TestCase):
user = "qatester-git-u1-{}".format(rand)
author1 = "qatester-gith-a1-{}".format(rand)
author2 = "qatester-gith-a2-{}".format(rand)
otx = {}
@classmethod
def setUpClass(cls):
for u in [cls.user, cls.author1, cls.author2]:
cls.otx[u] = OTXv2Cached(
create_user(u, "password", u + "@aveng.us"),
cache_dir=tempfile.mkdtemp(),
server=ALIEN_DEV_SERVER,
)
@classmethod
def tearDownClass(cls):
for u in [cls.user, cls.author1, cls.author2]:
delete_user(u)
shutil.rmtree(cls.otx[u].cache_dir)
def test_basic(self):
def _names(pulses):
return sorted([x['name'] for x in pulses])
def _ind(indicators):
return sorted([x['indicator'] for x in indicators])
t1 = datetime.datetime.utcnow(), datetime.datetime.utcnow()
# new user, no subs except the default, AV. Unsub from AV user and feed should be empty
self.otx[self.user].unsubscribe_from_user("AlienVault")
self.otx[self.user].update()
t2 = self.otx[self.user].last_subscription_fetch, self.otx[self.user].last_events_fetch
self.assertEqual(self.otx[self.user].getall(), [])
self.assertEqual(self.otx[self.user].getall(author_name=self.author1), [])
self.assertEqual(self.otx[self.user].getall(author_name=self.author2), [])
self.assertEqual(self.otx[self.user].getall(modified_since=t1[0]), [])
self.assertEqual(list(self.otx[self.user].get_all_indicators(modified_since=t1[0])), [])
# let's have the user create a pulse and verify that it shows in their feed
self.otx[self.user].create_pulse(
name="xxup1",
public=True,
indicators=[{'indicator': "8.8.8.8", 'type': IndicatorTypes.IPv4.name}],
)
# let's have author1 create a pulse - we're not subbed to him so it won't show at first
self.otx[self.author1].create_pulse(
name="xa1p1",
public=True,
indicators=[{'indicator': "9.9.9.9", 'type': IndicatorTypes.IPv4.name}],
)
# let's have author2 create a pulse - we're not subbed to him so it won't show at first
self.otx[self.author2].create_pulse(
name="xa2p1",
public=True,
indicators=[{'indicator': "9.9.9.10", 'type': IndicatorTypes.IPv4.name}],
)
self.otx[self.user].update()
t3 = self.otx[self.user].last_subscription_fetch, self.otx[self.user].last_events_fetch
self.assertEqual(_names(self.otx[self.user].getall()), ['xxup1'])
self.assertEqual(_names(self.otx[self.user].getall(author_name=self.author1)), [])
self.assertEqual(_names(self.otx[self.user].getall(author_name=self.author2)), [])
self.assertEqual(_names(self.otx[self.user].getall(modified_since=t1[0])), ['xxup1'])
self.assertEqual(_ind(list(self.otx[self.user].get_all_indicators(modified_since=t1[0]))), ['8.8.8.8'])
# subscribe to author1, now we should see his pulse
self.otx[self.user].subscribe_to_user(self.author1)
self.otx[self.user].update()
t4 = self.otx[self.user].last_subscription_fetch, self.otx[self.user].last_events_fetch
self.assertEqual(_names(self.otx[self.user].getall()), ['xa1p1', 'xxup1'])
self.assertEqual(_names(self.otx[self.user].getall(author_name=self.author1)), ['xa1p1'])
self.assertEqual(_names(self.otx[self.user].getall(author_name=self.author2)), [])
self.assertEqual(_names(self.otx[self.user].getall(modified_since=t1[0])), ['xa1p1', 'xxup1'])
self.assertEqual(_ind(list(self.otx[self.user].get_all_indicators(modified_since=t1[0]))), ['8.8.8.8', '9.9.9.9'])
# subscribe to author2, now we should see his pulse
self.otx[self.user].subscribe_to_user(self.author2)
self.otx[self.user].update()
t4 = self.otx[self.user].last_subscription_fetch, self.otx[self.user].last_events_fetch
self.assertEqual(_names(self.otx[self.user].getall()), ['xa1p1', 'xa2p1', 'xxup1'])
self.assertEqual(_names(self.otx[self.user].getall(author_name=self.author1)), ['xa1p1'])
self.assertEqual(_names(self.otx[self.user].getall(author_name=self.author2)), ['xa2p1'])
self.assertEqual(_names(self.otx[self.user].getall(modified_since=t1[0])), ['xa1p1', 'xa2p1', 'xxup1'])
self.assertEqual(_ind(list(self.otx[self.user].get_all_indicators(modified_since=t1[0]))), ['8.8.8.8', '9.9.9.10', '9.9.9.9'])
# let's have author2 create another pulse
self.otx[self.author2].create_pulse(
name="xa2p2",
public=True,
indicators=[{'indicator': "foo.com", 'type': IndicatorTypes.DOMAIN.name}],
)
self.otx[self.user].update()
t5 = self.otx[self.user].last_subscription_fetch, self.otx[self.user].last_events_fetch
self.assertEqual(_names(self.otx[self.user].getall()), ['xa1p1', 'xa2p1', 'xa2p2', 'xxup1'])
self.assertEqual(_names(self.otx[self.user].getall(author_name=self.author1)), ['xa1p1'])
self.assertEqual(_names(self.otx[self.user].getall(author_name=self.author2)), ['xa2p1', 'xa2p2'])
self.assertEqual(_names(self.otx[self.user].getall(modified_since=t1[0])), ['xa1p1', 'xa2p1', 'xa2p2', 'xxup1'])
self.assertEqual(_ind(list(self.otx[self.user].get_all_indicators(modified_since=t1[0]))), ['8.8.8.8', '9.9.9.10', '9.9.9.9', 'foo.com'])
self.assertEqual(_ind(list(self.otx[self.user].get_all_indicators(modified_since=t4[0]))), ['9.9.9.10', 'foo.com'])
self.assertEqual(_ind(list(self.otx[self.user].get_all_indicators(indicator_types=[IndicatorTypes.DOMAIN]))), ['foo.com'])
self.assertEqual(_ind(list(self.otx[self.user].get_all_indicators(author_name=self.author1))), ['9.9.9.9'])
self.assertEqual(_ind(list(self.otx[self.user].get_all_indicators(author_name=self.author2))), ['9.9.9.10', 'foo.com'])
self.assertEqual(_ind(list(self.otx[self.user].get_all_indicators(author_name=self.author1, indicator_types=[IndicatorTypes.DOMAIN]))), [])
self.assertEqual(_ind(list(self.otx[self.user].get_all_indicators(author_name=self.author2, indicator_types=[IndicatorTypes.DOMAIN]))), ['foo.com'])
def test_passthrough(self):
"""
A simple test that demonstrates that any function not in OTXv2Cached will flow through to it's parent class
"""
res = self.otx[self.user].search_pulses("Russian")
pulses = res.get('results')
self.assertTrue(len(pulses) > 0)
self.assertIsNotNone(pulses)
self.assertTrue(len(pulses) > 0)
pulse = pulses[0]
self.assertIsNotNone(pulse.get('modified', None))
self.assertIsNotNone(pulse.get('author_name', None))
self.assertIsNotNone(pulse.get('id', None))
self.assertIsNotNone(pulse.get('tags', None))
self.assertIsNotNone(pulse.get('references', None))
self.assertIsNotNone(res.get('exact_match'))
if __name__ == '__main__':
username = "qatester-git-{}".format(rand)
try:
ALIEN_API_APIKEY = create_user(username, "password", username + "@aveng.us")
unittest.main()
finally:
print(delete_user(username))
| []
| []
| [
"X_OTX_DEV_SERVER"
]
| [] | ["X_OTX_DEV_SERVER"] | python | 1 | 0 | |
pkg/update.go | package pkg
import (
"bytes"
"fmt"
"os"
"os/exec"
"strings"
"time"
"github.com/litmuschaos/litmus-e2e/pkg/log"
"github.com/litmuschaos/litmus-e2e/pkg/types"
"github.com/pkg/errors"
)
//UpdateResultTable will update the result of pipelines in a table on github using a python update script
func UpdateResultTable(experimentDetails, testVerdict string, testsDetails *types.TestDetails) error {
var out, stderr bytes.Buffer
//Updating the result table
log.Infof("The job_id for the job is: %v", os.Getenv("CI_JOB_ID"))
log.Infof("The testVerdict for the experiment is: %v", testVerdict+"ed")
//Setup emoji with test result
if testVerdict == "Pass" {
testVerdict = testVerdict + "ed :smiley:"
} else if testVerdict == "Fail" {
testVerdict = testVerdict + "ed :worried:"
} else {
testVerdict = testVerdict + " :cold_sweat:"
}
imageTag := GetImageTag(testsDetails.GoExperimentImage)
//Running python script to update result table
cmd := exec.Command("python3", "-u", "../utils/result_update.py", "--job_id", os.Getenv("CI_JOB_ID"), "--tag", imageTag, "--test_desc", experimentDetails, "--test_result", testVerdict, "--time_stamp", (time.Now().Format(time.ANSIC))+"(IST)", "--token", os.Getenv("GITHUB_TOKEN"), "--test_name", testsDetails.ExperimentName)
cmd.Stdout = &out
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
fmt.Println(fmt.Sprint(err) + ": " + stderr.String())
return err
}
log.Infof("Result: " + out.String())
log.Info("[Table]: Pipeline Result table updated successfully !!!")
return nil
}
//UpdatePipelineStatus will update the status of pipeline at the end of all jobs
func UpdatePipelineStatus(testsDetails *types.TestDetails, coverageData string) error {
var out, stderr bytes.Buffer
var pipelineName string
//Updating the result table
log.Infof("The pipeline id is:", os.Getenv("CI_PIPELINE_ID"))
if os.Getenv("POD_LEVEL") == "true" {
pipelineName = "pod-level"
} else if os.Getenv("NODE_LEVEL") == "true" {
pipelineName = "node-level"
} else if os.Getenv("COMPONENT_TEST") == "true" {
pipelineName = "component"
}
imageTag := GetImageTag(testsDetails.GoExperimentImage)
// Recording job number for pipeline update
cmd := exec.Command("python3", "-u", "../utils/pipeline_status_update.py", "--pipeline_id", os.Getenv("CI_PIPELINE_ID"), "--tag", imageTag, "--time_stamp", (time.Now().Format(time.ANSIC))+"(IST)", "--coverage", coverageData, "--pipeline", pipelineName, "--token", os.Getenv("GITHUB_TOKEN"))
cmd.Stdout = &out
cmd.Stderr = &stderr
	err := cmd.Run()
if err != nil {
fmt.Println(fmt.Sprint(err) + ": " + stderr.String())
return err
}
fmt.Println("Result: " + out.String())
return nil
}
// GetImageTag returns the Go experiment image tag
func GetImageTag(goExperimentImage string) string {
tag := strings.Split((goExperimentImage), ":")
return tag[1]
}
// AddAnnotation will add or update annotation on an application
func AddAnnotation(deployment, key, value, ns string) error {
command := []string{"annotate", "--overwrite", "deploy/" + deployment, key + "=" + value, "-n", ns}
err := Kubectl(command...)
if err != nil {
return errors.Errorf("fail to modify annotation, err: %v", err)
}
return nil
}
| [
"\"CI_JOB_ID\"",
"\"CI_JOB_ID\"",
"\"GITHUB_TOKEN\"",
"\"CI_PIPELINE_ID\"",
"\"POD_LEVEL\"",
"\"NODE_LEVEL\"",
"\"COMPONENT_TEST\"",
"\"CI_PIPELINE_ID\"",
"\"GITHUB_TOKEN\""
]
| []
| [
"CI_PIPELINE_ID",
"NODE_LEVEL",
"COMPONENT_TEST",
"CI_JOB_ID",
"POD_LEVEL",
"GITHUB_TOKEN"
]
| [] | ["CI_PIPELINE_ID", "NODE_LEVEL", "COMPONENT_TEST", "CI_JOB_ID", "POD_LEVEL", "GITHUB_TOKEN"] | go | 6 | 0 | |
tests/util_test.py | # -*- coding: utf-8 -*-
import logging
import os
import pytest
from datetime import datetime
from datetime import timedelta
from dateutil.parser import parse as dt
from unittest import mock
from elastalert.util import add_raw_postfix
from elastalert.util import build_es_conn_config
from elastalert.util import dt_to_ts
from elastalert.util import dt_to_ts_with_format
from elastalert.util import EAException
from elastalert.util import elasticsearch_client
from elastalert.util import flatten_dict
from elastalert.util import format_index
from elastalert.util import get_module
from elastalert.util import lookup_es_key
from elastalert.util import parse_deadline
from elastalert.util import parse_duration
from elastalert.util import pytzfy
from elastalert.util import replace_dots_in_field_names
from elastalert.util import resolve_string
from elastalert.util import set_es_key
from elastalert.util import should_scrolling_continue
from elastalert.util import ts_to_dt_with_format
from elastalert.util import ts_utc_to_tz
@pytest.mark.parametrize('spec, expected_delta', [
('hours=2', timedelta(hours=2)),
('minutes=30', timedelta(minutes=30)),
('seconds=45', timedelta(seconds=45)),
])
def test_parse_duration(spec, expected_delta):
"""``unit=num`` specs can be translated into ``timedelta`` instances."""
assert parse_duration(spec) == expected_delta
@pytest.mark.parametrize('spec, expected_deadline', [
('hours=2', dt('2017-07-07T12:00:00.000Z')),
('minutes=30', dt('2017-07-07T10:30:00.000Z')),
('seconds=45', dt('2017-07-07T10:00:45.000Z')),
])
def test_parse_deadline(spec, expected_deadline):
"""``unit=num`` specs can be translated into ``datetime`` instances."""
# Note: Can't mock ``utcnow`` directly because ``datetime`` is a built-in.
class MockDatetime(datetime):
@staticmethod
def utcnow():
return dt('2017-07-07T10:00:00.000Z')
with mock.patch('datetime.datetime', MockDatetime):
assert parse_deadline(spec) == expected_deadline
def test_setting_keys(ea):
expected = 12467267
record = {
'Message': '12345',
'Fields': {
'ts': 'fail',
'severity': 'large',
'user': 'jimmay'
}
}
# Set the value
assert set_es_key(record, 'Fields.ts', expected)
# Get the value again
assert lookup_es_key(record, 'Fields.ts') == expected
def test_looking_up_missing_keys(ea):
record = {
'Message': '12345',
'Fields': {
'severity': 'large',
'user': 'jimmay',
'null': None
}
}
assert lookup_es_key(record, 'Fields.ts') is None
assert lookup_es_key(record, 'Fields.null.foo') is None
def test_looking_up_nested_keys(ea):
expected = 12467267
record = {
'Message': '12345',
'Fields': {
'ts': expected,
'severity': 'large',
'user': 'jimmay'
}
}
assert lookup_es_key(record, 'Fields.ts') == expected
def test_looking_up_nested_composite_keys(ea):
expected = 12467267
record = {
'Message': '12345',
'Fields': {
'ts.value': expected,
'severity': 'large',
'user': 'jimmay'
}
}
assert lookup_es_key(record, 'Fields.ts.value') == expected
def test_looking_up_arrays(ea):
record = {
'flags': [1, 2, 3],
'objects': [
{'foo': 'bar'},
{'foo': [{'bar': 'baz'}]},
{'foo': {'bar': 'baz'}}
]
}
assert lookup_es_key(record, 'flags[0]') == 1
assert lookup_es_key(record, 'flags[1]') == 2
assert lookup_es_key(record, 'objects[0]foo') == 'bar'
assert lookup_es_key(record, 'objects[1]foo[0]bar') == 'baz'
assert lookup_es_key(record, 'objects[2]foo.bar') == 'baz'
assert lookup_es_key(record, 'objects[1]foo[1]bar') is None
assert lookup_es_key(record, 'objects[1]foo[0]baz') is None
def test_add_raw_postfix(ea):
expected = 'foo.raw'
assert add_raw_postfix('foo', False) == expected
assert add_raw_postfix('foo.raw', False) == expected
expected = 'foo.keyword'
assert add_raw_postfix('foo', True) == expected
assert add_raw_postfix('foo.keyword', True) == expected
def test_replace_dots_in_field_names(ea):
actual = {
'a': {
'b.c': 'd',
'e': {
'f': {
'g.h': 0
}
}
},
'i.j.k': 1,
'l': {
'm': 2
}
}
expected = {
'a': {
'b_c': 'd',
'e': {
'f': {
'g_h': 0
}
}
},
'i_j_k': 1,
'l': {
'm': 2
}
}
assert replace_dots_in_field_names(actual) == expected
assert replace_dots_in_field_names({'a': 0, 1: 2}) == {'a': 0, 1: 2}
def test_resolve_string(ea):
match = {
'name': 'mySystem',
'temperature': 45,
'humidity': 80.56,
'sensors': ['outsideSensor', 'insideSensor'],
'foo': {'bar': 'baz'}
}
expected_outputs = [
"mySystem is online <MISSING VALUE>",
"Sensors ['outsideSensor', 'insideSensor'] in the <MISSING VALUE> have temp 45 and 80.56 humidity",
"Actuator <MISSING VALUE> in the <MISSING VALUE> has temp <MISSING VALUE>",
'Something baz']
old_style_strings = [
"%(name)s is online %(noKey)s",
"Sensors %(sensors)s in the %(noPlace)s have temp %(temperature)s and %(humidity)s humidity",
"Actuator %(noKey)s in the %(noPlace)s has temp %(noKey)s",
'Something %(foo.bar)s']
assert resolve_string(old_style_strings[0], match) == expected_outputs[0]
assert resolve_string(old_style_strings[1], match) == expected_outputs[1]
assert resolve_string(old_style_strings[2], match) == expected_outputs[2]
assert resolve_string(old_style_strings[3], match) == expected_outputs[3]
new_style_strings = [
"{name} is online {noKey}",
"Sensors {sensors} in the {noPlace} have temp {temperature} and {humidity} humidity",
"Actuator {noKey} in the {noPlace} has temp {noKey}",
"Something {foo[bar]}"]
assert resolve_string(new_style_strings[0], match) == expected_outputs[0]
assert resolve_string(new_style_strings[1], match) == expected_outputs[1]
assert resolve_string(new_style_strings[2], match) == expected_outputs[2]
assert resolve_string(new_style_strings[3], match) == expected_outputs[3]
def test_format_index():
pattern = 'logstash-%Y.%m.%d'
pattern2 = 'logstash-%Y.%W'
date = dt('2018-06-25T12:00:00Z')
date2 = dt('2018-06-26T12:00:00Z')
assert sorted(format_index(pattern, date, date).split(',')) == ['logstash-2018.06.25']
assert sorted(format_index(pattern, date, date2).split(',')) == ['logstash-2018.06.25', 'logstash-2018.06.26']
assert sorted(format_index(pattern, date, date2, True).split(',')) == ['logstash-2018.06.24',
'logstash-2018.06.25',
'logstash-2018.06.26']
assert sorted(format_index(pattern2, date, date2, True).split(',')) == ['logstash-2018.25', 'logstash-2018.26']
def test_should_scrolling_continue():
rule_no_max_scrolling = {'max_scrolling_count': 0, 'scrolling_cycle': 1}
rule_reached_max_scrolling = {'max_scrolling_count': 2, 'scrolling_cycle': 2}
rule_before_first_run = {'max_scrolling_count': 0, 'scrolling_cycle': 0}
rule_before_max_scrolling = {'max_scrolling_count': 2, 'scrolling_cycle': 1}
rule_over_max_scrolling = {'max_scrolling_count': 2, 'scrolling_cycle': 3}
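    # Scrolling should continue while max_scrolling_count is 0 (unlimited) or
    # the current scrolling_cycle has not yet reached it.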
assert should_scrolling_continue(rule_no_max_scrolling) is True
assert should_scrolling_continue(rule_reached_max_scrolling) is False
assert should_scrolling_continue(rule_before_first_run) is True
assert should_scrolling_continue(rule_before_max_scrolling) is True
assert should_scrolling_continue(rule_over_max_scrolling) is False
def test_ts_to_dt_with_format1():
assert ts_to_dt_with_format('2021/02/01 12:30:00', '%Y/%m/%d %H:%M:%S') == dt('2021-02-01 12:30:00+00:00')
def test_ts_to_dt_with_format2():
assert ts_to_dt_with_format('01/02/2021 12:30:00', '%d/%m/%Y %H:%M:%S') == dt('2021-02-01 12:30:00+00:00')
def test_ts_to_dt_with_format3():
date = datetime(2021, 7, 6, hour=0, minute=0, second=0)
assert ts_to_dt_with_format(date, '') == dt('2021-7-6 00:00')
def test_ts_to_dt_with_format4():
assert ts_to_dt_with_format('01/02/2021 12:30:00 +0900', '%d/%m/%Y %H:%M:%S %z') == dt('2021-02-01 12:30:00+09:00')
def test_dt_to_ts_with_format1():
assert dt_to_ts_with_format(dt('2021-02-01 12:30:00+00:00'), '%Y/%m/%d %H:%M:%S') == '2021/02/01 12:30:00'
def test_dt_to_ts_with_format2():
assert dt_to_ts_with_format(dt('2021-02-01 12:30:00+00:00'), '%d/%m/%Y %H:%M:%S') == '01/02/2021 12:30:00'
def test_dt_to_ts_with_format3():
assert dt_to_ts_with_format('2021-02-01 12:30:00+00:00', '%d/%m/%Y %H:%M:%S') == '2021-02-01 12:30:00+00:00'
def test_flatten_dict():
assert flatten_dict({'test': 'value1', 'test2': 'value2'}) == {'test': 'value1', 'test2': 'value2'}
def test_pytzfy1():
assert pytzfy(dt('2021-02-01 12:30:00+00:00')) == dt('2021-02-01 12:30:00+00:00')
def test_pytzfy2():
assert pytzfy(datetime(2018, 12, 31, 5, 0, 30, 1000)) == dt('2018-12-31 05:00:30.001000')
def test_get_module():
with pytest.raises(EAException) as ea:
get_module('test')
assert 'Could not import module' in str(ea)
def test_dt_to_ts(caplog):
caplog.set_level(logging.WARNING)
dt_to_ts('a')
user, level, message = caplog.record_tuples[0]
assert 'elastalert' == user
assert logging.WARNING == level
assert 'Expected datetime, got' in message
def test_ts_utc_to_tz():
date = datetime(2021, 7, 6, hour=0, minute=0, second=0)
actual_data = ts_utc_to_tz(date, 'Europe/Istanbul')
assert '2021-07-06 03:00:00+03:00' == str(actual_data)
test_build_es_conn_config_param = 'es_host, es_port, es_conn_timeout, es_send_get_body_as, ssl_show_warn, es_username, '
test_build_es_conn_config_param += 'es_password, es_api_key, es_bearer, aws_region, profile, use_ssl, verify_certs, '
test_build_es_conn_config_param += 'ca_certs, client_cert, client_key, es_url_prefix, expected_data'
@pytest.mark.parametrize(test_build_es_conn_config_param, [
('', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', True),
('localhost', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', True),
('localhost', 9200, '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
{
'use_ssl': False,
'verify_certs': True,
'ca_certs': None,
'client_cert': None,
'client_key': None,
'http_auth': None,
'es_username': None,
'es_password': None,
'es_api_key': None,
'es_bearer': None,
'aws_region': None,
'profile': None,
'headers': None,
'es_host': 'localhost',
'es_port': 9200,
'es_url_prefix': '',
'es_conn_timeout': 20,
'send_get_body_as': 'GET',
'ssl_show_warn': True
}),
('localhost', 9200, 30, 'POST', False, 'user', 'pass', 'key', 'bearer', 'us-east-1', 'default',
True, False, '/path/to/cacert.pem', '/path/to/client_cert.pem', '/path/to/client_key.key', 'elasticsearch',
{
'use_ssl': True,
'verify_certs': False,
'ca_certs': '/path/to/cacert.pem',
'client_cert': '/path/to/client_cert.pem',
'client_key': '/path/to/client_key.key',
'http_auth': None,
'es_username': 'user',
'es_password': 'pass',
'es_api_key': 'key',
'es_bearer': 'bearer',
'aws_region': 'us-east-1',
'profile': 'default',
'headers': None,
'es_host': 'localhost',
'es_port': 9200,
'es_url_prefix': 'elasticsearch',
'es_conn_timeout': 30,
'send_get_body_as': 'POST',
'ssl_show_warn': False
}),
])
def test_build_es_conn_config(es_host, es_port, es_conn_timeout, es_send_get_body_as, ssl_show_warn, es_username,
es_password, es_api_key, es_bearer, aws_region, profile, use_ssl, verify_certs,
ca_certs, client_cert, client_key, es_url_prefix, expected_data):
try:
conf = {}
if es_host:
conf['es_host'] = es_host
if es_port:
conf['es_port'] = es_port
if es_conn_timeout:
conf['es_conn_timeout'] = es_conn_timeout
if es_send_get_body_as:
conf['es_send_get_body_as'] = es_send_get_body_as
if ssl_show_warn != '':
conf['ssl_show_warn'] = ssl_show_warn
if es_username:
conf['es_username'] = es_username
if es_password:
conf['es_password'] = es_password
if es_api_key:
conf['es_api_key'] = es_api_key
if es_bearer:
conf['es_bearer'] = es_bearer
if aws_region:
conf['aws_region'] = aws_region
if profile:
conf['profile'] = profile
if use_ssl != '':
conf['use_ssl'] = use_ssl
if verify_certs != '':
conf['verify_certs'] = verify_certs
if ca_certs:
conf['ca_certs'] = ca_certs
if client_cert:
conf['client_cert'] = client_cert
if client_key:
conf['client_key'] = client_key
if es_url_prefix:
conf['es_url_prefix'] = es_url_prefix
actual = build_es_conn_config(conf)
assert expected_data == actual
except KeyError:
assert expected_data
@mock.patch.dict(os.environ, {'ES_USERNAME': 'USER',
'ES_PASSWORD': 'PASS',
'ES_API_KEY': 'KEY',
'ES_BEARER': 'BEARE'})
def test_build_es_conn_config2():
conf = {}
conf['es_host'] = 'localhost'
conf['es_port'] = 9200
expected = {
'use_ssl': False,
'verify_certs': True,
'ca_certs': None,
'client_cert': None,
'client_key': None,
'http_auth': None,
'es_username': 'USER',
'es_password': 'PASS',
'es_api_key': 'KEY',
'es_bearer': 'BEARE',
'aws_region': None,
'profile': None,
'headers': None,
'es_host': 'localhost',
'es_port': 9200,
'es_url_prefix': '',
'es_conn_timeout': 20,
'send_get_body_as': 'GET',
'ssl_show_warn': True
}
actual = build_es_conn_config(conf)
assert expected == actual
@pytest.mark.parametrize('es_host, es_port, es_bearer, es_api_key', [
('localhost', 9200, '', ''),
('localhost', 9200, 'bearer', 'bearer')
])
@mock.patch.dict(os.environ, {'AWS_DEFAULT_REGION': ''})
def test_elasticsearch_client(es_host, es_port, es_bearer, es_api_key):
conf = {}
conf['es_host'] = es_host
conf['es_port'] = es_port
if es_bearer:
conf['es_bearer'] = es_bearer
if es_api_key:
conf['es_api_key'] = es_api_key
    actual = elasticsearch_client(conf)
    assert actual is not None
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cli/command/image/build.go | package image
import (
"archive/tar"
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/image/build"
"github.com/docker/cli/opts"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/urlutil"
units "github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var errStdinConflict = errors.New("invalid argument: can't use stdin for both build context and dockerfile")
type buildOptions struct {
context string
dockerfileName string
tags opts.ListOpts
labels opts.ListOpts
buildArgs opts.ListOpts
extraHosts opts.ListOpts
ulimits *opts.UlimitOpt
memory opts.MemBytes
memorySwap opts.MemSwapBytes
shmSize opts.MemBytes
cpuShares int64
cpuPeriod int64
cpuQuota int64
cpuSetCpus string
cpuSetMems string
cgroupParent string
isolation string
quiet bool
noCache bool
console opts.NullableBool
rm bool
forceRm bool
pull bool
cacheFrom []string
compress bool
securityOpt []string
networkMode string
squash bool
target string
imageIDFile string
stream bool
platform string
untrusted bool
}
// dockerfileFromStdin returns true when the user specified that the Dockerfile
// should be read from stdin instead of a file
func (o buildOptions) dockerfileFromStdin() bool {
return o.dockerfileName == "-"
}
// contextFromStdin returns true when the user specified that the build context
// should be read from stdin
func (o buildOptions) contextFromStdin() bool {
return o.context == "-"
}
func newBuildOptions() buildOptions {
ulimits := make(map[string]*units.Ulimit)
return buildOptions{
tags: opts.NewListOpts(validateTag),
buildArgs: opts.NewListOpts(opts.ValidateEnv),
ulimits: opts.NewUlimitOpt(&ulimits),
labels: opts.NewListOpts(opts.ValidateEnv),
extraHosts: opts.NewListOpts(opts.ValidateExtraHost),
}
}
// NewBuildCommand creates a new `docker build` command
func NewBuildCommand(dockerCli command.Cli) *cobra.Command {
options := newBuildOptions()
cmd := &cobra.Command{
Use: "build [OPTIONS] PATH | URL | -",
Short: "Build an image from a Dockerfile",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
options.context = args[0]
return runBuild(dockerCli, options)
},
}
flags := cmd.Flags()
flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format")
flags.Var(&options.buildArgs, "build-arg", "Set build-time variables")
flags.Var(options.ulimits, "ulimit", "Ulimit options")
flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
flags.VarP(&options.memory, "memory", "m", "Memory limit")
flags.Var(&options.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap")
flags.Var(&options.shmSize, "shm-size", "Size of /dev/shm")
flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
flags.StringVar(&options.cpuSetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)")
flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology")
flags.Var(&options.labels, "label", "Set metadata for an image")
flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image")
flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build")
flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers")
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image")
flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources")
flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip")
flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options")
flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
flags.SetAnnotation("network", "version", []string{"1.25"})
flags.Var(&options.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)")
flags.StringVar(&options.target, "target", "", "Set the target build stage to build.")
flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")
command.AddTrustVerificationFlags(flags, &options.untrusted, dockerCli.ContentTrustEnabled())
command.AddPlatformFlag(flags, &options.platform)
flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer")
flags.SetAnnotation("squash", "experimental", nil)
flags.SetAnnotation("squash", "version", []string{"1.25"})
flags.BoolVar(&options.stream, "stream", false, "Stream attaches to server to negotiate build context")
flags.SetAnnotation("stream", "experimental", nil)
flags.SetAnnotation("stream", "version", []string{"1.31"})
flags.Var(&options.console, "console", "Show console output (with buildkit only) (true, false, auto)")
flags.SetAnnotation("console", "experimental", nil)
flags.SetAnnotation("console", "version", []string{"1.38"})
return cmd
}
// lastProgressOutput is the same as progress.Output except
// that it only outputs the last update. It is used in
// non-terminal scenarios to suppress verbose messages.
type lastProgressOutput struct {
output progress.Output
}
// WriteProgress formats progress information from a ProgressReader.
func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error {
if !prog.LastUpdate {
return nil
}
return out.output.WriteProgress(prog)
}
// nolint: gocyclo
func runBuild(dockerCli command.Cli, options buildOptions) error {
if buildkitEnv := os.Getenv("DOCKER_BUILDKIT"); buildkitEnv != "" {
enableBuildkit, err := strconv.ParseBool(buildkitEnv)
if err != nil {
return errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value")
}
if enableBuildkit {
return runBuildBuildKit(dockerCli, options)
}
}
var (
buildCtx io.ReadCloser
dockerfileCtx io.ReadCloser
err error
contextDir string
tempDir string
relDockerfile string
progBuff io.Writer
buildBuff io.Writer
remote string
)
if options.compress && options.stream {
return errors.New("--compress conflicts with --stream options")
}
if options.dockerfileFromStdin() {
if options.contextFromStdin() {
return errStdinConflict
}
dockerfileCtx = dockerCli.In()
}
specifiedContext := options.context
progBuff = dockerCli.Out()
buildBuff = dockerCli.Out()
if options.quiet {
progBuff = bytes.NewBuffer(nil)
buildBuff = bytes.NewBuffer(nil)
}
if options.imageIDFile != "" {
// Avoid leaving a stale file if we eventually fail
if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "Removing image ID file")
}
}
switch {
case options.contextFromStdin():
		// buildCtx is a tar archive; if stdin was a Dockerfile, it is wrapped into one
buildCtx, relDockerfile, err = build.GetContextFromReader(dockerCli.In(), options.dockerfileName)
case isLocalDir(specifiedContext):
contextDir, relDockerfile, err = build.GetContextFromLocalDir(specifiedContext, options.dockerfileName)
if err == nil && strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) {
// Dockerfile is outside of build-context; read the Dockerfile and pass it as dockerfileCtx
dockerfileCtx, err = os.Open(options.dockerfileName)
if err != nil {
return errors.Errorf("unable to open Dockerfile: %v", err)
}
defer dockerfileCtx.Close()
}
case urlutil.IsGitURL(specifiedContext):
tempDir, relDockerfile, err = build.GetContextFromGitURL(specifiedContext, options.dockerfileName)
case urlutil.IsURL(specifiedContext):
buildCtx, relDockerfile, err = build.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName)
default:
return errors.Errorf("unable to prepare context: path %q not found", specifiedContext)
}
if err != nil {
if options.quiet && urlutil.IsURL(specifiedContext) {
fmt.Fprintln(dockerCli.Err(), progBuff)
}
return errors.Errorf("unable to prepare context: %s", err)
}
if tempDir != "" {
defer os.RemoveAll(tempDir)
contextDir = tempDir
}
// read from a directory into tar archive
if buildCtx == nil && !options.stream {
excludes, err := build.ReadDockerignore(contextDir)
if err != nil {
return err
}
if err := build.ValidateContextDirectory(contextDir, excludes); err != nil {
return errors.Errorf("error checking context: '%s'.", err)
}
// And canonicalize dockerfile name to a platform-independent one
relDockerfile = archive.CanonicalTarNameForPath(relDockerfile)
excludes = build.TrimBuildFilesFromExcludes(excludes, relDockerfile, options.dockerfileFromStdin())
buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
ExcludePatterns: excludes,
ChownOpts: &idtools.IDPair{UID: 0, GID: 0},
})
if err != nil {
return err
}
}
// replace Dockerfile if it was added from stdin or a file outside the build-context, and there is archive context
if dockerfileCtx != nil && buildCtx != nil {
buildCtx, relDockerfile, err = build.AddDockerfileToBuildContext(dockerfileCtx, buildCtx)
if err != nil {
return err
}
}
// if streaming and Dockerfile was not from stdin then read from file
// to the same reader that is usually stdin
if options.stream && dockerfileCtx == nil {
dockerfileCtx, err = os.Open(relDockerfile)
if err != nil {
return errors.Wrapf(err, "failed to open %s", relDockerfile)
}
defer dockerfileCtx.Close()
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var resolvedTags []*resolvedTag
if !options.untrusted {
translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) {
return TrustedReference(ctx, dockerCli, ref, nil)
}
// if there is a tar wrapper, the dockerfile needs to be replaced inside it
if buildCtx != nil {
// Wrap the tar archive to replace the Dockerfile entry with the rewritten
// Dockerfile which uses trusted pulls.
buildCtx = replaceDockerfileForContentTrust(ctx, buildCtx, relDockerfile, translator, &resolvedTags)
} else if dockerfileCtx != nil {
			// if there was no archive context, still do the possible replacements in the Dockerfile
newDockerfile, _, err := rewriteDockerfileFromForContentTrust(ctx, dockerfileCtx, translator)
if err != nil {
return err
}
dockerfileCtx = ioutil.NopCloser(bytes.NewBuffer(newDockerfile))
}
}
if options.compress {
buildCtx, err = build.Compress(buildCtx)
if err != nil {
return err
}
}
// Setup an upload progress bar
progressOutput := streamformatter.NewProgressOutput(progBuff)
if !dockerCli.Out().IsTerminal() {
progressOutput = &lastProgressOutput{output: progressOutput}
}
	// if up to this point nothing has set the context then we must have another
	// way of sending it (streaming), so set the context to the Dockerfile
if dockerfileCtx != nil && buildCtx == nil {
buildCtx = dockerfileCtx
}
s, err := trySession(dockerCli, contextDir)
if err != nil {
return err
}
var body io.Reader
if buildCtx != nil && !options.stream {
body = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")
}
// add context stream to the session
if options.stream && s != nil {
syncDone := make(chan error) // used to signal first progress reporting completed.
		// progress would also send errors, but we don't need them here as errors
		// are handled by session.Run() and ImageBuild()
if err := addDirToSession(s, contextDir, progressOutput, syncDone); err != nil {
return err
}
buf := newBufferedWriter(syncDone, buildBuff)
defer func() {
select {
case <-buf.flushed:
case <-ctx.Done():
}
}()
buildBuff = buf
remote = clientSessionRemote
body = buildCtx
}
configFile := dockerCli.ConfigFile()
authConfigs, _ := configFile.GetAllCredentials()
buildOptions := imageBuildOptions(dockerCli, options)
buildOptions.Version = types.BuilderV1
buildOptions.Dockerfile = relDockerfile
buildOptions.AuthConfigs = authConfigs
buildOptions.RemoteContext = remote
if s != nil {
go func() {
logrus.Debugf("running session: %v", s.ID())
if err := s.Run(ctx, dockerCli.Client().DialSession); err != nil {
logrus.Error(err)
cancel() // cancel progress context
}
}()
buildOptions.SessionID = s.ID()
}
response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions)
if err != nil {
if options.quiet {
fmt.Fprintf(dockerCli.Err(), "%s", progBuff)
}
cancel()
return err
}
defer response.Body.Close()
imageID := ""
aux := func(msg jsonmessage.JSONMessage) {
var result types.BuildResult
if err := json.Unmarshal(*msg.Aux, &result); err != nil {
fmt.Fprintf(dockerCli.Err(), "Failed to parse aux message: %s", err)
} else {
imageID = result.ID
}
}
err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), aux)
if err != nil {
if jerr, ok := err.(*jsonmessage.JSONError); ok {
// If no error code is set, default to 1
if jerr.Code == 0 {
jerr.Code = 1
}
if options.quiet {
fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff)
}
return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
}
return err
}
// Windows: show error message about modified file permissions if the
// daemon isn't running Windows.
if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet {
fmt.Fprintln(dockerCli.Out(), "SECURITY WARNING: You are building a Docker "+
"image from Windows against a non-Windows Docker host. All files and "+
"directories added to build context will have '-rwxr-xr-x' permissions. "+
"It is recommended to double check and reset permissions for sensitive "+
"files and directories.")
}
// Everything worked so if -q was provided the output from the daemon
// should be just the image ID and we'll print that to stdout.
if options.quiet {
imageID = fmt.Sprintf("%s", buildBuff)
		fmt.Fprint(dockerCli.Out(), imageID)
}
if options.imageIDFile != "" {
if imageID == "" {
return errors.Errorf("Server did not provide an image ID. Cannot write %s", options.imageIDFile)
}
if err := ioutil.WriteFile(options.imageIDFile, []byte(imageID), 0666); err != nil {
return err
}
}
if !options.untrusted {
// Since the build was successful, now we must tag any of the resolved
// images from the above Dockerfile rewrite.
for _, resolved := range resolvedTags {
if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil {
return err
}
}
}
return nil
}
func isLocalDir(c string) bool {
_, err := os.Stat(c)
return err == nil
}
type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error)
// validateTag checks if the given image name can be resolved.
func validateTag(rawRepo string) (string, error) {
_, err := reference.ParseNormalizedNamed(rawRepo)
if err != nil {
return "", err
}
return rawRepo, nil
}
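// dockerfileFromLinePattern matches "FROM <image>" instructions (case-insensitive)
// and captures the image reference so it can be rewritten to a trusted digest below.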
var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`)
// resolvedTag records the repository, tag, and resolved digest reference
// from a Dockerfile rewrite.
type resolvedTag struct {
digestRef reference.Canonical
tagRef reference.NamedTagged
}
// rewriteDockerfileFromForContentTrust rewrites the given Dockerfile by resolving images in
// "FROM <image>" instructions to a digest reference. `translator` is a
// function that takes a repository name and tag reference and returns a
// trusted digest reference.
// This should be called *only* when content trust is enabled
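// For illustration (the image name is hypothetical): a line such as
//   FROM ubuntu:16.04
// may be rewritten to a digest-pinned form resolved by the translator, e.g.
//   FROM ubuntu@sha256:<digest>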
func rewriteDockerfileFromForContentTrust(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) {
scanner := bufio.NewScanner(dockerfile)
buf := bytes.NewBuffer(nil)
// Scan the lines of the Dockerfile, looking for a "FROM" line.
for scanner.Scan() {
line := scanner.Text()
matches := dockerfileFromLinePattern.FindStringSubmatch(line)
if matches != nil && matches[1] != api.NoBaseImageSpecifier {
// Replace the line with a resolved "FROM repo@digest"
var ref reference.Named
ref, err = reference.ParseNormalizedNamed(matches[1])
if err != nil {
return nil, nil, err
}
ref = reference.TagNameOnly(ref)
if ref, ok := ref.(reference.NamedTagged); ok {
trustedRef, err := translator(ctx, ref)
if err != nil {
return nil, nil, err
}
line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", reference.FamiliarString(trustedRef)))
resolvedTags = append(resolvedTags, &resolvedTag{
digestRef: trustedRef,
tagRef: ref,
})
}
}
_, err := fmt.Fprintln(buf, line)
if err != nil {
return nil, nil, err
}
}
return buf.Bytes(), resolvedTags, scanner.Err()
}
// replaceDockerfileForContentTrust wraps the given input tar archive stream and
// uses the translator to replace the Dockerfile with a rewritten one that uses
// trusted (digest) references. Returns a new tar archive stream with the
// replaced Dockerfile.
func replaceDockerfileForContentTrust(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser {
pipeReader, pipeWriter := io.Pipe()
go func() {
tarReader := tar.NewReader(inputTarStream)
tarWriter := tar.NewWriter(pipeWriter)
defer inputTarStream.Close()
for {
hdr, err := tarReader.Next()
if err == io.EOF {
// Signals end of archive.
tarWriter.Close()
pipeWriter.Close()
return
}
if err != nil {
pipeWriter.CloseWithError(err)
return
}
content := io.Reader(tarReader)
if hdr.Name == dockerfileName {
// This entry is the Dockerfile. Since the tar archive was
// generated from a directory on the local filesystem, the
// Dockerfile will only appear once in the archive.
var newDockerfile []byte
newDockerfile, *resolvedTags, err = rewriteDockerfileFromForContentTrust(ctx, content, translator)
if err != nil {
pipeWriter.CloseWithError(err)
return
}
hdr.Size = int64(len(newDockerfile))
content = bytes.NewBuffer(newDockerfile)
}
if err := tarWriter.WriteHeader(hdr); err != nil {
pipeWriter.CloseWithError(err)
return
}
if _, err := io.Copy(tarWriter, content); err != nil {
pipeWriter.CloseWithError(err)
return
}
}
}()
return pipeReader
}
func imageBuildOptions(dockerCli command.Cli, options buildOptions) types.ImageBuildOptions {
configFile := dockerCli.ConfigFile()
return types.ImageBuildOptions{
Memory: options.memory.Value(),
MemorySwap: options.memorySwap.Value(),
Tags: options.tags.GetAll(),
SuppressOutput: options.quiet,
NoCache: options.noCache,
Remove: options.rm,
ForceRemove: options.forceRm,
PullParent: options.pull,
Isolation: container.Isolation(options.isolation),
CPUSetCPUs: options.cpuSetCpus,
CPUSetMems: options.cpuSetMems,
CPUShares: options.cpuShares,
CPUQuota: options.cpuQuota,
CPUPeriod: options.cpuPeriod,
CgroupParent: options.cgroupParent,
ShmSize: options.shmSize.Value(),
Ulimits: options.ulimits.GetList(),
BuildArgs: configFile.ParseProxyConfig(dockerCli.Client().DaemonHost(), options.buildArgs.GetAll()),
Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()),
CacheFrom: options.cacheFrom,
SecurityOpt: options.securityOpt,
NetworkMode: options.networkMode,
Squash: options.squash,
ExtraHosts: options.extraHosts.GetAll(),
Target: options.target,
Platform: options.platform,
}
}
| [
"\"DOCKER_BUILDKIT\""
]
| []
| [
"DOCKER_BUILDKIT"
]
| [] | ["DOCKER_BUILDKIT"] | go | 1 | 0 | |
gao_serial_test.go | package gao_serial
import (
"fmt"
"os"
"strings"
"testing"
"time"
)
func TestGaoSerialOpen(t *testing.T) {
ports := strings.Split(os.Getenv("PORTS"), ",")
commands := []string{
"ATZ\r",
"AT\r",
"AT+CSQ\r",
"AT+CGSN\r",
"AT+CREG?\r",
"AT+CMGF=1\r",
}
for _, port := range ports {
gao := NewGaoSerial(2 * time.Second)
fmt.Printf("start to open port: %s\n", port)
if err := gao.Open(port, 115200); err != nil {
fmt.Printf("open port: %s error: %s\n", port, err.Error())
continue
}
for _, cmd := range commands {
fmt.Printf("write cmd: %s\n", cmd)
if _, err := gao.Write([]byte(cmd)); err != nil {
fmt.Printf("write %s error: %s\n", cmd, err.Error())
if _, ok := err.(ErrPortBlock); ok {
break
}
}
start := time.Now()
timeout := time.Second
if strings.Contains(cmd, "test") {
time.Sleep(2 * time.Second)
}
bs, err := gao.Read(timeout)
fmt.Printf("read cost time: %v\n", time.Since(start))
if err != nil {
fmt.Printf("read error: %s\n", err.Error())
if _, ok := err.(ErrPortBlock); ok {
break
}
}
fmt.Printf("read content: %s\n", string(bs))
}
}
}
| [
"\"PORTS\""
]
| []
| [
"PORTS"
]
| [] | ["PORTS"] | go | 1 | 0 | |
Problem Solving/Birthday Cake Candles/Solution.java | /**
* @author SANKALP SAXENA
*/
import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
public class Solution {
// Complete the birthdayCakeCandles function below.
static int birthdayCakeCandles(int[] ar) {
int max = ar[0];
for(int i : ar) {
if(i > max)
max = i;
}
int count = 0;
for(int i : ar){
if(i == max)
count ++;
}
return count;
}
private static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int arCount = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
int[] ar = new int[arCount];
String[] arItems = scanner.nextLine().split(" ");
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int i = 0; i < arCount; i++) {
int arItem = Integer.parseInt(arItems[i]);
ar[i] = arItem;
}
int result = birthdayCakeCandles(ar);
bufferedWriter.write(String.valueOf(result));
bufferedWriter.newLine();
bufferedWriter.close();
scanner.close();
}
}
| [
"\"OUTPUT_PATH\""
]
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | java | 1 | 0 | |
IPython/utils/sysinfo.py | # encoding: utf-8
"""
Utilities for getting information about IPython and the system it's running in.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import platform
import pprint
import sys
import subprocess
from IPython.core import release
from IPython.utils import py3compat, _sysinfo, encoding
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def pkg_commit_hash(pkg_path):
"""Get short form of commit hash given directory `pkg_path`
We get the commit hash from (in order of preference):
* IPython.utils._sysinfo.commit
* git output, if we are in a git repository
If these fail, we return a not-found placeholder tuple
Parameters
----------
pkg_path : str
directory containing package
only used for getting commit from active repo
Returns
-------
hash_from : str
Where we got the hash from - description
hash_str : str
short form of hash
"""
# Try and get commit from written commit text file
if _sysinfo.commit:
return "installation", _sysinfo.commit
# maybe we are in a repository
proc = subprocess.Popen('git rev-parse --short HEAD',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=pkg_path, shell=True)
repo_commit, _ = proc.communicate()
if repo_commit:
return 'repository', repo_commit.strip()
return '(none found)', '<not found>'
def pkg_info(pkg_path):
"""Return dict describing the context of this package
Parameters
----------
pkg_path : str
path containing __init__.py for package
Returns
-------
context : dict
with named parameters of interest
"""
src, hsh = pkg_commit_hash(pkg_path)
return dict(
ipython_version=release.version,
ipython_path=pkg_path,
codename=release.codename,
commit_source=src,
commit_hash=hsh,
sys_version=sys.version,
sys_executable=sys.executable,
sys_platform=sys.platform,
platform=platform.platform(),
os_name=os.name,
default_encoding=encoding.DEFAULT_ENCODING,
)
def get_sys_info():
"""Return useful information about IPython and the system, as a dict."""
p = os.path
path = p.dirname(p.abspath(p.join(__file__, '..')))
return pkg_info(path)
@py3compat.doctest_refactor_print
def sys_info():
"""Return useful information about IPython and the system, as a string.
Examples
--------
::
In [2]: print sys_info()
{'commit_hash': '144fdae', # random
'commit_source': 'repository',
'ipython_path': '/home/fperez/usr/lib/python2.6/site-packages/IPython',
'ipython_version': '0.11.dev',
'os_name': 'posix',
'platform': 'Linux-2.6.35-22-generic-i686-with-Ubuntu-10.10-maverick',
'sys_executable': '/usr/bin/python',
'sys_platform': 'linux2',
'sys_version': '2.6.6 (r266:84292, Sep 15 2010, 15:52:39) \\n[GCC 4.4.5]'}
"""
return pprint.pformat(get_sys_info())
def _num_cpus_unix():
"""Return the number of active CPUs on a Unix system."""
return os.sysconf("SC_NPROCESSORS_ONLN")
def _num_cpus_darwin():
"""Return the number of active CPUs on a Darwin system."""
p = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'], stdout=subprocess.PIPE)
return p.stdout.read()
def _num_cpus_windows():
"""Return the number of active CPUs on a Windows system."""
return os.environ.get("NUMBER_OF_PROCESSORS")
def num_cpus():
"""Return the effective number of CPUs in the system as an integer.
This cross-platform function makes an attempt at finding the total number of
available CPUs in the system, as returned by various underlying system and
python calls.
If it can't find a sensible answer, it returns 1 (though an error *may* make
it return a large positive number that's actually incorrect).
"""
# Many thanks to the Parallel Python project (http://www.parallelpython.com)
# for the names of the keys we needed to look up for this function. This
# code was inspired by their equivalent function.
ncpufuncs = {'Linux': _num_cpus_unix,
'Darwin': _num_cpus_darwin,
'Windows': _num_cpus_windows,
# On Vista, python < 2.5.2 has a bug and returns 'Microsoft'
# See http://bugs.python.org/issue1082 for details.
'Microsoft': _num_cpus_windows,
}
ncpufunc = ncpufuncs.get(platform.system(),
# default to unix version (Solaris, AIX, etc)
_num_cpus_unix)
try:
ncpus = max(1, int(ncpufunc()))
except:
ncpus = 1
return ncpus
| []
| []
| [
"NUMBER_OF_PROCESSORS"
]
| [] | ["NUMBER_OF_PROCESSORS"] | python | 1 | 0 | |
backend/ideapros_llc_home_s_32960/wsgi.py | """
WSGI config for ideapros_llc_home_s_32960 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ideapros_llc_home_s_32960.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
demos/models/ATS-Forms/src/main/java/es/osoco/bbva/ats/forms/adapter/infrastructure/RabbitMQConnection.java | package es.osoco.bbva.ats.forms.adapter.infrastructure;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;
import es.osoco.logging.LoggingFactory;
import java.io.IOException;
import java.util.concurrent.TimeoutException;
public class RabbitMQConnection {
public static RabbitMQConnection getInstance() {
if(RabbitMQConnection.INSTANCE == null) {
RabbitMQConnection.build();
}
return RabbitMQConnection.INSTANCE;
}
public Connection getConnection() {
if ((connection == null) || (!connection.isOpen())) {
createConnection();
}
return connection;
}
private static final String RABBITMQ_IP = System.getenv("RABBITMQ_IP");
private static final String USER_NAME = System.getenv("RABBITMQ_USER_NAME");
private static final String PASSWORD = System.getenv("RABBITMQ_PASSWORD");
private static RabbitMQConnection INSTANCE = new RabbitMQConnection();
private Connection connection;
private RabbitMQConnection() {
createConnection();
}
private void createConnection() {
final ConnectionFactory factory = new ConnectionFactory();
try {
factory.setHost(RABBITMQ_IP);
factory.setPort(5672);
factory.setVirtualHost("/");
factory.setUsername(USER_NAME);
factory.setPassword(PASSWORD);
factory.setRequestedHeartbeat(30);
connection = factory.newConnection();
} catch (IOException e) {
LoggingFactory.getInstance().createLogging().error("RabbitMQ IO exception: " + e.getMessage());
} catch (TimeoutException e) {
LoggingFactory.getInstance().createLogging().error("RabbitMQ time out exception: " + e.getMessage());
}
}
private static void build() {
RabbitMQConnection.INSTANCE = new RabbitMQConnection();
}
}
| [
"\"RABBITMQ_IP\"",
"\"RABBITMQ_USER_NAME\"",
"\"RABBITMQ_PASSWORD\""
]
| []
| [
"RABBITMQ_IP",
"RABBITMQ_USER_NAME",
"RABBITMQ_PASSWORD"
]
| [] | ["RABBITMQ_IP", "RABBITMQ_USER_NAME", "RABBITMQ_PASSWORD"] | java | 3 | 0 | |
api/types/types.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import (
"os"
"path/filepath"
"github.com/docker/go-connections/nat"
)
const (
DefaultVanName string = "skupper"
DefaultBridgeName string = "skupper0"
)
type Path int
const (
HostPath Path = iota
CertsPath
ConnectionsPath
ConfigPath
ConsoleUsersPath
SaslConfigPath
ServicesPath
SitesPath
)
var skupperPaths = map[Path]string{
HostPath: "",
CertsPath: "qpid-dispatch-certs",
ConnectionsPath: "connections",
ConfigPath: "config",
ConsoleUsersPath: "console-users",
SaslConfigPath: "sasl-config",
ServicesPath: "services",
SitesPath: "sites",
}
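// GetSkupperPath returns the host path for the given Path constant, rooted at
// $SKUPPER_TMPDIR/skupper; for example, CertsPath resolves to
// "$SKUPPER_TMPDIR/skupper/qpid-dispatch-certs".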
func GetSkupperPath(p Path) string {
return filepath.Join(os.Getenv("SKUPPER_TMPDIR"), "skupper", skupperPaths[p])
}
// TransportMode describes how a qdr is intended to be deployed, either interior or edge
type TransportMode string
const (
// TransportModeInterior means the qdr will participate in inter-router protocol exchanges
TransportModeInterior TransportMode = "interior"
// TransportModeEdge means that the qdr will connect to interior routers for network access
TransportModeEdge = "edge"
)
// Transport constants
const (
TransportDeploymentName string = "skupper-router"
TransportComponentName string = "router"
DefaultTransportImage string = "quay.io/skupper/qdrouterd:0.4"
TransportContainerName string = "router"
TransportLivenessPort int32 = 9090
TransportEnvConfig string = "QDROUTERD_CONF"
TransportSaslConfig string = "skupper-sasl-config"
TransportNetworkName string = "skupper-network"
TransportConfigFile string = "qdrouterd.json"
)
var TransportPrometheusAnnotations = map[string]string{
"prometheus.io/port": "9090",
"prometheus.io/scrape": "true",
}
// Controller constants
// TODO: revert controller image
const (
ControllerDeploymentName string = "skupper-service-controller"
ControllerComponentName string = "controller"
// DefaultControllerImage string = "quay.io/ajssmith/skupper-exp-controller"
DefaultControllerImage string = "localhost:5000/skupper-exp-controller"
ControllerContainerName string = "service-controller"
ControllerConfigPath string = "/etc/messaging/"
ControllerPluginPath string = "/etc/plugins"
)
// Skupper qualifiers
const (
BaseQualifier string = "skupper.io"
TokenGeneratedBy string = BaseQualifier + "/generated-by"
TokenCost string = BaseQualifier + "/cost"
)
// Console constants
const (
ConsolePortName string = "console"
ConsoleServiceName string = "skupper-console"
ConsoleDefaultServicePort int32 = 8080
ConsoleDefaultServiceTargetPort int32 = 8080
ConsoleOpenShiftServicePort int32 = 8888
ConsoleOpenShiftOauthServicePort int32 = 443
ConsoleOpenShiftOuathServiceTargetPort int32 = 8443
ConsoleOpenShiftServingCerts string = "skupper-proxy-certs"
)
type ConsoleAuthMode string
const (
ConsoleAuthModeInternal ConsoleAuthMode = "internal"
ConsoleAuthModeUnsecured = "unsecured"
)
// Router constants
const (
AmqpDefaultPort int32 = 5672
AmqpsDefaultPort int32 = 5671
EdgeRole string = "edge"
EdgeRouteName string = "skupper-edge"
EdgeListenerPort int32 = 45671
InterRouterRole string = "inter-router"
InterRouterListenerPort int32 = 55671
InterRouterRouteName string = "skupper-inter-router"
InterRouterProfile string = "skupper-internal"
)
// Controller Service Interface constants
const (
ServiceSyncAddress = "mc/$skupper-service-sync"
)
// TODO: what is the possibility of using types from skupper itself (e.g. no
// namespace for docker, or we change the name to endpoint, etc.)
// RouterSpec is the specification of VAN network with router, controller and assembly
type RouterSpec struct {
Name string `json:"name,omitempty"`
AuthMode ConsoleAuthMode `json:"authMode,omitempty"`
Transport DeploymentSpec `json:"transport,omitempty"`
Controller DeploymentSpec `json:"controller,omitempty"`
RouterConfig string `json:"routerConfig,omitempty"`
Users []User `json:"users,omitempty"`
CertAuthoritys []CertAuthority `json:"certAuthoritys,omitempty"`
Credentials []Credential `json:"credentials,omitempty"`
}
// DeploymentSpec for the VAN router or controller components to run within a cluster
type DeploymentSpec struct {
Image string `json:"image,omitempty"`
LivenessPort int32 `json:"livenessPort,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
EnvVar map[string]string `json:"envVar,omitempty"`
Ports nat.PortSet `json:"ports,omitempty"`
Volumes []string `json:"volumes,omitempty"`
Mounts map[string]string `json:"mounts,omitempty"`
}
type ConnectorRole string
const (
ConnectorRoleInterRouter ConnectorRole = "inter-router"
ConnectorRoleEdge = "edge"
)
type Connector struct {
Name string `json:"name,omitempty"`
Role string `json:"role,omitempty"`
Host string `json:"host"`
Port string `json:"port"`
RouteContainer bool `json:"routeContainer,omitempty"`
Cost int32 `json:"cost,omitempty"`
VerifyHostname bool `json:"verifyHostname,omitempty"`
SslProfile string `json:"sslProfile,omitempty"`
LinkCapacity int32 `json:"linkCapacity,omitempty"`
}
type Credential struct {
CA string
Name string
Subject string
Hosts []string
ConnectJson bool
Post bool
Data map[string][]byte
}
type CertAuthority struct {
Name string
}
type User struct {
Name string
Password string
}
type TransportConnectedSites struct {
Direct int
Indirect int
Total int
Warnings []string
}
type ServiceInterface struct {
Address string `json:"address"`
Protocol string `json:"protocol"`
Port int `json:"port"`
EventChannel bool `json:"eventchannel,omitempty"`
Aggregate string `json:"aggregate,omitempty"`
Headless *Headless `json:"headless,omitempty"`
Targets []ServiceInterfaceTarget `json:"targets"`
Origin string `json:"origin,omitempty"`
Alias string `json:"alias,omitempty"`
}
type ServiceInterfaceTarget struct {
Name string `json:"name,omitempty"`
Selector string `json:"selector"`
TargetPort int `json:"targetPort,omitempty"`
Service string `json:"service,omitempty"`
}
type Headless struct {
Name string `json:"name"`
Size int `json:"size"`
TargetPort int `json:"targetPort,omitempty"`
}
type ByServiceInterfaceAddress []ServiceInterface
func (a ByServiceInterfaceAddress) Len() int {
return len(a)
}
func (a ByServiceInterfaceAddress) Less(i, j int) bool {
	return a[i].Address > a[j].Address
}
func (a ByServiceInterfaceAddress) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
| [
"\"SKUPPER_TMPDIR\""
]
| []
| [
"SKUPPER_TMPDIR"
]
| [] | ["SKUPPER_TMPDIR"] | go | 1 | 0 | |
config/settings/local.py | # -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
import os
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='k%-ir&m=wlet6(-kn%9i65tw6u^0_b(z#yc*kp2&i&fozawb*#')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
# trick to get the debug toolbar working when developing with docker
if os.environ.get('USE_DOCKER') == 'yes':
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + "1"]
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['*'])
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(levelname)s %(asctime)s %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
'propagate': True,
},
},
'root': {
'handlers': ['console'],
'level': 'INFO',
},
}
| []
| []
| [
"USE_DOCKER",
"DJANGO_LOG_LEVEL"
]
| [] | ["USE_DOCKER", "DJANGO_LOG_LEVEL"] | python | 2 | 0 | |
aladin/lib/python3.6/site-packages/treebeard/tests/test_treebeard.py | # -*- coding: utf-8 -*-
"""Unit/Functional tests"""
from __future__ import with_statement, unicode_literals
import datetime
import os
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.models import User
from django.contrib.messages.storage.fallback import FallbackStorage
from django.db.models import Q
from django.template import Template, Context
from django.test import TestCase
from django.test.client import RequestFactory
import pytest
from treebeard import numconv
from treebeard.admin import admin_factory, TO_FIELD_VAR
from treebeard.exceptions import InvalidPosition, InvalidMoveToDescendant,\
PathOverflow, MissingNodeOrderBy, NodeAlreadySaved
from treebeard.forms import movenodeform_factory
from treebeard.templatetags.admin_tree import get_static_url
from treebeard.tests import models
from treebeard.tests.admin import register_all as admin_register_all
admin_register_all()
BASE_DATA = [
{'data': {'desc': '1'}},
{'data': {'desc': '2'}, 'children': [
{'data': {'desc': '21'}},
{'data': {'desc': '22'}},
{'data': {'desc': '23'}, 'children': [
{'data': {'desc': '231'}},
]},
{'data': {'desc': '24'}},
]},
{'data': {'desc': '3'}},
{'data': {'desc': '4'}, 'children': [
{'data': {'desc': '41'}},
]}]
UNCHANGED = [
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
def _prepare_db_test(request):
case = TestCase(methodName='__init__')
case._pre_setup()
request.addfinalizer(case._post_teardown)
return request.param
def idfn(fixture_value):
return fixture_value.__name__
@pytest.fixture(scope='function',
params=models.BASE_MODELS + models.PROXY_MODELS,
ids=idfn)
def model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.BASE_MODELS, ids=idfn)
def model_without_proxy(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.UNICODE_MODELS, ids=idfn)
def model_with_unicode(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.SORTED_MODELS, ids=idfn)
def sorted_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.RELATED_MODELS, ids=idfn)
def related_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.INHERITED_MODELS, ids=idfn)
def inherited_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.MP_SHORTPATH_MODELS, ids=idfn)
def mpshort_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestNodeShortPath], ids=idfn)
def mpshortnotsorted_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestNodeAlphabet], ids=idfn)
def mpalphabet_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestNodeSortedAutoNow], ids=idfn)
def mpsortedautonow_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestNodeSmallStep])
def mpsmallstep_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestManyToManyWithUser])
def mpm2muser_model(request):
return _prepare_db_test(request)
class TestTreeBase(object):
def got(self, model):
if model in [models.NS_TestNode, models.NS_TestNode_Proxy]:
# this slows down nested sets tests quite a bit, but it has the
# advantage that we'll check the node edges are correct
d = {}
for tree_id, lft, rgt in model.objects.values_list('tree_id',
'lft',
'rgt'):
d.setdefault(tree_id, []).extend([lft, rgt])
for tree_id, got_edges in d.items():
assert len(got_edges) == max(got_edges)
good_edges = list(range(1, len(got_edges) + 1))
assert sorted(got_edges) == good_edges
return [(o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree()]
def _assert_get_annotated_list(self, model, expected, parent=None):
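        # get_annotated_list yields (node, info) pairs; flatten each to
        # (desc, info['open'], info['close'], info['level']) for comparison.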
results = model.get_annotated_list(parent)
got = [
(obj[0].desc, obj[1]['open'], obj[1]['close'], obj[1]['level'])
for obj in results
]
assert expected == got
assert all([type(obj[0]) == model for obj in results])
class TestEmptyTree(TestTreeBase):
def test_load_bulk_empty(self, model):
ids = model.load_bulk(BASE_DATA)
got_descs = [obj.desc
for obj in model.objects.filter(id__in=ids)]
expected_descs = [x[0] for x in UNCHANGED]
assert sorted(got_descs) == sorted(expected_descs)
assert self.got(model) == UNCHANGED
def test_dump_bulk_empty(self, model):
assert model.dump_bulk() == []
def test_add_root_empty(self, model):
model.add_root(desc='1')
expected = [('1', 1, 0)]
assert self.got(model) == expected
def test_get_root_nodes_empty(self, model):
got = model.get_root_nodes()
expected = []
assert [node.desc for node in got] == expected
def test_get_first_root_node_empty(self, model):
got = model.get_first_root_node()
assert got is None
def test_get_last_root_node_empty(self, model):
got = model.get_last_root_node()
assert got is None
def test_get_tree(self, model):
got = list(model.get_tree())
assert got == []
def test_get_annotated_list(self, model):
expected = []
self._assert_get_annotated_list(model, expected)
class TestNonEmptyTree(TestTreeBase):
@classmethod
def setup_class(cls):
for model in models.BASE_MODELS:
model.load_bulk(BASE_DATA)
@classmethod
def teardown_class(cls):
models.empty_models_tables(models.BASE_MODELS)
class TestClassMethods(TestNonEmptyTree):
def test_load_bulk_existing(self, model):
# inserting on an existing node
node = model.objects.get(desc='231')
ids = model.load_bulk(BASE_DATA, node)
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 4),
('1', 4, 0),
('2', 4, 4),
('21', 5, 0),
('22', 5, 0),
('23', 5, 1),
('231', 6, 0),
('24', 5, 0),
('3', 4, 0),
('4', 4, 1),
('41', 5, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
expected_descs = ['1', '2', '21', '22', '23', '231', '24',
'3', '4', '41']
got_descs = [obj.desc for obj in model.objects.filter(id__in=ids)]
assert sorted(got_descs) == sorted(expected_descs)
assert self.got(model) == expected
def test_get_tree_all(self, model):
nodes = model.get_tree()
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in nodes]
assert got == UNCHANGED
assert all([type(o) == model for o in nodes])
def test_dump_bulk_all(self, model):
assert model.dump_bulk(keep_ids=False) == BASE_DATA
def test_get_tree_node(self, model):
node = model.objects.get(desc='231')
model.load_bulk(BASE_DATA, node)
# the tree was modified by load_bulk, so we reload our node object
node = model.objects.get(pk=node.pk)
nodes = model.get_tree(node)
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in nodes]
expected = [('231', 3, 4),
('1', 4, 0),
('2', 4, 4),
('21', 5, 0),
('22', 5, 0),
('23', 5, 1),
('231', 6, 0),
('24', 5, 0),
('3', 4, 0),
('4', 4, 1),
('41', 5, 0)]
assert got == expected
assert all([type(o) == model for o in nodes])
def test_get_tree_leaf(self, model):
node = model.objects.get(desc='1')
assert 0 == node.get_children_count()
nodes = model.get_tree(node)
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in nodes]
expected = [('1', 1, 0)]
assert got == expected
assert all([type(o) == model for o in nodes])
def test_get_annotated_list_all(self, model):
expected = [('1', True, [], 0), ('2', False, [], 0),
('21', True, [], 1), ('22', False, [], 1),
('23', False, [], 1), ('231', True, [0], 2),
('24', False, [0], 1), ('3', False, [], 0),
('4', False, [], 0), ('41', True, [0, 1], 1)]
self._assert_get_annotated_list(model, expected)
def test_get_annotated_list_node(self, model):
node = model.objects.get(desc='2')
expected = [('2', True, [], 0), ('21', True, [], 1),
('22', False, [], 1), ('23', False, [], 1),
('231', True, [0], 2), ('24', False, [0, 1], 1)]
self._assert_get_annotated_list(model, expected, node)
def test_get_annotated_list_leaf(self, model):
node = model.objects.get(desc='1')
expected = [('1', True, [0], 0)]
self._assert_get_annotated_list(model, expected, node)
def test_dump_bulk_node(self, model):
node = model.objects.get(desc='231')
model.load_bulk(BASE_DATA, node)
# the tree was modified by load_bulk, so we reload our node object
node = model.objects.get(pk=node.pk)
got = model.dump_bulk(node, False)
expected = [{'data': {'desc': '231'}, 'children': BASE_DATA}]
assert got == expected
def test_load_and_dump_bulk_keeping_ids(self, model):
exp = model.dump_bulk(keep_ids=True)
model.objects.all().delete()
model.load_bulk(exp, None, True)
got = model.dump_bulk(keep_ids=True)
assert got == exp
        # do we really have an unchanged tree after the dump/delete/load?
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree()]
assert got == UNCHANGED
def test_load_and_dump_bulk_with_fk(self, related_model):
# https://bitbucket.org/tabo/django-treebeard/issue/48/
related_model.objects.all().delete()
related, created = models.RelatedModel.objects.get_or_create(
desc="Test %s" % related_model.__name__)
related_data = [
{'data': {'desc': '1', 'related': related.pk}},
{'data': {'desc': '2', 'related': related.pk}, 'children': [
{'data': {'desc': '21', 'related': related.pk}},
{'data': {'desc': '22', 'related': related.pk}},
{'data': {'desc': '23', 'related': related.pk}, 'children': [
{'data': {'desc': '231', 'related': related.pk}},
]},
{'data': {'desc': '24', 'related': related.pk}},
]},
{'data': {'desc': '3', 'related': related.pk}},
{'data': {'desc': '4', 'related': related.pk}, 'children': [
{'data': {'desc': '41', 'related': related.pk}},
]}]
related_model.load_bulk(related_data)
got = related_model.dump_bulk(keep_ids=False)
assert got == related_data
def test_get_root_nodes(self, model):
got = model.get_root_nodes()
expected = ['1', '2', '3', '4']
assert [node.desc for node in got] == expected
assert all([type(node) == model for node in got])
def test_get_first_root_node(self, model):
got = model.get_first_root_node()
assert got.desc == '1'
assert type(got) == model
def test_get_last_root_node(self, model):
got = model.get_last_root_node()
assert got.desc == '4'
assert type(got) == model
def test_add_root(self, model):
obj = model.add_root(desc='5')
assert obj.get_depth() == 1
got = model.get_last_root_node()
assert got.desc == '5'
assert type(got) == model
def test_add_root_with_passed_instance(self, model):
obj = model(desc='5')
result = model.add_root(instance=obj)
assert result == obj
got = model.get_last_root_node()
assert got.desc == '5'
assert type(got) == model
def test_add_root_with_already_saved_instance(self, model):
obj = model.objects.get(desc='4')
with pytest.raises(NodeAlreadySaved):
model.add_root(instance=obj)
class TestSimpleNodeMethods(TestNonEmptyTree):
def test_is_root(self, model):
data = [
('2', True),
('1', True),
('4', True),
('21', False),
('24', False),
('22', False),
('231', False),
]
for desc, expected in data:
got = model.objects.get(desc=desc).is_root()
assert got == expected
def test_is_leaf(self, model):
data = [
('2', False),
('23', False),
('231', True),
]
for desc, expected in data:
got = model.objects.get(desc=desc).is_leaf()
assert got == expected
def test_get_root(self, model):
data = [
('2', '2'),
('1', '1'),
('4', '4'),
('21', '2'),
('24', '2'),
('22', '2'),
('231', '2'),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_root()
assert node.desc == expected
assert type(node) == model
def test_get_parent(self, model):
data = [
('2', None),
('1', None),
('4', None),
('21', '2'),
('24', '2'),
('22', '2'),
('231', '23'),
]
data = dict(data)
objs = {}
for desc, expected in data.items():
node = model.objects.get(desc=desc)
parent = node.get_parent()
if expected:
assert parent.desc == expected
assert type(parent) == model
else:
assert parent is None
objs[desc] = node
# corrupt the objects' parent cache
node._parent_obj = 'CORRUPTED!!!'
for desc, expected in data.items():
node = objs[desc]
# asking get_parent to not use the parent cache (since we
# corrupted it in the previous loop)
parent = node.get_parent(True)
if expected:
assert parent.desc == expected
assert type(parent) == model
else:
assert parent is None
def test_get_children(self, model):
data = [
('2', ['21', '22', '23', '24']),
('23', ['231']),
('231', []),
]
for desc, expected in data:
children = model.objects.get(desc=desc).get_children()
assert [node.desc for node in children] == expected
assert all([type(node) == model for node in children])
def test_get_children_count(self, model):
data = [
('2', 4),
('23', 1),
('231', 0),
]
for desc, expected in data:
got = model.objects.get(desc=desc).get_children_count()
assert got == expected
def test_get_siblings(self, model):
data = [
('2', ['1', '2', '3', '4']),
('21', ['21', '22', '23', '24']),
('231', ['231']),
]
for desc, expected in data:
siblings = model.objects.get(desc=desc).get_siblings()
assert [node.desc for node in siblings] == expected
assert all([type(node) == model for node in siblings])
def test_get_first_sibling(self, model):
data = [
('2', '1'),
('1', '1'),
('4', '1'),
('21', '21'),
('24', '21'),
('22', '21'),
('231', '231'),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_first_sibling()
assert node.desc == expected
assert type(node) == model
def test_get_prev_sibling(self, model):
data = [
('2', '1'),
('1', None),
('4', '3'),
('21', None),
('24', '23'),
('22', '21'),
('231', None),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_prev_sibling()
if expected is None:
assert node is None
else:
assert node.desc == expected
assert type(node) == model
def test_get_next_sibling(self, model):
data = [
('2', '3'),
('1', '2'),
('4', None),
('21', '22'),
('24', None),
('22', '23'),
('231', None),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_next_sibling()
if expected is None:
assert node is None
else:
assert node.desc == expected
assert type(node) == model
def test_get_last_sibling(self, model):
data = [
('2', '4'),
('1', '4'),
('4', '4'),
('21', '24'),
('24', '24'),
('22', '24'),
('231', '231'),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_last_sibling()
assert node.desc == expected
assert type(node) == model
def test_get_first_child(self, model):
data = [
('2', '21'),
('21', None),
('23', '231'),
('231', None),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_first_child()
if expected is None:
assert node is None
else:
assert node.desc == expected
assert type(node) == model
def test_get_last_child(self, model):
data = [
('2', '24'),
('21', None),
('23', '231'),
('231', None),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_last_child()
if expected is None:
assert node is None
else:
assert node.desc == expected
assert type(node) == model
def test_get_ancestors(self, model):
data = [
('2', []),
('21', ['2']),
('231', ['2', '23']),
]
for desc, expected in data:
nodes = model.objects.get(desc=desc).get_ancestors()
assert [node.desc for node in nodes] == expected
assert all([type(node) == model for node in nodes])
def test_get_descendants(self, model):
data = [
('2', ['21', '22', '23', '231', '24']),
('23', ['231']),
('231', []),
('1', []),
('4', ['41']),
]
for desc, expected in data:
nodes = model.objects.get(desc=desc).get_descendants()
assert [node.desc for node in nodes] == expected
assert all([type(node) == model for node in nodes])
def test_get_descendant_count(self, model):
data = [
('2', 5),
('23', 1),
('231', 0),
('1', 0),
('4', 1),
]
for desc, expected in data:
got = model.objects.get(desc=desc).get_descendant_count()
assert got == expected
def test_is_sibling_of(self, model):
data = [
('2', '2', True),
('2', '1', True),
('21', '2', False),
('231', '2', False),
('22', '23', True),
('231', '23', False),
('231', '231', True),
]
for desc1, desc2, expected in data:
node1 = model.objects.get(desc=desc1)
node2 = model.objects.get(desc=desc2)
assert node1.is_sibling_of(node2) == expected
def test_is_child_of(self, model):
data = [
('2', '2', False),
('2', '1', False),
('21', '2', True),
('231', '2', False),
('231', '23', True),
('231', '231', False),
]
for desc1, desc2, expected in data:
node1 = model.objects.get(desc=desc1)
node2 = model.objects.get(desc=desc2)
assert node1.is_child_of(node2) == expected
def test_is_descendant_of(self, model):
data = [
('2', '2', False),
('2', '1', False),
('21', '2', True),
('231', '2', True),
('231', '23', True),
('231', '231', False),
]
for desc1, desc2, expected in data:
node1 = model.objects.get(desc=desc1)
node2 = model.objects.get(desc=desc2)
assert node1.is_descendant_of(node2) == expected
class TestAddChild(TestNonEmptyTree):
def test_add_child_to_leaf(self, model):
model.objects.get(desc='231').add_child(desc='2311')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 1),
('2311', 4, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_child_to_node(self, model):
model.objects.get(desc='2').add_child(desc='25')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('25', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_child_with_passed_instance(self, model):
child = model(desc='2311')
result = model.objects.get(desc='231').add_child(instance=child)
assert result == child
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 1),
('2311', 4, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_child_with_already_saved_instance(self, model):
child = model.objects.get(desc='21')
with pytest.raises(NodeAlreadySaved):
model.objects.get(desc='2').add_child(instance=child)
class TestAddSibling(TestNonEmptyTree):
def test_add_sibling_invalid_pos(self, model):
with pytest.raises(InvalidPosition):
model.objects.get(desc='231').add_sibling('invalid_pos')
def test_add_sibling_missing_nodeorderby(self, model):
node_wchildren = model.objects.get(desc='2')
with pytest.raises(MissingNodeOrderBy):
node_wchildren.add_sibling('sorted-sibling', desc='aaa')
def test_add_sibling_last_root(self, model):
node_wchildren = model.objects.get(desc='2')
obj = node_wchildren.add_sibling('last-sibling', desc='5')
assert obj.get_depth() == 1
assert node_wchildren.get_last_sibling().desc == '5'
def test_add_sibling_last(self, model):
node = model.objects.get(desc='231')
obj = node.add_sibling('last-sibling', desc='232')
assert obj.get_depth() == 3
assert node.get_last_sibling().desc == '232'
def test_add_sibling_first_root(self, model):
node_wchildren = model.objects.get(desc='2')
obj = node_wchildren.add_sibling('first-sibling', desc='new')
assert obj.get_depth() == 1
expected = [('new', 1, 0),
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_first(self, model):
node_wchildren = model.objects.get(desc='23')
obj = node_wchildren.add_sibling('first-sibling', desc='new')
assert obj.get_depth() == 2
expected = [('1', 1, 0),
('2', 1, 5),
('new', 2, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_left_root(self, model):
node_wchildren = model.objects.get(desc='2')
obj = node_wchildren.add_sibling('left', desc='new')
assert obj.get_depth() == 1
expected = [('1', 1, 0),
('new', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_left(self, model):
node_wchildren = model.objects.get(desc='23')
obj = node_wchildren.add_sibling('left', desc='new')
assert obj.get_depth() == 2
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('new', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_left_noleft_root(self, model):
node = model.objects.get(desc='1')
obj = node.add_sibling('left', desc='new')
assert obj.get_depth() == 1
expected = [('new', 1, 0),
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_left_noleft(self, model):
node = model.objects.get(desc='231')
obj = node.add_sibling('left', desc='new')
assert obj.get_depth() == 3
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 2),
('new', 3, 0),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_right_root(self, model):
node_wchildren = model.objects.get(desc='2')
obj = node_wchildren.add_sibling('right', desc='new')
assert obj.get_depth() == 1
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('new', 1, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_right(self, model):
node_wchildren = model.objects.get(desc='23')
obj = node_wchildren.add_sibling('right', desc='new')
assert obj.get_depth() == 2
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('new', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_right_noright_root(self, model):
node = model.objects.get(desc='4')
obj = node.add_sibling('right', desc='new')
assert obj.get_depth() == 1
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0),
('new', 1, 0)]
assert self.got(model) == expected
def test_add_sibling_right_noright(self, model):
node = model.objects.get(desc='231')
obj = node.add_sibling('right', desc='new')
assert obj.get_depth() == 3
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 2),
('231', 3, 0),
('new', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_with_passed_instance(self, model):
node_wchildren = model.objects.get(desc='2')
obj = model(desc='5')
result = node_wchildren.add_sibling('last-sibling', instance=obj)
assert result == obj
assert obj.get_depth() == 1
assert node_wchildren.get_last_sibling().desc == '5'
def test_add_sibling_already_saved_instance(self, model):
node_wchildren = model.objects.get(desc='2')
existing_node = model.objects.get(desc='4')
with pytest.raises(NodeAlreadySaved):
node_wchildren.add_sibling('last-sibling', instance=existing_node)
class TestDelete(TestNonEmptyTree):
@classmethod
def setup_class(cls):
TestNonEmptyTree.setup_class()
for model, dep_model in zip(models.BASE_MODELS, models.DEP_MODELS):
for node in model.objects.all():
dep_model(node=node).save()
@classmethod
def teardown_class(cls):
models.empty_models_tables(models.DEP_MODELS + models.BASE_MODELS)
def test_delete_leaf(self, model):
model.objects.get(desc='231').delete()
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_delete_node(self, model):
model.objects.get(desc='23').delete()
expected = [('1', 1, 0),
('2', 1, 3),
('21', 2, 0),
('22', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_delete_root(self, model):
model.objects.get(desc='2').delete()
expected = [('1', 1, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_delete_filter_root_nodes(self, model):
model.objects.filter(desc__in=('2', '3')).delete()
expected = [('1', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_delete_filter_children(self, model):
model.objects.filter(desc__in=('2', '23', '231')).delete()
expected = [('1', 1, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
    def test_delete_nonexistent_nodes(self, model):
model.objects.filter(desc__in=('ZZZ', 'XXX')).delete()
assert self.got(model) == UNCHANGED
def test_delete_same_node_twice(self, model):
model.objects.filter(desc__in=('2', '2')).delete()
expected = [('1', 1, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_delete_all_root_nodes(self, model):
model.get_root_nodes().delete()
count = model.objects.count()
assert count == 0
def test_delete_all_nodes(self, model):
model.objects.all().delete()
count = model.objects.count()
assert count == 0
class TestMoveErrors(TestNonEmptyTree):
def test_move_invalid_pos(self, model):
node = model.objects.get(desc='231')
with pytest.raises(InvalidPosition):
node.move(node, 'invalid_pos')
def test_move_to_descendant(self, model):
node = model.objects.get(desc='2')
target = model.objects.get(desc='231')
with pytest.raises(InvalidMoveToDescendant):
node.move(target, 'first-sibling')
def test_move_missing_nodeorderby(self, model):
node = model.objects.get(desc='231')
with pytest.raises(MissingNodeOrderBy):
node.move(node, 'sorted-child')
with pytest.raises(MissingNodeOrderBy):
node.move(node, 'sorted-sibling')
class TestMoveSortedErrors(TestTreeBase):
def test_nonsorted_move_in_sorted(self, sorted_model):
node = sorted_model.add_root(val1=3, val2=3, desc='zxy')
with pytest.raises(InvalidPosition):
node.move(node, 'left')
class TestMoveLeafRoot(TestNonEmptyTree):
def test_move_leaf_last_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'last-sibling')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0),
('231', 1, 0)]
assert self.got(model) == expected
def test_move_leaf_first_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'first-sibling')
expected = [('231', 1, 0),
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_left_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'left')
expected = [('1', 1, 0),
('231', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_right_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('231', 1, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_last_child_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'last-child')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('231', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_first_child_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'first-child')
expected = [('1', 1, 0),
('2', 1, 5),
('231', 2, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
class TestMoveLeaf(TestNonEmptyTree):
def test_move_leaf_last_sibling(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'last-sibling')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('231', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_first_sibling(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'first-sibling')
expected = [('1', 1, 0),
('2', 1, 5),
('231', 2, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_left_sibling(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'left')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('231', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_right_sibling(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('231', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_left_sibling_itself(self, model):
target = model.objects.get(desc='231')
model.objects.get(desc='231').move(target, 'left')
assert self.got(model) == UNCHANGED
def test_move_leaf_last_child(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'last-child')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 1),
('231', 3, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_first_child(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'first-child')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 1),
('231', 3, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
class TestMoveBranchRoot(TestNonEmptyTree):
def test_move_branch_first_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'first-sibling')
expected = [('4', 1, 1),
('41', 2, 0),
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_last_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'last-sibling')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_branch_left_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'left')
expected = [('1', 1, 0),
('4', 1, 1),
('41', 2, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_right_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('4', 1, 1),
('41', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_left_noleft_sibling_root(self, model):
target = model.objects.get(desc='2').get_first_sibling()
model.objects.get(desc='4').move(target, 'left')
expected = [('4', 1, 1),
('41', 2, 0),
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_right_noright_sibling_root(self, model):
target = model.objects.get(desc='2').get_last_sibling()
model.objects.get(desc='4').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_branch_first_child_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'first-child')
expected = [('1', 1, 0),
('2', 1, 5),
('4', 2, 1),
('41', 3, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_last_child_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'last-child')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('4', 2, 1),
('41', 3, 0),
('3', 1, 0)]
assert self.got(model) == expected
class TestMoveBranch(TestNonEmptyTree):
def test_move_branch_first_sibling(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'first-sibling')
expected = [('1', 1, 0),
('2', 1, 5),
('4', 2, 1),
('41', 3, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_last_sibling(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'last-sibling')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('4', 2, 1),
('41', 3, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_left_sibling(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'left')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('4', 2, 1),
('41', 3, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_right_sibling(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('4', 2, 1),
('41', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_left_noleft_sibling(self, model):
target = model.objects.get(desc='23').get_first_sibling()
model.objects.get(desc='4').move(target, 'left')
expected = [('1', 1, 0),
('2', 1, 5),
('4', 2, 1),
('41', 3, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_right_noright_sibling(self, model):
target = model.objects.get(desc='23').get_last_sibling()
model.objects.get(desc='4').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('4', 2, 1),
('41', 3, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_left_itself_sibling(self, model):
target = model.objects.get(desc='4')
model.objects.get(desc='4').move(target, 'left')
assert self.got(model) == UNCHANGED
def test_move_branch_first_child(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'first-child')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 2),
('4', 3, 1),
('41', 4, 0),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_last_child(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'last-child')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 2),
('231', 3, 0),
('4', 3, 1),
('41', 4, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
class TestTreeSorted(TestTreeBase):
def got(self, sorted_model):
return [(o.val1, o.val2, o.desc, o.get_depth(), o.get_children_count())
for o in sorted_model.get_tree()]
def test_add_root_sorted(self, sorted_model):
sorted_model.add_root(val1=3, val2=3, desc='zxy')
sorted_model.add_root(val1=1, val2=4, desc='bcd')
sorted_model.add_root(val1=2, val2=5, desc='zxy')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=4, val2=1, desc='fgh')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=2, val2=2, desc='qwe')
sorted_model.add_root(val1=3, val2=2, desc='vcx')
expected = [(1, 4, 'bcd', 1, 0),
(2, 2, 'qwe', 1, 0),
(2, 5, 'zxy', 1, 0),
(3, 2, 'vcx', 1, 0),
(3, 3, 'abc', 1, 0),
(3, 3, 'abc', 1, 0),
(3, 3, 'zxy', 1, 0),
(4, 1, 'fgh', 1, 0)]
assert self.got(sorted_model) == expected
def test_add_child_root_sorted(self, sorted_model):
root = sorted_model.add_root(val1=0, val2=0, desc='aaa')
root.add_child(val1=3, val2=3, desc='zxy')
root.add_child(val1=1, val2=4, desc='bcd')
root.add_child(val1=2, val2=5, desc='zxy')
root.add_child(val1=3, val2=3, desc='abc')
root.add_child(val1=4, val2=1, desc='fgh')
root.add_child(val1=3, val2=3, desc='abc')
root.add_child(val1=2, val2=2, desc='qwe')
root.add_child(val1=3, val2=2, desc='vcx')
expected = [(0, 0, 'aaa', 1, 8),
(1, 4, 'bcd', 2, 0),
(2, 2, 'qwe', 2, 0),
(2, 5, 'zxy', 2, 0),
(3, 2, 'vcx', 2, 0),
(3, 3, 'abc', 2, 0),
(3, 3, 'abc', 2, 0),
(3, 3, 'zxy', 2, 0),
(4, 1, 'fgh', 2, 0)]
assert self.got(sorted_model) == expected
def test_add_child_nonroot_sorted(self, sorted_model):
get_node = lambda node_id: sorted_model.objects.get(pk=node_id)
root_id = sorted_model.add_root(val1=0, val2=0, desc='a').pk
node_id = get_node(root_id).add_child(val1=0, val2=0, desc='ac').pk
get_node(root_id).add_child(val1=0, val2=0, desc='aa')
get_node(root_id).add_child(val1=0, val2=0, desc='av')
get_node(node_id).add_child(val1=0, val2=0, desc='aca')
get_node(node_id).add_child(val1=0, val2=0, desc='acc')
get_node(node_id).add_child(val1=0, val2=0, desc='acb')
expected = [(0, 0, 'a', 1, 3),
(0, 0, 'aa', 2, 0),
(0, 0, 'ac', 2, 3),
(0, 0, 'aca', 3, 0),
(0, 0, 'acb', 3, 0),
(0, 0, 'acc', 3, 0),
(0, 0, 'av', 2, 0)]
assert self.got(sorted_model) == expected
def test_move_sorted(self, sorted_model):
sorted_model.add_root(val1=3, val2=3, desc='zxy')
sorted_model.add_root(val1=1, val2=4, desc='bcd')
sorted_model.add_root(val1=2, val2=5, desc='zxy')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=4, val2=1, desc='fgh')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=2, val2=2, desc='qwe')
sorted_model.add_root(val1=3, val2=2, desc='vcx')
root_nodes = sorted_model.get_root_nodes()
target = root_nodes[0]
for node in root_nodes[1:]:
# because raw queries don't update django objects
node = sorted_model.objects.get(pk=node.pk)
target = sorted_model.objects.get(pk=target.pk)
node.move(target, 'sorted-child')
expected = [(1, 4, 'bcd', 1, 7),
(2, 2, 'qwe', 2, 0),
(2, 5, 'zxy', 2, 0),
(3, 2, 'vcx', 2, 0),
(3, 3, 'abc', 2, 0),
(3, 3, 'abc', 2, 0),
(3, 3, 'zxy', 2, 0),
(4, 1, 'fgh', 2, 0)]
assert self.got(sorted_model) == expected
def test_move_sortedsibling(self, sorted_model):
# https://bitbucket.org/tabo/django-treebeard/issue/27
sorted_model.add_root(val1=3, val2=3, desc='zxy')
sorted_model.add_root(val1=1, val2=4, desc='bcd')
sorted_model.add_root(val1=2, val2=5, desc='zxy')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=4, val2=1, desc='fgh')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=2, val2=2, desc='qwe')
sorted_model.add_root(val1=3, val2=2, desc='vcx')
root_nodes = sorted_model.get_root_nodes()
target = root_nodes[0]
for node in root_nodes[1:]:
# because raw queries don't update django objects
node = sorted_model.objects.get(pk=node.pk)
target = sorted_model.objects.get(pk=target.pk)
node.val1 -= 2
node.save()
node.move(target, 'sorted-sibling')
expected = [(0, 2, 'qwe', 1, 0),
(0, 5, 'zxy', 1, 0),
(1, 2, 'vcx', 1, 0),
(1, 3, 'abc', 1, 0),
(1, 3, 'abc', 1, 0),
(1, 3, 'zxy', 1, 0),
(1, 4, 'bcd', 1, 0),
(2, 1, 'fgh', 1, 0)]
assert self.got(sorted_model) == expected
class TestInheritedModels(TestTreeBase):
@classmethod
def setup_class(cls):
themodels = zip(models.BASE_MODELS, models.INHERITED_MODELS)
for model, inherited_model in themodels:
model.add_root(desc='1')
model.add_root(desc='2')
node21 = inherited_model(desc='21')
model.objects.get(desc='2').add_child(instance=node21)
model.objects.get(desc='21').add_child(desc='211')
model.objects.get(desc='21').add_child(desc='212')
model.objects.get(desc='2').add_child(desc='22')
node3 = inherited_model(desc='3')
model.add_root(instance=node3)
@classmethod
def teardown_class(cls):
# Will also empty INHERITED_MODELS by cascade
models.empty_models_tables(models.BASE_MODELS)
def test_get_tree_all(self, inherited_model):
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in inherited_model.get_tree()]
expected = [
('1', 1, 0),
('2', 1, 2),
('21', 2, 2),
('211', 3, 0),
('212', 3, 0),
('22', 2, 0),
('3', 1, 0),
]
assert got == expected
def test_get_tree_node(self, inherited_model):
node = inherited_model.objects.get(desc='21')
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in inherited_model.get_tree(node)]
expected = [
('21', 2, 2),
('211', 3, 0),
('212', 3, 0),
]
assert got == expected
def test_get_root_nodes(self, inherited_model):
got = inherited_model.get_root_nodes()
expected = ['1', '2', '3']
assert [node.desc for node in got] == expected
def test_get_first_root_node(self, inherited_model):
got = inherited_model.get_first_root_node()
assert got.desc == '1'
def test_get_last_root_node(self, inherited_model):
got = inherited_model.get_last_root_node()
assert got.desc == '3'
def test_is_root(self, inherited_model):
node21 = inherited_model.objects.get(desc='21')
node3 = inherited_model.objects.get(desc='3')
assert node21.is_root() is False
assert node3.is_root() is True
def test_is_leaf(self, inherited_model):
node21 = inherited_model.objects.get(desc='21')
node3 = inherited_model.objects.get(desc='3')
assert node21.is_leaf() is False
assert node3.is_leaf() is True
def test_get_root(self, inherited_model):
node21 = inherited_model.objects.get(desc='21')
node3 = inherited_model.objects.get(desc='3')
assert node21.get_root().desc == '2'
assert node3.get_root().desc == '3'
def test_get_parent(self, inherited_model):
node21 = inherited_model.objects.get(desc='21')
node3 = inherited_model.objects.get(desc='3')
assert node21.get_parent().desc == '2'
assert node3.get_parent() is None
def test_get_children(self, inherited_model):
node21 = inherited_model.objects.get(desc='21')
node3 = inherited_model.objects.get(desc='3')
assert [node.desc for node in node21.get_children()] == ['211', '212']
assert [node.desc for node in node3.get_children()] == []
def test_get_children_count(self, inherited_model):
node21 = inherited_model.objects.get(desc='21')
node3 = inherited_model.objects.get(desc='3')
assert node21.get_children_count() == 2
assert node3.get_children_count() == 0
def test_get_siblings(self, inherited_model):
node21 = inherited_model.objects.get(desc='21')
node3 = inherited_model.objects.get(desc='3')
assert [node.desc for node in node21.get_siblings()] == ['21', '22']
assert [node.desc for node in node3.get_siblings()] == ['1', '2', '3']
def test_get_first_sibling(self, inherited_model):
node21 = inherited_model.objects.get(desc='21')
node3 = inherited_model.objects.get(desc='3')
assert node21.get_first_sibling().desc == '21'
assert node3.get_first_sibling().desc == '1'
def test_get_prev_sibling(self, inherited_model):
node21 = inherited_model.objects.get(desc='21')
node3 = inherited_model.objects.get(desc='3')
assert node21.get_prev_sibling() is None
assert node3.get_prev_sibling().desc == '2'
def test_get_next_sibling(self, inherited_model):
node21 = inherited_model.objects.get(desc='21')
node3 = inherited_model.objects.get(desc='3')
assert node21.get_next_sibling().desc == '22'
assert node3.get_next_sibling() is None
def test_get_last_sibling(self, inherited_model):
node21 = inherited_model.objects.get(desc='21')
node3 = inherited_model.objects.get(desc='3')
assert node21.get_last_sibling().desc == '22'
assert node3.get_last_sibling().desc == '3'
def test_get_first_child(self, inherited_model):
node21 = inherited_model.objects.get(desc='21')
node3 = inherited_model.objects.get(desc='3')
assert node21.get_first_child().desc == '211'
assert node3.get_first_child() is None
def test_get_last_child(self, inherited_model):
node21 = inherited_model.objects.get(desc='21')
node3 = inherited_model.objects.get(desc='3')
assert node21.get_last_child().desc == '212'
assert node3.get_last_child() is None
def test_get_ancestors(self, inherited_model):
node21 = inherited_model.objects.get(desc='21')
node3 = inherited_model.objects.get(desc='3')
assert [node.desc for node in node21.get_ancestors()] == ['2']
assert [node.desc for node in node3.get_ancestors()] == []
def test_get_descendants(self, inherited_model):
node21 = inherited_model.objects.get(desc='21')
node3 = inherited_model.objects.get(desc='3')
assert [node.desc for node in node21.get_descendants()] == [
'211', '212']
assert [node.desc for node in node3.get_descendants()] == []
def test_get_descendant_count(self, inherited_model):
node21 = inherited_model.objects.get(desc='21')
node3 = inherited_model.objects.get(desc='3')
assert node21.get_descendant_count() == 2
assert node3.get_descendant_count() == 0
def test_cascading_deletion(self, inherited_model):
# Deleting a node by calling delete() on the inherited_model class
# should delete descendants, even if those descendants are not
# instances of inherited_model
base_model = inherited_model.__bases__[0]
node21 = inherited_model.objects.get(desc='21')
node21.delete()
node2 = base_model.objects.get(desc='2')
for desc in ['21', '211', '212']:
assert not base_model.objects.filter(desc=desc).exists()
assert [node.desc for node in node2.get_descendants()] == ['22']
class TestMP_TreeAlphabet(TestTreeBase):
@pytest.mark.skipif(
not os.getenv('TREEBEARD_TEST_ALPHABET', False),
reason='TREEBEARD_TEST_ALPHABET env variable not set.'
)
def test_alphabet(self, mpalphabet_model):
"""This isn't actually a test, it's an informational routine."""
basealpha = numconv.BASE85
got_err = False
last_good = None
for alphabetlen in range(3, len(basealpha) + 1):
alphabet = basealpha[0:alphabetlen]
assert len(alphabet) >= 3
expected = [alphabet[0] + char for char in alphabet[1:]]
expected.extend([alphabet[1] + char for char in alphabet])
expected.append(alphabet[2] + alphabet[0])
# remove all nodes
mpalphabet_model.objects.all().delete()
# change the model's alphabet
mpalphabet_model.alphabet = alphabet
mpalphabet_model.numconv_obj_ = None
# insert root nodes
for pos in range(len(alphabet) * 2):
try:
mpalphabet_model.add_root(numval=pos)
except:
got_err = True
break
if got_err:
break
got = [obj.path
for obj in mpalphabet_model.objects.all()]
if got != expected:
break
last_good = alphabet
assert False, (
'Best BASE85 based alphabet for your setup: {} (base {})'.format(
last_good, len(last_good))
)
class TestHelpers(TestTreeBase):
@classmethod
def setup_class(cls):
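        # Build a deeper fixture than BASE_DATA alone: load BASE_DATA at the
        # root level, load it again under every root node, and add an extra
        # childless root '5'.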
for model in models.BASE_MODELS:
model.load_bulk(BASE_DATA)
for node in model.get_root_nodes():
model.load_bulk(BASE_DATA, node)
model.add_root(desc='5')
@classmethod
def teardown_class(cls):
models.empty_models_tables(models.BASE_MODELS)
def test_descendants_group_count_root(self, model):
expected = [(o.desc, o.get_descendant_count())
for o in model.get_root_nodes()]
got = [(o.desc, o.descendants_count)
for o in model.get_descendants_group_count()]
assert got == expected
def test_descendants_group_count_node(self, model):
parent = model.get_root_nodes().get(desc='2')
expected = [(o.desc, o.get_descendant_count())
for o in parent.get_children()]
got = [(o.desc, o.descendants_count)
for o in model.get_descendants_group_count(parent)]
assert got == expected
class TestMP_TreeSortedAutoNow(TestTreeBase):
"""
The sorting mechanism used by treebeard when adding a node can fail if the
ordering is using an "auto_now" field
"""
def test_sorted_by_autonow_workaround(self, mpsortedautonow_model):
        # Workaround: pass an explicit "created" value so the sorted insert
        # does not depend on the auto-now field, which is only set at save time.
for i in range(1, 5):
mpsortedautonow_model.add_root(desc='node%d' % (i, ),
created=datetime.datetime.now())
def test_sorted_by_autonow_FAIL(self, mpsortedautonow_model):
"""
This test asserts that we have a problem.
fix this, somehow
"""
mpsortedautonow_model.add_root(desc='node1')
with pytest.raises(ValueError):
mpsortedautonow_model.add_root(desc='node2')
class TestMP_TreeStepOverflow(TestTreeBase):
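    # The "small step" models use a very short path step, so only a handful of
    # siblings fit at any level before add/move operations raise PathOverflow.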
def test_add_root(self, mpsmallstep_model):
method = mpsmallstep_model.add_root
for i in range(1, 10):
method()
with pytest.raises(PathOverflow):
method()
def test_add_child(self, mpsmallstep_model):
root = mpsmallstep_model.add_root()
method = root.add_child
for i in range(1, 10):
method()
with pytest.raises(PathOverflow):
method()
def test_add_sibling(self, mpsmallstep_model):
root = mpsmallstep_model.add_root()
for i in range(1, 10):
root.add_child()
positions = ('first-sibling', 'left', 'right', 'last-sibling')
for pos in positions:
with pytest.raises(PathOverflow):
root.get_last_child().add_sibling(pos)
def test_move(self, mpsmallstep_model):
root = mpsmallstep_model.add_root()
for i in range(1, 10):
root.add_child()
newroot = mpsmallstep_model.add_root()
targets = [(root, ['first-child', 'last-child']),
(root.get_first_child(), ['first-sibling',
'left',
'right',
'last-sibling'])]
for target, positions in targets:
for pos in positions:
with pytest.raises(PathOverflow):
newroot.move(target, pos)
class TestMP_TreeShortPath(TestTreeBase):
"""Test a tree with a very small path field (max_length=4) and a
steplen of 1
"""
def test_short_path(self, mpshortnotsorted_model):
obj = mpshortnotsorted_model.add_root()
obj = obj.add_child().add_child().add_child()
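        # With max_length=4 and a steplen of 1 the path is full at depth 4,
        # so adding a fifth level must overflow.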
with pytest.raises(PathOverflow):
obj.add_child()
class TestMP_TreeFindProblems(TestTreeBase):
def test_find_problems(self, mpalphabet_model):
mpalphabet_model.alphabet = '01234'
mpalphabet_model(path='01', depth=1, numchild=0, numval=0).save()
mpalphabet_model(path='1', depth=1, numchild=0, numval=0).save()
mpalphabet_model(path='111', depth=1, numchild=0, numval=0).save()
mpalphabet_model(path='abcd', depth=1, numchild=0, numval=0).save()
mpalphabet_model(path='qa#$%!', depth=1, numchild=0, numval=0).save()
mpalphabet_model(path='0201', depth=2, numchild=0, numval=0).save()
mpalphabet_model(path='020201', depth=3, numchild=0, numval=0).save()
mpalphabet_model(path='03', depth=1, numchild=2, numval=0).save()
mpalphabet_model(path='0301', depth=2, numchild=0, numval=0).save()
mpalphabet_model(path='030102', depth=3, numchild=10, numval=0).save()
mpalphabet_model(path='04', depth=10, numchild=1, numval=0).save()
mpalphabet_model(path='0401', depth=20, numchild=0, numval=0).save()
def got(ids):
return [o.path for o in
mpalphabet_model.objects.filter(id__in=ids)]
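        # find_problems() returns five lists of node ids: paths with characters
        # outside the alphabet, paths whose length is not a multiple of steplen,
        # orphaned nodes, nodes with a wrong depth, and nodes whose numchild
        # does not match their actual number of children.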
(evil_chars, bad_steplen, orphans, wrong_depth, wrong_numchild) = (
mpalphabet_model.find_problems())
assert ['abcd', 'qa#$%!'] == got(evil_chars)
assert ['1', '111'] == got(bad_steplen)
assert ['0201', '020201'] == got(orphans)
assert ['03', '0301', '030102'] == got(wrong_numchild)
assert ['04', '0401'] == got(wrong_depth)
class TestMP_TreeFix(TestTreeBase):
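    # The broken data added below is repaired in two ways: fix_tree(destructive=True)
    # renumbers every path, so the expected trees contain no holes, while
    # fix_tree(destructive=False) keeps the existing (gappy) paths and only repairs
    # the tree metadata, hence the two expected dictionaries.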
expected_no_holes = {
models.MP_TestNodeShortPath: [
('1', 'b', 1, 2),
('11', 'u', 2, 1),
('111', 'i', 3, 1),
('1111', 'e', 4, 0),
('12', 'o', 2, 0),
('2', 'd', 1, 0),
('3', 'g', 1, 0),
('4', 'a', 1, 4),
('41', 'a', 2, 0),
('42', 'a', 2, 0),
('43', 'u', 2, 1),
('431', 'i', 3, 1),
('4311', 'e', 4, 0),
('44', 'o', 2, 0)],
models.MP_TestSortedNodeShortPath: [
('1', 'a', 1, 4),
('11', 'a', 2, 0),
('12', 'a', 2, 0),
('13', 'o', 2, 0),
('14', 'u', 2, 1),
('141', 'i', 3, 1),
('1411', 'e', 4, 0),
('2', 'b', 1, 2),
('21', 'o', 2, 0),
('22', 'u', 2, 1),
('221', 'i', 3, 1),
('2211', 'e', 4, 0),
('3', 'd', 1, 0),
('4', 'g', 1, 0)]}
expected_with_holes = {
models.MP_TestNodeShortPath: [
('1', 'b', 1, 2),
('13', 'u', 2, 1),
('134', 'i', 3, 1),
('1343', 'e', 4, 0),
('14', 'o', 2, 0),
('2', 'd', 1, 0),
('3', 'g', 1, 0),
('4', 'a', 1, 4),
('41', 'a', 2, 0),
('42', 'a', 2, 0),
('43', 'u', 2, 1),
('434', 'i', 3, 1),
('4343', 'e', 4, 0),
('44', 'o', 2, 0)],
models.MP_TestSortedNodeShortPath: [
('1', 'b', 1, 2),
('13', 'u', 2, 1),
('134', 'i', 3, 1),
('1343', 'e', 4, 0),
('14', 'o', 2, 0),
('2', 'd', 1, 0),
('3', 'g', 1, 0),
('4', 'a', 1, 4),
('41', 'a', 2, 0),
('42', 'a', 2, 0),
('43', 'u', 2, 1),
('434', 'i', 3, 1),
('4343', 'e', 4, 0),
('44', 'o', 2, 0)]}
def got(self, model):
return [(o.path, o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree()]
def add_broken_test_data(self, model):
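        # Save raw rows with gaps in the path numbering and bogus depth/numchild
        # values, bypassing the treebeard API, so fix_tree() has something to repair.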
model(path='4', depth=2, numchild=2, desc='a').save()
model(path='13', depth=1000, numchild=0, desc='u').save()
model(path='14', depth=4, numchild=500, desc='o').save()
model(path='134', depth=321, numchild=543, desc='i').save()
model(path='1343', depth=321, numchild=543, desc='e').save()
model(path='42', depth=1, numchild=1, desc='a').save()
model(path='43', depth=1000, numchild=0, desc='u').save()
model(path='44', depth=4, numchild=500, desc='o').save()
model(path='434', depth=321, numchild=543, desc='i').save()
model(path='4343', depth=321, numchild=543, desc='e').save()
model(path='41', depth=1, numchild=1, desc='a').save()
model(path='3', depth=221, numchild=322, desc='g').save()
model(path='1', depth=10, numchild=3, desc='b').save()
model(path='2', depth=10, numchild=3, desc='d').save()
def test_fix_tree_non_destructive(self, mpshort_model):
self.add_broken_test_data(mpshort_model)
mpshort_model.fix_tree(destructive=False)
got = self.got(mpshort_model)
expected = self.expected_with_holes[mpshort_model]
assert got == expected
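        # find_problems() is only run as a smoke check here; its result is not asserted.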
mpshort_model.find_problems()
def test_fix_tree_destructive(self, mpshort_model):
self.add_broken_test_data(mpshort_model)
mpshort_model.fix_tree(destructive=True)
got = self.got(mpshort_model)
expected = self.expected_no_holes[mpshort_model]
assert got == expected
mpshort_model.find_problems()
class TestIssues(TestTreeBase):
# test for http://code.google.com/p/django-treebeard/issues/detail?id=14
def test_many_to_many_django_user_anonymous(self, mpm2muser_model):
# Using AnonymousUser() in the querysets will expose non-treebeard
# related problems in Django 1.0
#
# Postgres:
# ProgrammingError: can't adapt
# SQLite:
# InterfaceError: Error binding parameter 4 - probably unsupported
# type.
# MySQL compared a string to an integer field:
# `treebeard_mp_testissue14_users`.`user_id` = 'AnonymousUser'
#
# Using a None field instead works (will be translated to IS NULL).
#
# anonuserobj = AnonymousUser()
anonuserobj = None
def qs_check(qs, expected):
assert [o.name for o in qs] == expected
def qs_check_first_or_user(expected, root, user):
qs_check(
root.get_children().filter(Q(name="first") | Q(users=user)),
expected)
user = User.objects.create_user('test_user', '[email protected]',
'testpasswd')
user.save()
root = mpm2muser_model.add_root(name="the root node")
root.add_child(name="first")
second = root.add_child(name="second")
qs_check(root.get_children(), ['first', 'second'])
qs_check(root.get_children().filter(Q(name="first")), ['first'])
qs_check(root.get_children().filter(Q(users=user)), [])
qs_check_first_or_user(['first'], root, user)
qs_check_first_or_user(['first', 'second'], root, anonuserobj)
user = User.objects.get(username="test_user")
second.users.add(user)
qs_check_first_or_user(['first', 'second'], root, user)
qs_check_first_or_user(['first'], root, anonuserobj)
class TestMoveNodeForm(TestNonEmptyTree):
def _get_nodes_list(self, nodes):
return [(pk, '%sNode %d' % (' ' * 4 * (depth - 1), pk))
for pk, depth in nodes]
def _assert_nodes_in_choices(self, form, nodes):
choices = form.fields['_ref_node_id'].choices
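        # The first choice (value 0) is the option for turning the node into a
        # root; drop it before comparing against the expected node list.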
assert 0 == choices.pop(0)[0]
assert nodes == [(choice[0], choice[1]) for choice in choices]
def _move_node_helper(self, node, safe_parent_nodes):
form_class = movenodeform_factory(type(node))
form = form_class(instance=node)
assert ['desc', '_position', '_ref_node_id'] == list(
form.base_fields.keys())
got = [choice[0] for choice in form.fields['_position'].choices]
assert ['first-child', 'left', 'right'] == got
nodes = self._get_nodes_list(safe_parent_nodes)
self._assert_nodes_in_choices(form, nodes)
def _get_node_ids_and_depths(self, nodes):
return [(node.pk, node.get_depth()) for node in nodes]
def test_form_root_node(self, model):
nodes = list(model.get_tree())
node = nodes.pop(0)
safe_parent_nodes = self._get_node_ids_and_depths(nodes)
self._move_node_helper(node, safe_parent_nodes)
def test_form_leaf_node(self, model):
nodes = list(model.get_tree())
node = nodes.pop()
safe_parent_nodes = self._get_node_ids_and_depths(nodes)
self._move_node_helper(node, safe_parent_nodes)
def test_form_admin(self, model):
request = None
nodes = list(model.get_tree())
safe_parent_nodes = self._get_node_ids_and_depths(nodes)
for node in model.objects.all():
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
ma = admin_class(model, site)
got = list(ma.get_form(request).base_fields.keys())
desc_pos_refnodeid = ['desc', '_position', '_ref_node_id']
assert desc_pos_refnodeid == got
got = ma.get_fieldsets(request)
expected = [(None, {'fields': desc_pos_refnodeid})]
assert got == expected
got = ma.get_fieldsets(request, node)
assert got == expected
form = ma.get_form(request)()
nodes = self._get_nodes_list(safe_parent_nodes)
self._assert_nodes_in_choices(form, nodes)
class TestModelAdmin(TestNonEmptyTree):
def test_default_fields(self, model):
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
ma = admin_class(model, site)
assert list(ma.get_form(None).base_fields.keys()) == [
'desc', '_position', '_ref_node_id']
class TestSortedForm(TestTreeSorted):
def test_sorted_form(self, sorted_model):
sorted_model.add_root(val1=3, val2=3, desc='zxy')
sorted_model.add_root(val1=1, val2=4, desc='bcd')
sorted_model.add_root(val1=2, val2=5, desc='zxy')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=4, val2=1, desc='fgh')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=2, val2=2, desc='qwe')
sorted_model.add_root(val1=3, val2=2, desc='vcx')
form_class = movenodeform_factory(sorted_model)
form = form_class()
assert list(form.fields.keys()) == ['val1', 'val2', 'desc',
'_position', '_ref_node_id']
form = form_class(instance=sorted_model.objects.get(desc='bcd'))
assert list(form.fields.keys()) == ['val1', 'val2', 'desc',
'_position', '_ref_node_id']
assert 'id__position' in str(form)
assert 'id__ref_node_id' in str(form)
class TestForm(TestNonEmptyTree):
def test_form(self, model):
form_class = movenodeform_factory(model)
form = form_class()
assert list(form.fields.keys()) == ['desc', '_position',
'_ref_node_id']
form = form_class(instance=model.objects.get(desc='1'))
assert list(form.fields.keys()) == ['desc', '_position',
'_ref_node_id']
assert 'id__position' in str(form)
assert 'id__ref_node_id' in str(form)
def test_move_node_form(self, model):
form_class = movenodeform_factory(model)
bad_node = model.objects.get(desc='1').add_child(
desc='Benign<script>alert("Compromised");</script>'
)
form = form_class(instance=bad_node)
rendered_html = form.as_p()
assert "Benign" in rendered_html
assert "<script>" not in rendered_html
assert "<script>" in rendered_html
def test_get_position_ref_node(self, model):
form_class = movenodeform_factory(model)
instance_parent = model.objects.get(desc='1')
form = form_class(instance=instance_parent)
assert form._get_position_ref_node(instance_parent) == {
'_position': 'first-child',
'_ref_node_id': ''
}
instance_child = model.objects.get(desc='21')
form = form_class(instance=instance_child)
assert form._get_position_ref_node(instance_child) == {
'_position': 'first-child',
'_ref_node_id': model.objects.get(desc='2').pk
}
instance_grandchild = model.objects.get(desc='22')
form = form_class(instance=instance_grandchild)
assert form._get_position_ref_node(instance_grandchild) == {
'_position': 'right',
'_ref_node_id': model.objects.get(desc='21').pk
}
instance_grandchild = model.objects.get(desc='231')
form = form_class(instance=instance_grandchild)
assert form._get_position_ref_node(instance_grandchild) == {
'_position': 'first-child',
'_ref_node_id': model.objects.get(desc='23').pk
}
def test_clean_cleaned_data(self, model):
instance_parent = model.objects.get(desc='1')
_position = 'first-child'
_ref_node_id = ''
form_class = movenodeform_factory(model)
form = form_class(
instance=instance_parent,
data={
'_position': _position,
'_ref_node_id': _ref_node_id,
'desc': instance_parent.desc
}
)
assert form.is_valid()
assert form._clean_cleaned_data() == (_position, _ref_node_id)
def test_save_edit(self, model):
instance_parent = model.objects.get(desc='1')
original_count = len(model.objects.all())
form_class = movenodeform_factory(model)
form = form_class(
instance=instance_parent,
data={
'_position': 'first-child',
'_ref_node_id': model.objects.get(desc='2').pk,
'desc': instance_parent.desc
}
)
assert form.is_valid()
saved_instance = form.save()
assert original_count == model.objects.all().count()
assert saved_instance.get_children_count() == 0
assert saved_instance.get_depth() == 2
assert not saved_instance.is_root()
assert saved_instance.is_leaf()
# Return to original state
form_class = movenodeform_factory(model)
form = form_class(
instance=saved_instance,
data={
'_position': 'first-child',
'_ref_node_id': '',
'desc': saved_instance.desc
}
)
assert form.is_valid()
restored_instance = form.save()
assert original_count == model.objects.all().count()
assert restored_instance.get_children_count() == 0
assert restored_instance.get_depth() == 1
assert restored_instance.is_root()
assert restored_instance.is_leaf()
def test_save_new(self, model):
original_count = model.objects.all().count()
assert original_count == 10
_position = 'first-child'
form_class = movenodeform_factory(model)
form = form_class(
data={'_position': _position, 'desc': 'New Form Test'})
assert form.is_valid()
assert form.save() is not None
assert original_count < model.objects.all().count()
class TestAdminTreeTemplateTags(TestCase):
def test_treebeard_css(self):
template = Template("{% load admin_tree %}{% treebeard_css %}")
context = Context()
rendered = template.render(context)
expected = ('<link rel="stylesheet" type="text/css" '
'href="/treebeard/treebeard-admin.css"/>')
assert expected == rendered
def test_treebeard_js(self):
template = Template("{% load admin_tree %}{% treebeard_js %}")
context = Context()
rendered = template.render(context)
expected = ('<script type="text/javascript" src="jsi18n"></script>'
'<script type="text/javascript" '
'src="/treebeard/treebeard-admin.js"></script>'
'<script>(function($){'
'jQuery = $.noConflict(true);'
'})(django.jQuery);</script>'
'<script type="text/javascript" '
'src="/treebeard/jquery-ui-1.8.5.custom.min.js"></script>')
assert expected == rendered
def test_get_static_url(self):
with self.settings(STATIC_URL=None, MEDIA_URL=None):
assert get_static_url() == '/'
with self.settings(STATIC_URL='/static/', MEDIA_URL=None):
assert get_static_url() == '/static/'
with self.settings(STATIC_URL=None, MEDIA_URL='/media/'):
assert get_static_url() == '/media/'
with self.settings(STATIC_URL='/static/', MEDIA_URL='/media/'):
assert get_static_url() == '/static/'
class TestAdminTree(TestNonEmptyTree):
template = Template('{% load admin_tree %}{% spaceless %}'
'{% result_tree cl request %}{% endspaceless %}')
def test_result_tree(self, model_without_proxy):
"""
        Verifies that the result_tree inclusion tag generates a table with
        default ModelAdmin settings.
"""
model = model_without_proxy
request = RequestFactory().get('/admin/tree/')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
# We have the same amount of drag handlers as objects
        drag_handler = '<td class="drag-handler"><span>&nbsp;</span></td>'
assert table_output.count(drag_handler) == model.objects.count()
# All nodes are in the result tree
for object in model.objects.all():
url = cl.url_for_result(object)
node = '<a href="%s">Node %i</a>' % (url, object.pk)
assert node in table_output
# Unfiltered
assert '<input type="hidden" id="has-filters" value="0"/>' in \
table_output
def test_unicode_result_tree(self, model_with_unicode):
"""
        Verifies that the result_tree inclusion tag renders a table containing
        unicode node descriptions with default ModelAdmin settings.
"""
model = model_with_unicode
# Add a unicode description
model.add_root(desc='áéîøü')
request = RequestFactory().get('/admin/tree/')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
# We have the same amount of drag handlers as objects
        drag_handler = '<td class="drag-handler"><span>&nbsp;</span></td>'
assert table_output.count(drag_handler) == model.objects.count()
# All nodes are in the result tree
for object in model.objects.all():
url = cl.url_for_result(object)
node = '<a href="%s">%s</a>' % (url, object.desc)
assert node in table_output
# Unfiltered
assert '<input type="hidden" id="has-filters" value="0"/>' in \
table_output
def test_result_filtered(self, model_without_proxy):
""" Test template changes with filters or pagination.
"""
model = model_without_proxy
# Filtered GET
request = RequestFactory().get('/admin/tree/?desc=1')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
# Filtered
assert '<input type="hidden" id="has-filters" value="1"/>' in \
table_output
# Not Filtered GET, it should ignore pagination
request = RequestFactory().get('/admin/tree/?p=1')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
# Not Filtered
assert '<input type="hidden" id="has-filters" value="0"/>' in \
table_output
        # Unfiltered GET; the "all" parameter alone should not count as filtering
request = RequestFactory().get('/admin/tree/?all=1')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
# Not Filtered
assert '<input type="hidden" id="has-filters" value="0"/>' in \
table_output
class TestAdminTreeList(TestNonEmptyTree):
template = Template('{% load admin_tree_list %}{% spaceless %}'
'{% result_tree cl request %}{% endspaceless %}')
def test_result_tree_list(self, model_without_proxy):
"""
        Verifies that the result_tree inclusion tag from admin_tree_list
        renders the tree as a list when used with the default ModelAdmin
        settings.
"""
model = model_without_proxy
request = RequestFactory().get('/admin/tree/')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
output_template = '<li><a href="%i/" >Node %i</a>'
for object in model.objects.all():
expected_output = output_template % (object.pk, object.pk)
assert expected_output in table_output
def test_result_tree_list_with_action(self, model_without_proxy):
model = model_without_proxy
request = RequestFactory().get('/admin/tree/')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request,
'action_form': True})
table_output = self.template.render(context)
output_template = ('<input type="checkbox" class="action-select" '
'value="%i" name="_selected_action" />'
'<a href="%i/" >Node %i</a>')
for object in model.objects.all():
expected_output = output_template % (object.pk, object.pk,
object.pk)
assert expected_output in table_output
def test_result_tree_list_with_get(self, model_without_proxy):
model = model_without_proxy
        # Test the TO_FIELD_VAR GET parameter with the value "id"
request = RequestFactory().get(
'/admin/tree/?{0}=id'.format(TO_FIELD_VAR))
site = AdminSite()
admin_register_all(site)
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
output_template = "opener.dismissRelatedLookupPopup(window, '%i');"
for object in model.objects.all():
expected_output = output_template % object.pk
assert expected_output in table_output
class TestTreeAdmin(TestNonEmptyTree):
site = AdminSite()
def _create_superuser(self, username):
return User.objects.create(username=username, is_superuser=True)
def _mocked_authenticated_request(self, url, user):
request_factory = RequestFactory()
request = request_factory.get(url)
request.user = user
return request
def _mocked_request(self, data):
request_factory = RequestFactory()
request = request_factory.post('/', data=data)
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
return request
def _get_admin_obj(self, model_class):
form_class = movenodeform_factory(model_class)
admin_class = admin_factory(form_class)
return admin_class(model_class, self.site)
def test_changelist_view(self):
tmp_user = self._create_superuser('changelist_tmp')
request = self._mocked_authenticated_request('/', tmp_user)
admin_obj = self._get_admin_obj(models.AL_TestNode)
admin_obj.changelist_view(request)
assert admin_obj.change_list_template == 'admin/tree_list.html'
admin_obj = self._get_admin_obj(models.MP_TestNode)
admin_obj.changelist_view(request)
assert admin_obj.change_list_template != 'admin/tree_list.html'
def test_get_node(self, model):
admin_obj = self._get_admin_obj(model)
target = model.objects.get(desc='2')
assert admin_obj.get_node(target.pk) == target
def test_move_node_validate_keyerror(self, model):
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={})
response = admin_obj.move_node(request)
assert response.status_code == 400
request = self._mocked_request(data={'node_id': 1})
response = admin_obj.move_node(request)
assert response.status_code == 400
def test_move_node_validate_valueerror(self, model):
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={'node_id': 1,
'sibling_id': 2,
'as_child': 'invalid'})
response = admin_obj.move_node(request)
assert response.status_code == 400
def test_move_validate_missing_nodeorderby(self, model):
node = model.objects.get(desc='231')
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={})
response = admin_obj.try_to_move_node(True, node, 'sorted-child',
request, target=node)
assert response.status_code == 400
response = admin_obj.try_to_move_node(True, node, 'sorted-sibling',
request, target=node)
assert response.status_code == 400
def test_move_validate_invalid_pos(self, model):
node = model.objects.get(desc='231')
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={})
response = admin_obj.try_to_move_node(True, node, 'invalid_pos',
request, target=node)
assert response.status_code == 400
def test_move_validate_to_descendant(self, model):
node = model.objects.get(desc='2')
target = model.objects.get(desc='231')
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={})
response = admin_obj.try_to_move_node(True, node, 'first-sibling',
request, target)
assert response.status_code == 400
def test_move_left(self, model):
node = model.objects.get(desc='231')
target = model.objects.get(desc='2')
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={'node_id': node.pk,
'sibling_id': target.pk,
'as_child': 0})
response = admin_obj.move_node(request)
assert response.status_code == 200
expected = [('1', 1, 0),
('231', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_last_child(self, model):
node = model.objects.get(desc='231')
target = model.objects.get(desc='2')
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={'node_id': node.pk,
'sibling_id': target.pk,
'as_child': 1})
response = admin_obj.move_node(request)
assert response.status_code == 200
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('231', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
| [] | [] | ["TREEBEARD_TEST_ALPHABET"] | [] | ["TREEBEARD_TEST_ALPHABET"] | python | 1 | 0 |
booktrade/booktrade/wsgi.py | """
WSGI config for booktrade project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "booktrade.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 |
cmd/commands/test.go | // Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
package commands
import (
"bufio"
"fmt"
"os"
"os/exec"
"os/signal"
"syscall"
"github.com/mattermost/mattermost-server/api"
"github.com/mattermost/mattermost-server/api4"
"github.com/mattermost/mattermost-server/cmd"
"github.com/mattermost/mattermost-server/model"
"github.com/mattermost/mattermost-server/utils"
"github.com/mattermost/mattermost-server/wsapi"
"github.com/spf13/cobra"
)
var TestCmd = &cobra.Command{
Use: "test",
Short: "Testing Commands",
Hidden: true,
}
var RunWebClientTestsCmd = &cobra.Command{
Use: "web_client_tests",
Short: "Run the web client tests",
RunE: webClientTestsCmdF,
}
var RunServerForWebClientTestsCmd = &cobra.Command{
Use: "web_client_tests_server",
Short: "Run the server configured for running the web client tests against it",
RunE: serverForWebClientTestsCmdF,
}
func init() {
TestCmd.AddCommand(
RunWebClientTestsCmd,
RunServerForWebClientTestsCmd,
)
cmd.RootCmd.AddCommand(TestCmd)
}
func webClientTestsCmdF(command *cobra.Command, args []string) error {
a, err := cmd.InitDBCommandContextCobra(command)
if err != nil {
return err
}
defer a.Shutdown()
utils.InitTranslations(a.Config().LocalizationSettings)
serverErr := a.StartServer()
if serverErr != nil {
return serverErr
}
api4.Init(a, a.Srv.Router, false)
api.Init(a, a.Srv.Router)
wsapi.Init(a, a.Srv.WebSocketRouter)
a.UpdateConfig(setupClientTests)
runWebClientTests()
return nil
}
func serverForWebClientTestsCmdF(command *cobra.Command, args []string) error {
a, err := cmd.InitDBCommandContextCobra(command)
if err != nil {
return err
}
defer a.Shutdown()
utils.InitTranslations(a.Config().LocalizationSettings)
serverErr := a.StartServer()
if serverErr != nil {
return serverErr
}
api4.Init(a, a.Srv.Router, false)
api.Init(a, a.Srv.Router)
wsapi.Init(a, a.Srv.WebSocketRouter)
a.UpdateConfig(setupClientTests)
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
<-c
return nil
}
func setupClientTests(cfg *model.Config) {
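	// These overrides are applied on top of the loaded config (via UpdateConfig)
	// so the web client tests run against an open server with commands and
	// webhooks disabled.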
*cfg.TeamSettings.EnableOpenServer = true
*cfg.ServiceSettings.EnableCommands = false
*cfg.ServiceSettings.EnableOnlyAdminIntegrations = false
*cfg.ServiceSettings.EnableCustomEmoji = true
cfg.ServiceSettings.EnableIncomingWebhooks = false
cfg.ServiceSettings.EnableOutgoingWebhooks = false
}
func executeTestCommand(command *exec.Cmd) {
cmdOutPipe, err := command.StdoutPipe()
if err != nil {
cmd.CommandPrintErrorln("Failed to run tests")
os.Exit(1)
return
}
cmdErrOutPipe, err := command.StderrPipe()
if err != nil {
cmd.CommandPrintErrorln("Failed to run tests")
os.Exit(1)
return
}
cmdOutReader := bufio.NewScanner(cmdOutPipe)
cmdErrOutReader := bufio.NewScanner(cmdErrOutPipe)
go func() {
for cmdOutReader.Scan() {
fmt.Println(cmdOutReader.Text())
}
}()
go func() {
for cmdErrOutReader.Scan() {
fmt.Println(cmdErrOutReader.Text())
}
}()
if err := command.Run(); err != nil {
cmd.CommandPrintErrorln("Client Tests failed")
os.Exit(1)
return
}
}
func runWebClientTests() {
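	// WEBAPP_DIR can point at a mattermost-webapp checkout; otherwise a sibling
	// ../mattermost-webapp directory is assumed before running `npm test`.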
if webappDir := os.Getenv("WEBAPP_DIR"); webappDir != "" {
os.Chdir(webappDir)
} else {
os.Chdir("../mattermost-webapp")
}
cmd := exec.Command("npm", "test")
executeTestCommand(cmd)
}
| ["\"WEBAPP_DIR\""] | [] | ["WEBAPP_DIR"] | [] | ["WEBAPP_DIR"] | go | 1 | 0 |
cmd/searcher/search/search_test.go | package search_test
import (
"archive/tar"
"bytes"
"context"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"os"
"sort"
"strconv"
"testing"
"time"
"github.com/cockroachdb/errors"
"github.com/sourcegraph/sourcegraph/cmd/searcher/protocol"
"github.com/sourcegraph/sourcegraph/cmd/searcher/search"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/search/searcher"
"github.com/sourcegraph/sourcegraph/internal/store"
"github.com/sourcegraph/sourcegraph/internal/testutil"
)
type fileType int
const (
typeFile fileType = iota
typeSymlink
)
func TestSearch(t *testing.T) {
// Create byte buffer of binary file
miltonPNG := bytes.Repeat([]byte{0x00}, 32*1024)
files := map[string]struct {
body string
typ fileType
}{
"README.md": {`# Hello World
Hello world example in go`, typeFile},
"file++.plus": {`filename contains regex metachars`, typeFile},
"main.go": {`package main
import "fmt"
func main() {
fmt.Println("Hello world")
}
`, typeFile},
"abc.txt": {"w", typeFile},
"milton.png": {string(miltonPNG), typeFile},
"ignore.me": {`func hello() string {return "world"}`, typeFile},
"symlink": {"abc.txt", typeSymlink},
}
cases := []struct {
arg protocol.PatternInfo
want string
}{
{protocol.PatternInfo{Pattern: "foo"}, ""},
{protocol.PatternInfo{Pattern: "World", IsCaseSensitive: true}, `
README.md:1:# Hello World
`},
{protocol.PatternInfo{Pattern: "world", IsCaseSensitive: true}, `
README.md:3:Hello world example in go
main.go:6: fmt.Println("Hello world")
`},
{protocol.PatternInfo{Pattern: "world"}, `
README.md:1:# Hello World
README.md:3:Hello world example in go
main.go:6: fmt.Println("Hello world")
`},
{protocol.PatternInfo{Pattern: "func.*main"}, ""},
{protocol.PatternInfo{Pattern: "func.*main", IsRegExp: true}, `
main.go:5:func main() {
`},
// https://github.com/sourcegraph/sourcegraph/issues/8155
{protocol.PatternInfo{Pattern: "^func", IsRegExp: true}, `
main.go:5:func main() {
`},
{protocol.PatternInfo{Pattern: "^FuNc", IsRegExp: true}, `
main.go:5:func main() {
`},
{protocol.PatternInfo{Pattern: "mai", IsWordMatch: true}, ""},
{protocol.PatternInfo{Pattern: "main", IsWordMatch: true}, `
main.go:1:package main
main.go:5:func main() {
`},
// Ensure we handle CaseInsensitive regexp searches with
// special uppercase chars in pattern.
{protocol.PatternInfo{Pattern: `printL\B`, IsRegExp: true}, `
main.go:6: fmt.Println("Hello world")
`},
{protocol.PatternInfo{Pattern: "world", ExcludePattern: "README.md"}, `
main.go:6: fmt.Println("Hello world")
`},
{protocol.PatternInfo{Pattern: "world", IncludePatterns: []string{"*.md"}}, `
README.md:1:# Hello World
README.md:3:Hello world example in go
`},
{protocol.PatternInfo{Pattern: "w", IncludePatterns: []string{"*.{md,txt}", "*.txt"}}, `
abc.txt:1:w
`},
{protocol.PatternInfo{Pattern: "world", ExcludePattern: "README\\.md", PathPatternsAreRegExps: true}, `
main.go:6: fmt.Println("Hello world")
`},
{protocol.PatternInfo{Pattern: "world", IncludePatterns: []string{"\\.md"}, PathPatternsAreRegExps: true}, `
README.md:1:# Hello World
README.md:3:Hello world example in go
`},
{protocol.PatternInfo{Pattern: "w", IncludePatterns: []string{"\\.(md|txt)", "README"}, PathPatternsAreRegExps: true}, `
README.md:1:# Hello World
README.md:3:Hello world example in go
`},
{protocol.PatternInfo{Pattern: "world", IncludePatterns: []string{"*.{MD,go}"}, PathPatternsAreCaseSensitive: true}, `
main.go:6: fmt.Println("Hello world")
`},
{protocol.PatternInfo{Pattern: "world", IncludePatterns: []string{`\.(MD|go)`}, PathPatternsAreRegExps: true, PathPatternsAreCaseSensitive: true}, `
main.go:6: fmt.Println("Hello world")
`},
{protocol.PatternInfo{Pattern: "doesnotmatch"}, ""},
{protocol.PatternInfo{Pattern: "", IsRegExp: false, IncludePatterns: []string{"\\.png"}, PathPatternsAreRegExps: true, PatternMatchesPath: true}, `
milton.png
`},
{protocol.PatternInfo{Pattern: "package main\n\nimport \"fmt\"", IsCaseSensitive: false, IsRegExp: true, PathPatternsAreRegExps: true, PatternMatchesPath: true, PatternMatchesContent: true}, `
main.go:1:package main
main.go:2:
main.go:3:import "fmt"
`},
{protocol.PatternInfo{Pattern: "package main\n\\s*import \"fmt\"", IsCaseSensitive: false, IsRegExp: true, PathPatternsAreRegExps: true, PatternMatchesPath: true, PatternMatchesContent: true}, `
main.go:1:package main
main.go:2:
main.go:3:import "fmt"
`},
{protocol.PatternInfo{Pattern: "package main\n", IsCaseSensitive: false, IsRegExp: true, PathPatternsAreRegExps: true, PatternMatchesPath: true, PatternMatchesContent: true}, `
main.go:1:package main
`},
{protocol.PatternInfo{Pattern: "package main\n\\s*", IsCaseSensitive: false, IsRegExp: true, PathPatternsAreRegExps: true, PatternMatchesPath: true, PatternMatchesContent: true}, `
main.go:1:package main
main.go:2:
`},
{protocol.PatternInfo{Pattern: "package main\n\\s*", IsCaseSensitive: false, IsRegExp: true, PathPatternsAreRegExps: true, PatternMatchesPath: true, PatternMatchesContent: true}, `
main.go:1:package main
main.go:2:
`},
{protocol.PatternInfo{Pattern: "\nfunc", IsCaseSensitive: false, IsRegExp: true, PathPatternsAreRegExps: true, PatternMatchesPath: true, PatternMatchesContent: true}, `
main.go:4:
main.go:5:func main() {
`},
{protocol.PatternInfo{Pattern: "\n\\s*func", IsCaseSensitive: false, IsRegExp: true, PathPatternsAreRegExps: true, PatternMatchesPath: true, PatternMatchesContent: true}, `
main.go:3:import "fmt"
main.go:4:
main.go:5:func main() {
`},
{protocol.PatternInfo{Pattern: "package main\n\nimport \"fmt\"\n\nfunc main\\(\\) {", IsCaseSensitive: false, IsRegExp: true, PathPatternsAreRegExps: true, PatternMatchesPath: true, PatternMatchesContent: true}, `
main.go:1:package main
main.go:2:
main.go:3:import "fmt"
main.go:4:
main.go:5:func main() {
`},
{protocol.PatternInfo{Pattern: "\n", IsCaseSensitive: false, IsRegExp: true, PathPatternsAreRegExps: true, PatternMatchesPath: true, PatternMatchesContent: true}, `
README.md:1:# Hello World
README.md:2:
main.go:1:package main
main.go:2:
main.go:3:import "fmt"
main.go:4:
main.go:5:func main() {
main.go:6: fmt.Println("Hello world")
main.go:7:}
`},
{protocol.PatternInfo{Pattern: "^$", IsRegExp: true}, ``},
{protocol.PatternInfo{
Pattern: "filename contains regex metachars",
IncludePatterns: []string{"file++.plus"},
IsStructuralPat: true,
IsRegExp: true, // To test for a regression, imply that IsStructuralPat takes precedence.
}, `
file++.plus:1:filename contains regex metachars
`},
{protocol.PatternInfo{Pattern: "World", IsNegated: true}, `
abc.txt
file++.plus
milton.png
symlink
`},
{protocol.PatternInfo{Pattern: "World", IsCaseSensitive: true, IsNegated: true}, `
abc.txt
file++.plus
main.go
milton.png
symlink
`},
{protocol.PatternInfo{Pattern: "fmt", IsNegated: true}, `
README.md
abc.txt
file++.plus
milton.png
symlink
`},
{protocol.PatternInfo{Pattern: "abc", PatternMatchesPath: true, PatternMatchesContent: true}, `
abc.txt
symlink:1:abc.txt
`},
{protocol.PatternInfo{Pattern: "abc", PatternMatchesPath: false, PatternMatchesContent: true}, `
symlink:1:abc.txt
`},
{protocol.PatternInfo{Pattern: "abc", PatternMatchesPath: true, PatternMatchesContent: false}, `
abc.txt
`},
}
s, cleanup, err := newStore(files)
if err != nil {
t.Fatal(err)
}
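	// Install a tar filter for "ignore.me" so it is excluded from search;
	// none of the expected results reference that file.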
s.FilterTar = func(_ context.Context, _ api.RepoName, _ api.CommitID) (store.FilterFunc, error) {
return func(hdr *tar.Header) bool {
return hdr.Name == "ignore.me"
}, nil
}
defer cleanup()
ts := httptest.NewServer(&search.Service{Store: s})
defer ts.Close()
for i, test := range cases {
t.Run(strconv.Itoa(i), func(t *testing.T) {
if test.arg.IsStructuralPat && os.Getenv("CI") == "" {
t.Skip("skipping comby test when not on CI")
}
// CI can be very busy, so give lots of time to fetchTimeout.
fetchTimeout := 500 * time.Millisecond
if deadline, ok := t.Deadline(); ok {
fetchTimeout = time.Until(deadline) / 2
}
req := protocol.Request{
Repo: "foo",
URL: "u",
Commit: "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
PatternInfo: test.arg,
FetchTimeout: fetchTimeout.String(),
}
m, err := doSearch(ts.URL, &req)
if err != nil {
t.Fatalf("%s failed: %s", test.arg.String(), err)
}
sort.Sort(sortByPath(m))
got := toString(m)
err = sanityCheckSorted(m)
if err != nil {
t.Fatalf("%s malformed response: %s\n%s", test.arg.String(), err, got)
}
// We have an extra newline to make expected readable
if len(test.want) > 0 {
test.want = test.want[1:]
}
if got != test.want {
d, err := testutil.Diff(test.want, got)
if err != nil {
t.Fatal(err)
}
t.Fatalf("%s unexpected response:\n%s", test.arg.String(), d)
}
})
}
}
func TestSearch_badrequest(t *testing.T) {
cases := []protocol.Request{
// Bad regexp
{
Repo: "foo",
URL: "u",
Commit: "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
PatternInfo: protocol.PatternInfo{
Pattern: `\F`,
IsRegExp: true,
},
},
// Unsupported regex
{
Repo: "foo",
URL: "u",
Commit: "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
PatternInfo: protocol.PatternInfo{
Pattern: `(?!id)entity`,
IsRegExp: true,
},
},
// No repo
{
URL: "u",
Commit: "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
PatternInfo: protocol.PatternInfo{
Pattern: "test",
},
},
// No commit
{
Repo: "foo",
URL: "u",
PatternInfo: protocol.PatternInfo{
Pattern: "test",
},
},
// Non-absolute commit
{
Repo: "foo",
URL: "u",
Commit: "HEAD",
PatternInfo: protocol.PatternInfo{
Pattern: "test",
},
},
// Bad include glob
{
Repo: "foo",
URL: "u",
Commit: "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
PatternInfo: protocol.PatternInfo{
Pattern: "test",
IncludePatterns: []string{"[c-a]"},
},
},
// Bad exclude glob
{
Repo: "foo",
URL: "u",
Commit: "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
PatternInfo: protocol.PatternInfo{
Pattern: "test",
ExcludePattern: "[c-a]",
},
},
// Bad include regexp
{
Repo: "foo",
URL: "u",
Commit: "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
PatternInfo: protocol.PatternInfo{
Pattern: "test",
IncludePatterns: []string{"**"},
PathPatternsAreRegExps: true,
},
},
// Bad exclude regexp
{
Repo: "foo",
URL: "u",
Commit: "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
PatternInfo: protocol.PatternInfo{
Pattern: "test",
ExcludePattern: "**",
PathPatternsAreRegExps: true,
},
},
// structural search with negated pattern
{
Repo: "foo",
URL: "u",
Commit: "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
PatternInfo: protocol.PatternInfo{
Pattern: "fmt.Println(:[_])",
IsNegated: true,
ExcludePattern: "",
PathPatternsAreRegExps: true,
IsStructuralPat: true,
},
},
}
store, cleanup, err := newStore(nil)
if err != nil {
t.Fatal(err)
}
defer cleanup()
ts := httptest.NewServer(&search.Service{Store: store})
defer ts.Close()
for _, p := range cases {
p.PatternInfo.PatternMatchesContent = true
_, err := doSearch(ts.URL, &p)
if err == nil {
t.Fatalf("%v expected to fail", p)
}
}
}
func doSearch(u string, p *protocol.Request) ([]protocol.FileMatch, error) {
reqBody, err := json.Marshal(p)
if err != nil {
return nil, err
}
resp, err := http.Post(u, "application/json", bytes.NewReader(reqBody))
if err != nil {
return nil, err
}
if resp.StatusCode != 200 {
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return nil, errors.Errorf("non-200 response: code=%d body=%s", resp.StatusCode, string(body))
}
var ed searcher.EventDone
var matches []protocol.FileMatch
dec := searcher.StreamDecoder{
OnMatches: func(newMatches []*protocol.FileMatch) {
for _, match := range newMatches {
matches = append(matches, *match)
}
},
OnDone: func(e searcher.EventDone) {
ed = e
},
OnUnknown: func(event []byte, _ []byte) {
panic("unknown event")
},
}
if err := dec.ReadAll(resp.Body); err != nil {
return nil, err
}
if ed.Error != "" {
return nil, errors.New(ed.Error)
}
if ed.DeadlineHit {
err = context.DeadlineExceeded
}
return matches, err
}
func newStore(files map[string]struct {
body string
typ fileType
}) (*store.Store, func(), error) {
buf := new(bytes.Buffer)
w := tar.NewWriter(buf)
for name, file := range files {
var hdr *tar.Header
switch file.typ {
case typeFile:
hdr = &tar.Header{
Name: name,
Mode: 0600,
Size: int64(len(file.body)),
}
if err := w.WriteHeader(hdr); err != nil {
return nil, nil, err
}
if _, err := w.Write([]byte(file.body)); err != nil {
return nil, nil, err
}
case typeSymlink:
hdr = &tar.Header{
Typeflag: tar.TypeSymlink,
Name: name,
Mode: int64(os.ModePerm | os.ModeSymlink),
Linkname: file.body,
}
if err := w.WriteHeader(hdr); err != nil {
return nil, nil, err
}
}
}
// git-archive usually includes a pax header we should ignore.
	// Use a body which matches a test case to ensure we don't return this
	// false entry as a result.
if err := addpaxheader(w, "Hello world\n"); err != nil {
return nil, nil, err
}
err := w.Close()
if err != nil {
return nil, nil, err
}
d, err := os.MkdirTemp("", "search_test")
if err != nil {
return nil, nil, err
}
return &store.Store{
FetchTar: func(ctx context.Context, repo api.RepoName, commit api.CommitID) (io.ReadCloser, error) {
return io.NopCloser(bytes.NewReader(buf.Bytes())), nil
},
Path: d,
}, func() { os.RemoveAll(d) }, nil
}
func toString(m []protocol.FileMatch) string {
buf := new(bytes.Buffer)
for _, f := range m {
if len(f.LineMatches) == 0 {
buf.WriteString(f.Path)
buf.WriteByte('\n')
}
for _, l := range f.LineMatches {
buf.WriteString(f.Path)
buf.WriteByte(':')
buf.WriteString(strconv.Itoa(l.LineNumber + 1))
buf.WriteByte(':')
buf.WriteString(l.Preview)
buf.WriteByte('\n')
}
}
return buf.String()
}
func sanityCheckSorted(m []protocol.FileMatch) error {
if !sort.IsSorted(sortByPath(m)) {
return errors.New("unsorted file matches, please sortByPath")
}
for i := range m {
if i > 0 && m[i].Path == m[i-1].Path {
return errors.Errorf("duplicate FileMatch on %s", m[i].Path)
}
lm := m[i].LineMatches
if !sort.IsSorted(sortByLineNumber(lm)) {
return errors.Errorf("unsorted LineMatches for %s", m[i].Path)
}
for j := range lm {
if j > 0 && lm[j].LineNumber == lm[j-1].LineNumber {
return errors.Errorf("duplicate LineNumber on %s:%d", m[i].Path, lm[j].LineNumber)
}
}
}
return nil
}
type sortByPath []protocol.FileMatch
func (m sortByPath) Len() int { return len(m) }
func (m sortByPath) Less(i, j int) bool { return m[i].Path < m[j].Path }
func (m sortByPath) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
type sortByLineNumber []protocol.LineMatch
func (m sortByLineNumber) Len() int { return len(m) }
func (m sortByLineNumber) Less(i, j int) bool { return m[i].LineNumber < m[j].LineNumber }
func (m sortByLineNumber) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
| ["\"CI\""] | [] | ["CI"] | [] | ["CI"] | go | 1 | 0 |
src/test/java/net/sourceforge/myvd/test/Server/TestVirtualMemberOf.java | /*
* Copyright 2008 Marc Boorshtein
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.sourceforge.myvd.test.Server;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Properties;
import net.sourceforge.myvd.chain.AddInterceptorChain;
import net.sourceforge.myvd.chain.BindInterceptorChain;
import net.sourceforge.myvd.chain.DeleteInterceptorChain;
import net.sourceforge.myvd.chain.ExetendedOperationInterceptorChain;
import net.sourceforge.myvd.chain.ModifyInterceptorChain;
import net.sourceforge.myvd.chain.RenameInterceptorChain;
import net.sourceforge.myvd.chain.SearchInterceptorChain;
import net.sourceforge.myvd.core.NameSpace;
import net.sourceforge.myvd.inserts.Insert;
import net.sourceforge.myvd.inserts.extensions.PasswordChangeOperation;
import net.sourceforge.myvd.inserts.ldap.LDAPInterceptor;
import net.sourceforge.myvd.router.Router;
import net.sourceforge.myvd.server.Server;
import net.sourceforge.myvd.test.chain.TestChain;
import net.sourceforge.myvd.test.util.OpenLDAPUtils;
import net.sourceforge.myvd.test.util.StartMyVD;
import net.sourceforge.myvd.test.util.StartOpenLDAP;
import net.sourceforge.myvd.test.util.Util;
import net.sourceforge.myvd.types.Attribute;
import net.sourceforge.myvd.types.Bool;
import net.sourceforge.myvd.types.DistinguishedName;
import net.sourceforge.myvd.types.Entry;
import net.sourceforge.myvd.types.EntrySet;
import net.sourceforge.myvd.types.ExtendedOperation;
import net.sourceforge.myvd.types.Filter;
import net.sourceforge.myvd.types.Int;
import net.sourceforge.myvd.types.Password;
import net.sourceforge.myvd.types.Result;
import net.sourceforge.myvd.types.Results;
import net.sourceforge.myvd.types.SessionVariables;
import com.novell.ldap.LDAPAttribute;
import com.novell.ldap.LDAPAttributeSet;
import com.novell.ldap.LDAPConnection;
import com.novell.ldap.LDAPConstraints;
import com.novell.ldap.LDAPEntry;
import com.novell.ldap.LDAPException;
import com.novell.ldap.LDAPExtendedOperation;
import com.novell.ldap.LDAPJSSESecureSocketFactory;
import com.novell.ldap.LDAPModification;
import com.novell.ldap.LDAPSearchConstraints;
import com.novell.ldap.LDAPSearchResult;
import com.novell.ldap.LDAPSearchResults;
import com.novell.ldap.LDAPSocketFactory;
import com.novell.ldap.asn1.ASN1Identifier;
import com.novell.ldap.asn1.ASN1OctetString;
import com.novell.ldap.asn1.ASN1Sequence;
import com.novell.ldap.asn1.ASN1Tagged;
import com.novell.ldap.asn1.LBEREncoder;
import com.novell.ldap.util.DN;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.BeforeClass;
import org.junit.AfterClass;
import static org.junit.Assert.*;
public class TestVirtualMemberOf {
// Insert[] globalChain;
// Router router;
private static StartOpenLDAP baseServer;
private static StartOpenLDAP internalServer;
private static StartOpenLDAP externalServer;
private static StartMyVD server;
// private Server server;
@BeforeClass
public static void setUp() throws Exception {
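		// Requires the PROJ_DIR environment variable to point at the project
		// root; three OpenLDAP instances and a MyVD server are started on
		// fixed local ports.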
OpenLDAPUtils.killAllOpenLDAPS();
baseServer = new StartOpenLDAP();
baseServer.startServer(System.getenv("PROJ_DIR") + "/test/Base", 10983, "cn=admin,dc=domain,dc=com", "manager");
internalServer = new StartOpenLDAP();
internalServer.startServer(System.getenv("PROJ_DIR") + "/test/InternalUsers", 11983,
"cn=admin,ou=internal,dc=domain,dc=com", "manager");
externalServer = new StartOpenLDAP();
externalServer.startServer(System.getenv("PROJ_DIR") + "/test/ExternalUsers", 12983,
"cn=admin,ou=external,dc=domain,dc=com", "manager");
server = new StartMyVD();
server.startServer(System.getenv("PROJ_DIR") + "/test/TestServer/testVirtualMemberOf.props", 50983);
// server = new Server(System.getenv("PROJ_DIR") +
// "/test/TestServer/testconfig.props");
// server.startServer();
// globalChain = server.getGlobalChain();
// router = server.getRouter();
System.setProperty("javax.net.ssl.trustStore", System.getenv("PROJ_DIR") + "/test/TestServer/testconfig.jks");
}
@Test
public void testStartServer() throws Exception {
LDAPConnection con = new LDAPConnection();
con.connect("127.0.0.1", 50983);
// con.bind(3,"ou=internal,o=mycompany","secret".getBytes());
LDAPSearchResults res = con.search("ou=internal,o=mycompany,c=us", 2, "(objectClass=*)", new String[0], false);
while (res.hasMore()) {
System.out.println(res.next().getDN());
}
con.disconnect();
}
@Test
public void testSearchSubtreeResults() throws LDAPException {
LDAPAttributeSet attribs = new LDAPAttributeSet();
attribs.add(new LDAPAttribute("objectClass", "inetOrgPerson"));
attribs.add(new LDAPAttribute("cn", "Test User"));
attribs.add(new LDAPAttribute("sn", "User"));
attribs.add(new LDAPAttribute("uid", "testUser"));
attribs.add(new LDAPAttribute("userPassword", "secret"));
LDAPEntry entry2 = new LDAPEntry("cn=Test User,ou=internal,o=mycompany,c=us", attribs);
attribs = new LDAPAttributeSet();
attribs.add(new LDAPAttribute("objectClass", "inetOrgPerson"));
attribs.add(new LDAPAttribute("cn", "Test Cust"));
attribs.add(new LDAPAttribute("sn", "Cust"));
attribs.add(new LDAPAttribute("uid", "testCust"));
attribs.add(new LDAPAttribute("userPassword", "secret"));
attribs.add(new LDAPAttribute("memberOf", "cn=Test Group,ou=external,o=mycompany,c=us"));
LDAPEntry entry1 = new LDAPEntry("cn=Test Cust,ou=external,o=mycompany,c=us", attribs);
LDAPConnection con = new LDAPConnection();
con.connect("127.0.0.1", 50983);
// con.bind(3,"cn=admin,o=mycompany","manager".getBytes());
LDAPSearchResults res = con.search("o=mycompany,c=us", 2, "(objectClass=inetOrgPerson)", new String[] {},
false);
int size = 0;
while (res.hasMore()) {
LDAPEntry fromDir = res.next();
LDAPEntry controlEntry = null;// control.get(fromDir.getEntry().getDN());
if (size == 0) {
controlEntry = entry1;
} else if (size == 1) {
controlEntry = entry2;
} else {
controlEntry = null;
}
if (controlEntry == null) {
fail("Entry " + fromDir.getDN() + " should not be returned");
return;
}
if (!Util.compareEntry(fromDir, controlEntry)) {
fail("The entry was not correct : \n" + Util.toLDIF(fromDir) + "\nfrom control:\n"
+ Util.toLDIF(controlEntry));
return;
}
size++;
}
if (size != 2) {
fail("Not the correct number of entries : " + size);
}
con.disconnect();
}
@Test
public void testSearchSubtreeByMemberof() throws LDAPException {
LDAPAttributeSet attribs = new LDAPAttributeSet();
attribs.add(new LDAPAttribute("objectClass", "inetOrgPerson"));
attribs.add(new LDAPAttribute("cn", "Test Cust"));
attribs.add(new LDAPAttribute("sn", "Cust"));
attribs.add(new LDAPAttribute("uid", "testCust"));
attribs.add(new LDAPAttribute("userPassword", "secret"));
attribs.add(new LDAPAttribute("memberOf", "cn=Test Group,ou=external,o=mycompany,c=us"));
LDAPEntry entry1 = new LDAPEntry("cn=Test Cust,ou=external,o=mycompany,c=us", attribs);
LDAPConnection con = new LDAPConnection();
con.connect("127.0.0.1", 50983);
// con.bind(3,"cn=admin,o=mycompany","manager".getBytes());
LDAPSearchResults res = con.search("o=mycompany,c=us", 2, "(memberOf=cn=Test Group,ou=external,o=mycompany,c=us)", new String[] {},
false);
int size = 0;
boolean found = false;
while (res.hasMore()) {
found = true;
LDAPEntry fromDir = res.next();
LDAPEntry controlEntry = null;// control.get(fromDir.getEntry().getDN());
if (size == 0) {
controlEntry = entry1;
} else {
fail("Too many results");
}
assertTrue(Util.compareEntry(fromDir, controlEntry));
size++;
}
con.disconnect();
assertTrue(found);
}
@Test
public void testSearchSubtreeMemberOfPlusFilter() throws LDAPException {
LDAPAttributeSet attribs = new LDAPAttributeSet();
attribs.add(new LDAPAttribute("objectClass", "inetOrgPerson"));
attribs.add(new LDAPAttribute("cn", "Test User"));
attribs.add(new LDAPAttribute("sn", "User"));
attribs.add(new LDAPAttribute("uid", "testUser"));
attribs.add(new LDAPAttribute("userPassword", "secret"));
LDAPEntry entry2 = new LDAPEntry("cn=Test User,ou=internal,o=mycompany,c=us", attribs);
attribs = new LDAPAttributeSet();
attribs.add(new LDAPAttribute("objectClass", "inetOrgPerson"));
attribs.add(new LDAPAttribute("cn", "Test Cust"));
attribs.add(new LDAPAttribute("sn", "Cust"));
attribs.add(new LDAPAttribute("uid", "testCust"));
attribs.add(new LDAPAttribute("userPassword", "secret"));
attribs.add(new LDAPAttribute("memberOf", "cn=Test Group,ou=external,o=mycompany,c=us"));
LDAPEntry entry1 = new LDAPEntry("cn=Test Cust,ou=external,o=mycompany,c=us", attribs);
LDAPConnection con = new LDAPConnection();
con.connect("127.0.0.1", 50983);
// con.bind(3,"cn=admin,o=mycompany","manager".getBytes());
LDAPSearchResults res = con.search("o=mycompany,c=us", 2, "(|(memberOf=cn=Test Group,ou=external,o=mycompany,c=us)(uid=testUser))", new String[] {},
false);
int size = 0;
while (res.hasMore()) {
LDAPEntry fromDir = res.next();
LDAPEntry controlEntry = null;// control.get(fromDir.getEntry().getDN());
if (size == 0) {
controlEntry = entry1;
} else if (size == 1) {
controlEntry = entry2;
} else {
controlEntry = null;
}
if (controlEntry == null) {
fail("Entry " + fromDir.getDN() + " should not be returned");
return;
}
if (!Util.compareEntry(fromDir, controlEntry)) {
fail("The entry was not correct : \n" + Util.toLDIF(fromDir) + "\nfrom control:\n"
+ Util.toLDIF(controlEntry));
return;
}
size++;
}
if (size != 2) {
fail("Not the correct number of entries : " + size);
}
con.disconnect();
}
@AfterClass
public static void tearDown() throws Exception {
baseServer.stopServer();
internalServer.stopServer();
externalServer.stopServer();
server.stopServer();
// server.stopServer();
}
}
| ["\"PROJ_DIR\"", "\"PROJ_DIR\"", "\"PROJ_DIR\"", "\"PROJ_DIR\"", "\"PROJ_DIR\"", "\"PROJ_DIR\""] | [] | ["PROJ_DIR"] | [] | ["PROJ_DIR"] | java | 1 | 0 |
example/example_functions_test.go | // Copyright (c) 2016, 2018, 2022, Oracle and/or its affiliates. All rights reserved.
// This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
// Example code for functions API
package example
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net/http"
"os"
"reflect"
"github.com/oracle/oci-go-sdk/v58/common"
"github.com/oracle/oci-go-sdk/v58/core"
"github.com/oracle/oci-go-sdk/v58/example/helpers"
"github.com/oracle/oci-go-sdk/v58/functions"
)
var (
fnImage string
gwDisplayName = "OCI-GOSDK-Sample-Gateway"
rtDisplyName = "Default Route Table for OCI-GOSDK-Sample-VCN"
)
/*
SETUP:
This test requires that you have a [DEFAULT] OCI user profile setup e.g. in ~/.oci/config
the DEFAULT user will be used in these tests, so any variables supplied must be compatible with that user
This test requires 4 environment variables to be set:
for these environment variables, see example/example_test.go {
OCI_COMPARTMENT_ID
OCI_AVAILABILITY_DOMAIN
OCI_ROOT_COMPARTMENT_ID
}
OCI_FN_IMAGE : The URI of a publicly available image in the Oracle Cloud Infrastructure Registry (OCIR) e.g. phx.ocir.io/<tenancy-name>/<directory>/<image-name>:<image-tag>
RUN:
To run this test/example run:
go test github.com/oracle/oci-go-sdk/example -run ExampleFunctionInvoke
*/
func ExampleFunctionInvoke() {
managementClient, err := functions.NewFunctionsManagementClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)
fnImage = os.Getenv("OCI_FN_IMAGE")
ctx := context.Background()
subnetID := createOrGetNetworkInfrastructure()
	// A subnet is required to expose functions and to be able to invoke them.
	// In multi-AD regions, subnets can be created in multiple ADs to provide redundancy.
fmt.Println("Network Layer Created")
// An application's name must be unique per-compartment
appName := "Example-Go-SDK-App"
// A function's name must be unique per-application
fnName := "Example-Go-SDK-Fn"
// We must specify which compartment we want to create our Application in
compartmentID := helpers.CompartmentID()
createdApp := createApplication(ctx, managementClient, appName, compartmentID, []string{*subnetID})
fmt.Println("Application Created:", *createdApp.DisplayName)
gotApp := getReadyApplication(ctx, managementClient, createdApp.Id)
fmt.Println("Application Got:", *gotApp.DisplayName)
listedApps := listApplications(ctx, managementClient, compartmentID)
fmt.Println("Applications Listed:", *listedApps[0].DisplayName)
createdFn := createFunction(ctx, managementClient, fnName, createdApp.Id)
fmt.Println("Function Created:", *createdFn.DisplayName)
gotFn := getReadyFunction(ctx, managementClient, createdFn.Id)
fmt.Println("Function Got:", *gotFn.DisplayName)
listedFns := listFunctions(ctx, managementClient, createdApp.Id)
fmt.Println("Functions Listed:", *listedFns[0].DisplayName)
invokeClient, err := functions.NewFunctionsInvokeClientWithConfigurationProvider(common.DefaultConfigProvider(), *createdFn.InvokeEndpoint)
helpers.FatalIfError(err)
invokeFunction(ctx, invokeClient, createdFn.Id)
fmt.Println("Function invoked")
deleteFunction(ctx, managementClient, createdFn.Id)
fmt.Println("Function Deleted:", *createdFn.DisplayName)
deleteApplication(ctx, managementClient, createdApp.Id)
fmt.Println("Application Deleted:", *createdApp.DisplayName)
// Output:
// Network Layer Created
// Application Created: Example-Go-SDK-App
// Application Got: Example-Go-SDK-App
// Applications Listed: Example-Go-SDK-App
// Function Created: Example-Go-SDK-Fn
// Function Got: Example-Go-SDK-Fn
// Functions Listed: Example-Go-SDK-Fn
// Function invoked
// Function Deleted: Example-Go-SDK-Fn
// Application Deleted: Example-Go-SDK-App
}
func createApplication(ctx context.Context, client functions.FunctionsManagementClient, appName string, compartmentID *string, subnetIDs []string) functions.Application {
details := functions.CreateApplicationDetails{
CompartmentId: compartmentID,
DisplayName: &appName,
SubnetIds: subnetIDs,
}
request := functions.CreateApplicationRequest{CreateApplicationDetails: details}
response, err := client.CreateApplication(ctx, request)
helpers.FatalIfError(err)
return response.Application
}
// Gets an application; if the application is not yet ready, polls until it becomes ready.
func getReadyApplication(ctx context.Context, client functions.FunctionsManagementClient, appID *string) (app functions.Application) {
metaWithRetry := helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetryGetApplication)
request := functions.GetApplicationRequest{
ApplicationId: appID,
RequestMetadata: metaWithRetry,
}
response, err := client.GetApplication(ctx, request)
helpers.FatalIfError(err)
return response.Application
}
func listApplications(ctx context.Context, client functions.FunctionsManagementClient, compartmentID *string) []functions.ApplicationSummary {
request := functions.ListApplicationsRequest{CompartmentId: compartmentID}
response, err := client.ListApplications(ctx, request)
helpers.FatalIfError(err)
return response.Items
}
func deleteApplication(ctx context.Context, client functions.FunctionsManagementClient, appID *string) {
request := functions.DeleteApplicationRequest{ApplicationId: appID}
_, err := client.DeleteApplication(ctx, request)
helpers.FatalIfError(err)
return
}
func createFunction(ctx context.Context, client functions.FunctionsManagementClient, fnName string, appID *string) functions.Function {
memory := int64(128)
details := functions.CreateFunctionDetails{
DisplayName: &fnName,
ApplicationId: appID,
Image: &fnImage,
MemoryInMBs: &memory,
}
request := functions.CreateFunctionRequest{CreateFunctionDetails: details}
response, err := client.CreateFunction(ctx, request)
helpers.FatalIfError(err)
return response.Function
}
func getReadyFunction(ctx context.Context, client functions.FunctionsManagementClient, fnID *string) functions.Function {
metaWithRetry := helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetryGetFunction)
request := functions.GetFunctionRequest{
FunctionId: fnID,
RequestMetadata: metaWithRetry,
}
response, err := client.GetFunction(ctx, request)
helpers.FatalIfError(err)
return response.Function
}
func listFunctions(ctx context.Context, client functions.FunctionsManagementClient, appID *string) []functions.FunctionSummary {
request := functions.ListFunctionsRequest{ApplicationId: appID}
response, err := client.ListFunctions(ctx, request)
helpers.FatalIfError(err)
return response.Items
}
func invokeFunction(ctx context.Context, client functions.FunctionsInvokeClient, fnID *string) *string {
// Retry function invocation with a standard back-off if we get a 404 in response.
// This is in case the function creation has not yet completed by the time invocation is attempted
metaWithRetry := helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetryInvokeFunction)
requestBody := ioutil.NopCloser(bytes.NewReader([]byte("")))
request := functions.InvokeFunctionRequest{
FunctionId: fnID,
InvokeFunctionBody: requestBody,
RequestMetadata: metaWithRetry,
}
response, err := client.InvokeFunction(ctx, request)
if err != nil {
fmt.Println("Invoke Error:", err)
return nil
}
resp := response.RawResponse
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
fmt.Println("Invoke Failed:", resp.StatusCode)
return nil
}
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Println("Could not read invoke body:", err)
}
responseBody := string(bodyBytes)
return &responseBody
}
func deleteFunction(ctx context.Context, client functions.FunctionsManagementClient, fnID *string) {
request := functions.DeleteFunctionRequest{FunctionId: fnID}
_, err := client.DeleteFunction(ctx, request)
helpers.FatalIfError(err)
return
}
func createOrGetNetworkInfrastructure() *string {
c, err := core.NewVirtualNetworkClientWithConfigurationProvider(common.DefaultConfigProvider())
if err != nil {
fmt.Println("Network client request error:", err)
}
sn := CreateOrGetSubnet()
gw := createOrGetInternetGateway(c, sn.VcnId)
createOrGetRouteTable(c, gw.Id, sn.VcnId)
return sn.Id
}
func createOrGetInternetGateway(c core.VirtualNetworkClient, vcnID *string) (gateway core.InternetGateway) {
ctx := context.Background()
//List Gateways
listGWRequest := core.ListInternetGatewaysRequest{
CompartmentId: helpers.CompartmentID(),
VcnId: vcnID,
DisplayName: &gwDisplayName,
}
listGWRespone, err := c.ListInternetGateways(ctx, listGWRequest)
if err != nil {
fmt.Println("Internet gateway list error:", err)
}
if len(listGWRespone.Items) >= 1 {
//Gateway with name already exists
gateway = listGWRespone.Items[0]
} else {
//Create new Gateway
enabled := true
createGWDetails := core.CreateInternetGatewayDetails{
CompartmentId: helpers.CompartmentID(),
IsEnabled: &enabled,
VcnId: vcnID,
DisplayName: &gwDisplayName,
}
createGWRequest := core.CreateInternetGatewayRequest{CreateInternetGatewayDetails: createGWDetails}
createGWResponse, err := c.CreateInternetGateway(ctx, createGWRequest)
if err != nil {
fmt.Println("Internet gateway create error:", err)
}
gateway = createGWResponse.InternetGateway
}
return
}
func createOrGetRouteTable(c core.VirtualNetworkClient, gatewayID, VcnID *string) (routeTable core.RouteTable) {
ctx := context.Background()
//List Route Table
listRTRequest := core.ListRouteTablesRequest{
CompartmentId: helpers.CompartmentID(),
VcnId: VcnID,
DisplayName: &rtDisplyName,
}
listRTResponse, err := c.ListRouteTables(ctx, listRTRequest)
if err != nil {
fmt.Println("Route table list error", err)
}
cidrRange := "0.0.0.0/0"
rr := core.RouteRule{
NetworkEntityId: gatewayID,
Destination: &cidrRange,
DestinationType: core.RouteRuleDestinationTypeCidrBlock,
}
if len(listRTResponse.Items) >= 1 {
//Default Route Table found and has at least 1 route rule
if len(listRTResponse.Items[0].RouteRules) >= 1 {
routeTable = listRTResponse.Items[0]
//Default Route table needs route rule adding
} else {
updateRTDetails := core.UpdateRouteTableDetails{
RouteRules: []core.RouteRule{rr},
}
updateRTRequest := core.UpdateRouteTableRequest{
RtId: listRTResponse.Items[0].Id,
UpdateRouteTableDetails: updateRTDetails,
}
updateRTResponse, err := c.UpdateRouteTable(ctx, updateRTRequest)
if err != nil {
fmt.Println("Error updating route table:", err)
}
routeTable = updateRTResponse.RouteTable
}
} else {
//No default route table found
fmt.Println("Error could not find VCN default route table, VCN OCID:", *VcnID, "Could not find route table:", rtDisplyName)
}
return
}
func shouldRetryGetApplication(response common.OCIOperationResponse) bool {
createResponse, correctType := response.Response.(functions.GetApplicationResponse)
if !correctType {
fmt.Println("Retry attempt used incompatible response type, expected GetApplicationResponse, found:", reflect.TypeOf(response.Response))
}
if createResponse.LifecycleState != functions.ApplicationLifecycleStateActive {
return true
}
return false
}
func shouldRetryGetFunction(response common.OCIOperationResponse) bool {
createResponse, correctType := response.Response.(functions.GetFunctionResponse)
if !correctType {
fmt.Println("Retry attempt used incompatible response type, expected GetFunctionResponse, found:", reflect.TypeOf(response.Response))
}
if createResponse.LifecycleState != functions.FunctionLifecycleStateActive {
return true
}
return false
}
func shouldRetryInvokeFunction(response common.OCIOperationResponse) bool {
invokeResponse, correctType := response.Response.(functions.InvokeFunctionResponse)
if !correctType {
fmt.Println("Retry attempt used incompatible response type, expected InvokeFunctionResponse, found:", reflect.TypeOf(response.Response))
}
if invokeResponse.RawResponse.StatusCode == 404 {
return true
}
return false
}
| ["\"OCI_FN_IMAGE\""] | [] | ["OCI_FN_IMAGE"] | [] | ["OCI_FN_IMAGE"] | go | 1 | 0 |
tests/_check_backup_files.py | import os
import re
from typing import TYPE_CHECKING
import boto3
if TYPE_CHECKING:
from mypy_boto3_s3.service_resource import BucketObjectsCollection, _Bucket
def get_bucket() -> "_Bucket":
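    # Connects to the MinIO endpoint used by the test environment; credentials
    # come from the S3_ACCESS_KEY / S3_SECRET_KEY environment variables.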
return boto3.resource(
service_name="s3",
endpoint_url="http://minio:9000",
region_name="us-east-1",
aws_access_key_id=os.getenv("S3_ACCESS_KEY"),
aws_secret_access_key=os.getenv("S3_SECRET_KEY"),
).Bucket("frappe")
def get_key_builder():
site_name = os.getenv("SITE_NAME")
assert site_name
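    # The returned builder checks that a key begins with the site name and
    # ends with the given suffix; the middle portion (typically a timestamp)
    # is not asserted here.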
def builder(key: str, suffix: str) -> bool:
return bool(re.match(rf"{site_name}.*{suffix}$", key))
return builder
def check_keys(objects: "BucketObjectsCollection"):
check_key = get_key_builder()
db = False
config = False
private_files = False
public_files = False
for obj in objects:
if check_key(obj.key, "database.sql.gz"):
db = True
elif check_key(obj.key, "site_config_backup.json"):
config = True
elif check_key(obj.key, "private-files.tar"):
private_files = True
elif check_key(obj.key, "files.tar"):
public_files = True
exc = lambda type_: Exception(f"Didn't push {type_} backup")
if not db:
raise exc("database")
if not config:
raise exc("site config")
if not private_files:
raise exc("private files")
if not public_files:
raise exc("public files")
print("All files were pushed to S3!")
def main() -> int:
bucket = get_bucket()
check_keys(bucket.objects.all())
return 0
if __name__ == "__main__":
raise SystemExit(main())
| [] | [] | ["S3_ACCESS_KEY", "SITE_NAME", "S3_SECRET_KEY"] | [] | ["S3_ACCESS_KEY", "SITE_NAME", "S3_SECRET_KEY"] | python | 3 | 0 |
main_test.go | package bosh_windows_acceptance_tests_test
import (
"archive/zip"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"bytes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
"gopkg.in/yaml.v2"
)
func init() {
log.SetFlags(log.LstdFlags | log.Lshortfile)
log.SetOutput(GinkgoWriter)
}
const BOSH_TIMEOUT = 90 * time.Minute
const GoZipFile = "go1.12.7.windows-amd64.zip"
const GolangURL = "https://storage.googleapis.com/golang/" + GoZipFile
const LgpoUrl = "https://download.microsoft.com/download/8/5/C/85C25433-A1B0-4FFA-9429-7E023E7DA8D8/LGPO.zip"
const lgpoFile = "LGPO.exe"
const redeployRetries = 10
type ManifestProperties struct {
DeploymentName string
ReleaseName string
AZ string
VmType string
RootEphemeralVmType string
VmExtensions string
Network string
StemcellOs string
StemcellVersion string
ReleaseVersion string
DefaultUsername string
DefaultPassword string
MountEphemeralDisk bool
SSHDisabledByDefault bool
SecurityComplianceApplied bool
}
type StemcellYML struct {
Version string `yaml:"version"`
Name string `yaml:"name"`
}
type Config struct {
Bosh struct {
CaCert string `json:"ca_cert"`
Client string `json:"client"`
ClientSecret string `json:"client_secret"`
Target string `json:"target"`
} `json:"bosh"`
Stemcellpath string `json:"stemcell_path"`
StemcellOs string `json:"stemcell_os"`
Az string `json:"az"`
VmType string `json:"vm_type"`
RootEphemeralVmType string `json:"root_ephemeral_vm_type"`
VmExtensions string `json:"vm_extensions"`
Network string `json:"network"`
DefaultUsername string `json:"default_username"`
DefaultPassword string `json:"default_password"`
SkipCleanup bool `json:"skip_cleanup"`
MountEphemeralDisk bool `json:"mount_ephemeral_disk"`
SkipMSUpdateTest bool `json:"skip_ms_update_test"`
SSHDisabledByDefault bool `json:"ssh_disabled_by_default"`
SecurityComplianceApplied bool `json:"security_compliance_applied"`
}
func NewConfig() (*Config, error) {
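	// CONFIG_JSON must point at a JSON file matching the Config struct above,
	// for example (illustrative values only):
	//   {"bosh": {"ca_cert": "...", "client": "admin", "client_secret": "...",
	//    "target": "10.0.0.6"}, "stemcell_path": "/tmp/stemcell.tgz",
	//    "stemcell_os": "windows2019", "az": "z1", "vm_type": "default",
	//    "vm_extensions": "500GB_ephemeral_disk", "network": "default"}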
configFilePath := os.Getenv("CONFIG_JSON")
if configFilePath == "" {
return nil, fmt.Errorf("invalid config file path: %v", configFilePath)
}
body, err := ioutil.ReadFile(configFilePath)
if err != nil {
return nil, fmt.Errorf("empty config file path: %v", configFilePath)
}
var config Config
err = json.Unmarshal(body, &config)
if err != nil {
return nil, fmt.Errorf("unable to parse config file: %s: %s", err.Error(), string(body))
}
if config.StemcellOs == "" {
return nil, fmt.Errorf("missing required field: %v", "stemcell_os")
}
if config.VmExtensions == "" {
config.VmExtensions = "500GB_ephemeral_disk"
}
return &config, nil
}
type BoshCommand struct {
DirectorIP string
Client string
ClientSecret string
CertPath string // Path to CA CERT file, if any
Timeout time.Duration
}
func setupBosh(config *Config) *BoshCommand {
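	// Writes the CA cert (if any) to a temp file and captures the director
	// connection details; BWATS_BOSH_TIMEOUT can override the default timeout.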
var boshCertPath string
cert := config.Bosh.CaCert
if cert != "" {
certFile, err := ioutil.TempFile("", "")
Expect(err).NotTo(HaveOccurred())
_, err = certFile.Write([]byte(cert))
Expect(err).NotTo(HaveOccurred())
boshCertPath, err = filepath.Abs(certFile.Name())
Expect(err).NotTo(HaveOccurred())
}
timeout := BOSH_TIMEOUT
var err error
if s := os.Getenv("BWATS_BOSH_TIMEOUT"); s != "" {
timeout, err = time.ParseDuration(s)
log.Printf("Using BWATS_BOSH_TIMEOUT (%s) as timeout\n", s)
if err != nil {
log.Printf("Error parsing BWATS_BOSH_TIMEOUT (%s): %s - falling back to default\n", s, err)
}
}
return &BoshCommand{
DirectorIP: config.Bosh.Target,
Client: config.Bosh.Client,
ClientSecret: config.Bosh.ClientSecret,
CertPath: boshCertPath,
Timeout: timeout,
}
}
func (c *BoshCommand) args(command string) []string {
args := strings.Split(command, " ")
args = append([]string{"-n", "-e", c.DirectorIP, "--client", c.Client, "--client-secret", c.ClientSecret}, args...)
if c.CertPath != "" {
args = append([]string{"--ca-cert", c.CertPath}, args...)
}
return args
}
func (c *BoshCommand) Run(command string) error {
return c.RunIn(command, "")
}
func (c *BoshCommand) RunInStdOut(command, dir string) ([]byte, error) {
cmd := exec.Command("bosh", c.args(command)...)
if dir != "" {
cmd.Dir = dir
log.Printf("\nRUNNING %q IN %q\n", strings.Join(cmd.Args, " "), dir)
} else {
log.Printf("\nRUNNING %q\n", strings.Join(cmd.Args, " "))
}
session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
if err != nil {
return nil, err
}
Eventually(session, c.Timeout).Should(gexec.Exit())
exitCode := session.ExitCode()
stdout := session.Out.Contents()
if exitCode != 0 {
var stderr []byte
if session.Err != nil {
stderr = session.Err.Contents()
}
return stdout, fmt.Errorf("Non-zero exit code for cmd %q: %d\nSTDERR:\n%s\nSTDOUT:%s\n",
strings.Join(cmd.Args, " "), exitCode, stderr, stdout)
}
return stdout, nil
}
func (c *BoshCommand) RunIn(command, dir string) error {
_, err := c.RunInStdOut(command, dir)
return err
}
var (
bosh *BoshCommand
deploymentName string
manifestPath string
stemcellName string
stemcellVersion string
releaseVersion string
tightLoopStemcellVersions []string
config *Config
)
var _ = Describe("BOSH Windows", func() {
BeforeSuite(func() {
var err error
config, err = NewConfig()
Expect(err).NotTo(HaveOccurred())
bosh = setupBosh(config)
bosh.Run("login")
deploymentName = fmt.Sprintf("windows-acceptance-test-%d", getTimestampInMs())
stemcellYML, err := fetchStemcellInfo(config.Stemcellpath)
Expect(err).NotTo(HaveOccurred())
stemcellName = stemcellYML.Name
stemcellVersion = stemcellYML.Version
releaseVersion = createBwatsRelease(bosh)
uploadStemcell(config, bosh)
err = config.deploy(bosh, deploymentName, stemcellVersion, releaseVersion)
Expect(err).NotTo(HaveOccurred())
})
AfterSuite(func() {
// Delete the releases created by the tight loop test
for index, version := range tightLoopStemcellVersions {
if index == len(tightLoopStemcellVersions)-1 {
continue // Last release is still being used by the deployment, so it cannot be deleted yet
}
bosh.Run(fmt.Sprintf("delete-release bwats-release/%s", version))
}
if config.SkipCleanup {
return
}
bosh.Run(fmt.Sprintf("-d %s delete-deployment --force", deploymentName))
bosh.Run(fmt.Sprintf("delete-stemcell %s/%s", stemcellName, stemcellVersion))
bosh.Run(fmt.Sprintf("delete-release bwats-release/%s", releaseVersion))
if len(tightLoopStemcellVersions) != 0 {
bosh.Run(fmt.Sprintf("delete-release bwats-release/%s", tightLoopStemcellVersions[len(tightLoopStemcellVersions)-1]))
}
if bosh.CertPath != "" {
os.RemoveAll(bosh.CertPath)
}
})
It("can run a job that relies on a package", func() {
time.Sleep(60 * time.Second)
Eventually(downloadLogs("check-multiple", "simple-job", 0, bosh),
time.Second*65).Should(gbytes.Say("60 seconds passed"))
})
It("successfully runs redeploy in a tight loop", func() {
pwd, err := os.Getwd()
Expect(err).To(BeNil())
releaseDir := filepath.Join(pwd, "assets", "bwats-release")
f, err := os.OpenFile(filepath.Join(releaseDir, "jobs", "simple-job", "templates", "pre-start.ps1"),
os.O_APPEND|os.O_WRONLY, 0600)
Expect(err).ToNot(HaveOccurred())
defer f.Close()
for i := 0; i < redeployRetries; i++ {
log.Printf("Redeploy attempt: #%d\n", i)
version := fmt.Sprintf("0.dev+%d", getTimestampInMs())
tightLoopStemcellVersions = append(tightLoopStemcellVersions, version)
Expect(bosh.RunIn("create-release --force --version "+version, releaseDir)).To(Succeed())
Expect(bosh.RunIn("upload-release", releaseDir)).To(Succeed())
err = config.deploy(bosh, deploymentName, stemcellVersion, version)
if err != nil {
downloadLogs("check-multiple", "simple-job", 0, bosh)
Fail(err.Error())
}
}
})
It("checks system dependencies and security, auto update has turned off, currently has a Service StartType of 'Manual' and initially had a StartType of 'Delayed', and password is randomized", func() {
err := runTest("check-system")
Expect(err).NotTo(HaveOccurred())
})
It("is fully updated", func() { // 860s
if config.SkipMSUpdateTest {
Skip("Skipping check-updates test - SkipMSUpdateTest set to true")
} else {
err := runTest("check-updates")
Expect(err).NotTo(HaveOccurred())
}
})
It("has all certificate authority certs that are present on the Windows Update Server", func() {
err := runTest("check-wu-certs")
Expect(err).NotTo(HaveOccurred())
})
It("mounts ephemeral disks when asked to do so and does not mount them otherwise", func() {
err := runTest("ephemeral-disk")
Expect(err).NotTo(HaveOccurred())
})
Context("slow compiling go package", func() {
var slowCompilingDeploymentName string
AfterEach(func() {
bosh.Run(fmt.Sprintf("-d %s delete-deployment --force", slowCompilingDeploymentName))
})
It("deploys when there is a slow to compile go package", func() {
pwd, err := os.Getwd()
Expect(err).NotTo(HaveOccurred())
manifestPath = filepath.Join(pwd, "assets", "slow-compile-manifest.yml")
slowCompilingDeploymentName = fmt.Sprintf("windows-acceptance-test-slow-compile-%d", getTimestampInMs())
err = config.deployWithManifest(bosh, slowCompilingDeploymentName, stemcellVersion, releaseVersion, manifestPath)
Expect(err).NotTo(HaveOccurred())
})
})
Context("ssh enabled", func() {
It("allows SSH connection", func() {
err := bosh.Run(fmt.Sprintf("-d %s ssh --opts=-T --command=exit", deploymentName))
Expect(err).NotTo(HaveOccurred())
})
It("cleans up ssh users after a successful connection", func() {
err := bosh.Run(fmt.Sprintf("-d %s ssh --opts=-T --command=exit", deploymentName))
Expect(err).NotTo(HaveOccurred())
err = runTest("check-ssh") // test for C:\Users only having one ssh user, net users only containing one ssh user.
Expect(err).NotTo(HaveOccurred())
})
})
})
func runTest(testName string) error {
return bosh.Run(fmt.Sprintf("-d %s run-errand --download-logs %s --tty", deploymentName, testName))
}
func uploadStemcell(config *Config, bosh *BoshCommand) {
matches, err := filepath.Glob(config.Stemcellpath)
Expect(err).NotTo(HaveOccurred())
Expect(matches).To(HaveLen(1))
for {
// the ami may not be immediately available, so we retry every three minutes.
// if it is actually broken, the concourse timeout will kick in at 90 minutes.
err = bosh.Run(fmt.Sprintf("upload-stemcell %s", matches[0]))
if err != nil {
time.Sleep(3 * time.Minute)
} else {
break
}
}
Expect(err).NotTo(HaveOccurred())
}
func createBwatsRelease(bosh *BoshCommand) string {
pwd, err := os.Getwd()
Expect(err).NotTo(HaveOccurred())
releaseVersion = fmt.Sprintf("0.dev+%d", getTimestampInMs())
var goZipPath string
if _, err := os.Stat(filepath.Join(pwd, GoZipFile)); os.IsNotExist(err) {
goZipPath, err = downloadFile("golang-", GolangURL)
Expect(err).NotTo(HaveOccurred())
} else {
goZipPath = filepath.Join(pwd, GoZipFile)
}
releaseDir := filepath.Join(pwd, "assets", "bwats-release")
Expect(bosh.RunIn(fmt.Sprintf("add-blob %s golang-windows/%s", goZipPath, GoZipFile), releaseDir)).To(Succeed())
var lgpoZipPath string
if _, err := os.Stat(filepath.Join(pwd, "LGPO.zip")); os.IsNotExist(err) {
lgpoZipPath, err = downloadFile("lgpo-", LgpoUrl)
Expect(err).NotTo(HaveOccurred())
} else {
lgpoZipPath = filepath.Join(pwd, "LGPO.zip")
}
zipReader, err := zip.OpenReader(lgpoZipPath)
Expect(err).NotTo(HaveOccurred())
lgpoPath, err := ioutil.TempFile("", lgpoFile)
Expect(err).NotTo(HaveOccurred())
for _, zipFile := range zipReader.File {
if zipFile.Name == "LGPO_30/"+lgpoFile {
filename := lgpoPath.Name()
f, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, zipFile.Mode())
Expect(err).NotTo(HaveOccurred())
zipRC, err := zipFile.Open()
Expect(err).NotTo(HaveOccurred())
_, err = io.Copy(f, zipRC)
Expect(err).NotTo(HaveOccurred())
err = f.Close()
Expect(err).NotTo(HaveOccurred())
err = zipRC.Close()
Expect(err).NotTo(HaveOccurred())
}
}
Expect(lgpoPath.Name()).To(BeAnExistingFile())
Expect(bosh.RunIn(fmt.Sprintf("add-blob %s lgpo/%s", lgpoPath.Name(), lgpoFile), releaseDir)).To(Succeed())
Expect(bosh.RunIn(fmt.Sprintf("create-release --force --version %s", releaseVersion), releaseDir)).To(Succeed())
Expect(bosh.RunIn("upload-release", releaseDir)).To(Succeed())
return releaseVersion
}
func (m ManifestProperties) toVarsString() string {
manifest := m.toMap()
fmtString := "-v %s=%s "
var b bytes.Buffer
for k, v := range manifest {
if v != "" {
_, err := fmt.Fprintf(&b, fmtString, k, v)
Expect(err).NotTo(HaveOccurred())
}
}
boolOperators := []string{
fmt.Sprintf("-v MountEphemeralDisk=%t", m.MountEphemeralDisk),
fmt.Sprintf("-v SSHDisabledByDefault=%t", m.SSHDisabledByDefault),
fmt.Sprintf("-v SecurityComplianceApplied=%t", m.SecurityComplianceApplied),
}
_, err := fmt.Fprint(&b, strings.Join(boolOperators, " "))
Expect(err).NotTo(HaveOccurred())
return b.String()
}
func (m ManifestProperties) toMap() map[string]string {
manifest := make(map[string]string)
manifest["DeploymentName"] = m.DeploymentName
manifest["ReleaseName"] = m.ReleaseName
manifest["AZ"] = m.AZ
manifest["VmType"] = m.VmType
manifest["RootEphemeralVmType"] = m.RootEphemeralVmType
manifest["VmExtensions"] = m.VmExtensions
manifest["Network"] = m.Network
manifest["StemcellOs"] = m.StemcellOs
manifest["StemcellVersion"] = m.StemcellVersion
manifest["ReleaseVersion"] = m.ReleaseVersion
manifest["DefaultUsername"] = m.DefaultUsername
manifest["DefaultPassword"] = m.DefaultPassword
return manifest
}
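// Illustrative only (hypothetical values): a ManifestProperties with DeploymentName
// "bwats", AZ "z1", MountEphemeralDisk=true and every other string field empty would
// produce a toVarsString() along the lines of
//   -v DeploymentName=bwats -v AZ=z1 -v MountEphemeralDisk=true -v SSHDisabledByDefault=false -v SecurityComplianceApplied=false
// (empty string fields are skipped, and map iteration order makes the -v pair order non-deterministic).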
func downloadLogs(instanceName string, jobName string, index int, bosh *BoshCommand) *gbytes.Buffer {
tempDir, err := ioutil.TempDir("", "")
Expect(err).NotTo(HaveOccurred())
defer os.RemoveAll(tempDir)
err = bosh.Run(fmt.Sprintf("-d %s logs %s/%d --dir %s", deploymentName, instanceName, index, tempDir))
Expect(err).NotTo(HaveOccurred())
matches, err := filepath.Glob(filepath.Join(tempDir, fmt.Sprintf("%s.%s.%d-*.tgz", deploymentName, instanceName, index)))
Expect(err).NotTo(HaveOccurred())
Expect(matches).To(HaveLen(1))
cmd := exec.Command("tar", "xf", matches[0], "-O", fmt.Sprintf("./%s/%s/job-service-wrapper.out.log", jobName, jobName))
session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
Expect(err).NotTo(HaveOccurred())
return session.Wait().Out
}
func getTimestampInMs() int64 {
return time.Now().UTC().UnixNano() / int64(time.Millisecond)
}
func fetchStemcellInfo(stemcellPath string) (StemcellYML, error) {
var stemcellInfo StemcellYML
tempDir, err := ioutil.TempDir("", "")
Expect(err).NotTo(HaveOccurred())
defer os.RemoveAll(tempDir)
cmd := exec.Command("tar", "xf", stemcellPath, "-C", tempDir, "stemcell.MF")
session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
Expect(err).NotTo(HaveOccurred())
Eventually(session, 20*time.Minute).Should(gexec.Exit())
exitCode := session.ExitCode()
if exitCode != 0 {
var stderr []byte
if session.Err != nil {
stderr = session.Err.Contents()
}
stdout := session.Out.Contents()
return stemcellInfo, fmt.Errorf("Non-zero exit code for cmd %q: %d\nSTDERR:\n%s\nSTDOUT:%s\n",
strings.Join(cmd.Args, " "), exitCode, stderr, stdout)
}
stemcellMF, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", tempDir, "stemcell.MF"))
Expect(err).NotTo(HaveOccurred())
err = yaml.Unmarshal(stemcellMF, &stemcellInfo)
Expect(err).NotTo(HaveOccurred())
Expect(stemcellInfo.Version).ToNot(BeNil())
Expect(stemcellInfo.Version).ToNot(BeEmpty())
return stemcellInfo, nil
}
func downloadFile(prefix, sourceUrl string) (string, error) {
tempfile, err := ioutil.TempFile("", prefix)
if err != nil {
return "", err
}
filename := tempfile.Name()
f, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)
if err != nil {
return "", err
}
defer f.Close()
res, err := http.Get(sourceUrl)
if err != nil {
return "", err
}
defer res.Body.Close()
if _, err := io.Copy(f, res.Body); err != nil {
return "", err
}
return filename, nil
}
func (c *Config) deployWithManifest(bosh *BoshCommand, deploymentName string, stemcellVersion string, bwatsVersion string, manifestPath string) error {
manifestProperties := ManifestProperties{
DeploymentName: deploymentName,
ReleaseName: "bwats-release",
AZ: c.Az,
VmType: c.VmType,
RootEphemeralVmType: c.RootEphemeralVmType,
VmExtensions: c.VmExtensions,
Network: c.Network,
DefaultUsername: c.DefaultUsername,
DefaultPassword: c.DefaultPassword,
StemcellOs: c.StemcellOs,
StemcellVersion: fmt.Sprintf(`"%s"`, stemcellVersion),
ReleaseVersion: bwatsVersion,
MountEphemeralDisk: c.MountEphemeralDisk,
SSHDisabledByDefault: c.SSHDisabledByDefault,
SecurityComplianceApplied: c.SecurityComplianceApplied,
}
var err error
if c.RootEphemeralVmType != "" {
// Use a separately named error for os.Getwd so the deploy error below is assigned
// to the outer err rather than a shadowed copy and silently dropped.
pwd, wdErr := os.Getwd()
Expect(wdErr).NotTo(HaveOccurred())
opsFilePath := filepath.Join(pwd, "assets", "root-disk-as-ephemeral.yml")
err = bosh.Run(fmt.Sprintf(
"-d %s deploy %s -o %s %s",
deploymentName,
manifestPath,
opsFilePath,
manifestProperties.toVarsString(),
))
} else {
err = bosh.Run(fmt.Sprintf("-d %s deploy %s %s", deploymentName, manifestPath, manifestProperties.toVarsString()))
}
return err
}
func (c *Config) deploy(bosh *BoshCommand, deploymentName string, stemcellVersion string, bwatsVersion string) error {
pwd, err := os.Getwd()
Expect(err).NotTo(HaveOccurred())
manifestPath = filepath.Join(pwd, "assets", "manifest.yml")
return c.deployWithManifest(bosh, deploymentName, stemcellVersion, bwatsVersion, manifestPath)
}
| ["\"CONFIG_JSON\"", "\"BWATS_BOSH_TIMEOUT\""] | [] | ["BWATS_BOSH_TIMEOUT", "CONFIG_JSON"] | [] | ["BWATS_BOSH_TIMEOUT", "CONFIG_JSON"] | go | 2 | 0 | |
library/dfa-lib-python/dfa_lib_python/task.py
import requests
import os
from .ProvenanceObject import ProvenanceObject
from .dependency import Dependency
from .task_status import TaskStatus
from .dataset import DataSet
from .performance import Performance
from datetime import datetime
dfa_url = os.environ.get('DFA_URL',"http://localhost:22000/")
class Task(ProvenanceObject):
"""
This class defines a dataflow task.
Attributes:
- id (:obj:`str`): Task Id.
- dataflow_tag (:obj:`str`): Dataflow tag.
- transformation_tag (:obj:`str`): Transformation tag.
- sub_id (:obj:`str`, optional): Task Sub Id.
- dependency (:obj:`Task`): Task which the object has a dependency.
- workspace (:obj:`str`, optional): Task workspace.
- resource (:obj:`str`, optional): Task resource.
- output (:obj:`str`, optional): Task output.
- error (:obj:`str`, optional): Task error.
"""
def __init__(self, id, dataflow_tag, transformation_tag,
sub_id="", dependency=None, workspace="", resource="",
output="", error=""):
ProvenanceObject.__init__(self, transformation_tag)
self._workspace = workspace
self._resource = resource
self._dependency = ""
self._output = output
self._error = error
self._sets = []
self._status = TaskStatus.READY.value
self._dataflow = dataflow_tag.lower()
self._transformation = transformation_tag.lower()
self._id = str(id)
self._sub_id = sub_id
self._performances = []
self.dfa_url = dfa_url
self.start_time = None
self.end_time = None
if isinstance(dependency, Task):
dependency = Dependency([dependency._tag], [dependency._id])
self._dependency = dependency.get_specification()
def add_dependency(self, dependency):
""" Add a dependency to the Task.
Args:
- dependency (:obj:`Dependency`): A :obj:`Dependency` object.
"""
assert isinstance(dependency, Dependency), \
"The dependency must be valid."
self._dependency = dependency.get_specification()
def set_datasets(self, datasets):
""" Set the Task DataSets.
Args:
- dataset (:obj:`list`): A :obj:`list` containing :obj:`DataSet` objects.
"""
assert isinstance(datasets, list), \
"The parameter must be a list."
for dataset in datasets:
self.add_dataset(dataset)
def add_dataset(self, dataset):
""" Add a dataset to the Task.
Args:
- dataset (:obj:`DataSet`): A :obj:`DataSet` object.
"""
assert isinstance(dataset, DataSet), "The dataset must be valid."
self._sets.append(dataset.get_specification())
def set_status(self, status):
""" Change the Task Status.
Args:
- status (:obj:`TaskStatus`): A :obj:`TaskStatus` object.
"""
assert isinstance(status, TaskStatus), \
"The task status must be valid."
self._status = status.value
def begin(self):
""" Send a post request to the Dataflow Analyzer API to store the Task.
"""
self.set_status(TaskStatus.RUNNING)
self.start_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.save()
def end(self):
""" Send a post request to the Dataflow Analyzer API to store the Task.
"""
self.set_status(TaskStatus.FINISHED)
self.end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
performance = Performance(self.start_time, self.end_time)
self._performances.append(performance.get_specification())
self.save()
def save(self):
""" Send a post request to the Dataflow Analyzer API to store the Task.
"""
# Avoid a double slash when DFA_URL already ends with "/".
url = dfa_url.rstrip('/') + '/pde/task/json'
message = self.get_specification()
r = requests.post(url, json=message)
print(r.status_code)
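# Illustrative usage sketch only (not part of the library). Task's constructor and
# begin()/end() are defined above; the tags below are hypothetical, and the DataSet
# constructor arguments are intentionally omitted because its signature lives in
# dataset.py:
#
#     t1 = Task(1, "clustering_flow", "extract")
#     t1.begin()
#     ...  # run the transformation, then attach provenance via t1.add_dataset(...)
#     t1.end()
#
# begin()/end() POST the task to the Dataflow Analyzer at DFA_URL
# (default "http://localhost:22000/"), so a running DfAnalyzer service is assumed.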
| [] | [] | ["DFA_URL"] | [] | ["DFA_URL"] | python | 1 | 0 |
avocadobites/avocadobites/asgi.py
"""
ASGI config for avocadobites project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'avocadobites.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 |
config/config_test.go
package config
import (
"github.com/stretchr/testify/assert"
"testing"
)
func TestLoadConfig(t *testing.T) {
sc := &SafeConfig{
C: &List{},
}
err := sc.Load("testdata/good.yaml")
if err != nil {
t.Errorf("Error loading config %v: %v", "good.yml", err)
}
c := sc.C
assert.NoError(t, err)
assert.Len(t, c.Config, 3)
assert.Equal(t, c.Config[0].Name, "server2")
assert.Equal(t, c.Config[0].LogFile, "examples/server2.status")
assert.Equal(t, c.Config[1].Name, "server3")
assert.Equal(t, c.Config[2].Name, "client")
}
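// Illustrative reconstruction of testdata/good.yaml based only on the assertions above;
// the exact key names and the logfile values for server3 and client are assumptions:
//   config:
//     - name: server2
//       logfile: examples/server2.status
//     - name: server3
//       logfile: examples/server3.status
//     - name: client
//       logfile: examples/client.status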
func TestLoadBadConfigs(t *testing.T) {
sc := &SafeConfig{
C: &List{},
}
tests := []struct {
ConfigFile string
ExpectedError string
}{
{
ConfigFile: "testdata/bad_config1.yaml",
ExpectedError: "error parsing config file: yaml: unmarshal errors:\n line 2: cannot unmarshal !!map into []config.Config",
},
{
ConfigFile: "testdata/bad_config2.yaml",
ExpectedError: "error parsing config file: config:logfile is required",
},
{
ConfigFile: "testdata/bad_config3.yaml",
ExpectedError: "error parsing config file: config:name is required",
},
{
ConfigFile: "testdata/bad_config4.yaml",
ExpectedError: "error parsing config file: yaml: unmarshal errors:\n line 4: field blah not found in type config.plain",
},
}
for i, test := range tests {
err := sc.Load(test.ConfigFile)
if err == nil {
t.Errorf("In case %v:\nExpected:\n%v\nGot:\nnil", i, test.ExpectedError)
continue
}
if err.Error() != test.ExpectedError {
t.Errorf("In case %v:\nExpected:\n%v\nGot:\n%v", i, test.ExpectedError, err.Error())
}
}
}
| [] | [] | [] | [] | [] | go | null | null | null
tests/integration/parity/conftest.py
import os
import pytest
import tempfile
import zipfile
from eth_utils import (
is_checksum_address,
is_dict,
)
from .install_parity import (
get_executable_path,
install_parity,
)
from .utils import (
get_process,
)
KEYFILE_PW = 'web3py-test'
PARITY_2_5_13_FIXTURE = {
'zip': 'parity-2.5.13-fixture.zip',
'coinbase': 'dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd',
'block_hash_revert_no_msg': '0x0496eca70b312db0d7b14753f5545e48f43ad70e8a2dd4fa5dc6c6409448c394', # noqa: E501
'block_hash_revert_with_msg': '0xf117005506c338177df21519008e3805cb718c2cad5f4a4c07754bf59bfd996c', # noqa: E501
'block_hash_with_log': '0xeb1cd5061f8e8c0a1a3b75dd48600fa126b6c0e753406276f2a6a544f1e41ec1',
'block_with_txn_hash': '0xabf9142af100741620657e5a9a46e788c776303ca4986478f892384b83d8761c',
'emitter_address': '0x4aA591a07989b4F810E2F5cE97e769D60710f168',
'emitter_deploy_txn_hash': '0xef44cd36d86c41640c710026acf45f3b63731f72ac2a1744a005f6690bfa7613',
'empty_block_hash': '0x63b0e0f16340e65662948cde20ba93c96ae311997db672b0101357f15b071fe5',
'keyfile_pw': 'web3py-test',
'math_address': '0xd794C821fCCFF5D96F5Db44af7e29977630A9dc2',
'math_deploy_txn_hash': '0x356278504f40db914545888674cedc0d8ccd4a939665eb4ab83b569db0c477cb',
'mined_txn_hash': '0x86fbfe56cce542ff0a2a2716c31675a0c9c43701725c4a751d20ee2ddf8a733d',
'raw_txn_account': '0x39EEed73fb1D3855E90Cbd42f348b3D7b340aAA6',
'revert_address': '0x14F3674571D76Bf66cA8EBD84dC02060933400b4',
'txn_hash_with_log': '0x1407ae0fbc79622e60c21b59b0cb047a5f8d0219ad95969096c8c0e23f342f5c'}
@pytest.fixture(scope='module')
def parity_binary():
if 'PARITY_BINARY' in os.environ:
return os.environ['PARITY_BINARY']
elif 'PARITY_VERSION' in os.environ:
parity_version = os.environ['PARITY_VERSION']
_parity_binary = get_executable_path(parity_version)
if not os.path.exists(_parity_binary):
install_parity(parity_version)
assert os.path.exists(_parity_binary)
return _parity_binary
else:
return 'parity'
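# Illustrative only: the fixture above resolves the parity executable by checking, in
# order, an explicit PARITY_BINARY path, a PARITY_VERSION to download and install, and
# finally a bare "parity" on PATH. A hypothetical invocation could look like:
#   PARITY_VERSION=2.5.13 pytest tests/integration/parity
# (the version shown is only an example matching the fixture data above).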
def get_parity_version(parity_binary):
pass
@pytest.fixture(scope="module")
def parity_fixture_data(parity_binary):
return PARITY_2_5_13_FIXTURE
@pytest.fixture(scope='module')
def datadir(tmpdir_factory, parity_fixture_data):
zipfile_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
'..',
parity_fixture_data['zip'],
))
base_dir = tmpdir_factory.mktemp('parity')
tmp_datadir = os.path.join(str(base_dir), 'datadir')
with zipfile.ZipFile(zipfile_path, 'r') as zip_ref:
zip_ref.extractall(tmp_datadir)
return tmp_datadir
@pytest.fixture(scope="module")
def author(parity_fixture_data):
# need the address to unlock before web3 module has been opened
author = parity_fixture_data['coinbase']
return author
@pytest.fixture(scope="module")
def passwordfile():
password_dir = tempfile.mkdtemp()
password_path = os.path.join(password_dir, 'password')
with open(password_path, 'w') as f:
f.write(KEYFILE_PW)
yield password_path
if os.path.exists(password_path):
os.remove(password_path)
@pytest.fixture(scope="module")
def parity_process(parity_command_arguments):
yield from get_process(parity_command_arguments)
@pytest.fixture(scope="module")
def parity_import_blocks_process(parity_import_blocks_command):
yield from get_process(parity_import_blocks_command, terminates=True)
@pytest.fixture(scope='module')
def coinbase(web3):
return web3.eth.coinbase
@pytest.fixture(scope="module")
def math_contract_deploy_txn_hash(parity_fixture_data):
return parity_fixture_data['math_deploy_txn_hash']
@pytest.fixture(scope="module")
def math_contract(web3, math_contract_factory, parity_fixture_data):
return math_contract_factory(address=parity_fixture_data['math_address'])
@pytest.fixture()
def math_contract_address(math_contract, address_conversion_func):
return address_conversion_func(math_contract.address)
@pytest.fixture(scope="module")
def emitter_contract(web3, emitter_contract_factory, parity_fixture_data):
return emitter_contract_factory(address=parity_fixture_data['emitter_address'])
@pytest.fixture()
def emitter_contract_address(emitter_contract, address_conversion_func):
return address_conversion_func(emitter_contract.address)
@pytest.fixture(scope="module")
def unlocked_account(web3, unlockable_account, unlockable_account_pw):
yield unlockable_account
@pytest.fixture(scope='module')
def unlockable_account_pw(parity_fixture_data):
return parity_fixture_data['keyfile_pw']
@pytest.fixture(scope="module")
def unlockable_account(web3, coinbase):
yield coinbase
@pytest.fixture()
def unlockable_account_dual_type(unlockable_account, address_conversion_func):
return address_conversion_func(unlockable_account)
@pytest.fixture
def unlocked_account_dual_type(unlockable_account_dual_type):
return unlockable_account_dual_type
@pytest.fixture(scope="module")
def funded_account_for_raw_txn(parity_fixture_data):
account = parity_fixture_data['raw_txn_account']
assert is_checksum_address(account)
return account
@pytest.fixture(scope="module")
def empty_block(web3, parity_fixture_data):
block = web3.eth.get_block(parity_fixture_data['empty_block_hash'])
assert is_dict(block)
return block
@pytest.fixture(scope="module")
def block_with_txn(web3, parity_fixture_data):
block = web3.eth.get_block(parity_fixture_data['block_with_txn_hash'])
assert is_dict(block)
return block
@pytest.fixture(scope="module")
def mined_txn_hash(parity_fixture_data):
return parity_fixture_data['mined_txn_hash']
@pytest.fixture(scope="module")
def block_with_txn_with_log(web3, parity_fixture_data):
block = web3.eth.get_block(parity_fixture_data['block_hash_with_log'])
assert is_dict(block)
return block
@pytest.fixture(scope="module")
def txn_hash_with_log(parity_fixture_data):
return parity_fixture_data['txn_hash_with_log']
@pytest.fixture(scope="module")
def txn_filter_params(coinbase):
return {
"fromBlock": "earliest",
"toBlock": "latest",
"fromAddress": [coinbase],
}
@pytest.fixture(scope="module")
def block_hash_revert_no_msg(parity_fixture_data):
return parity_fixture_data['block_hash_revert_no_msg']
@pytest.fixture(scope="module")
def block_hash_revert_with_msg(parity_fixture_data):
return parity_fixture_data['block_hash_revert_with_msg']
@pytest.fixture(scope="module")
def revert_contract(revert_contract_factory, parity_fixture_data):
return revert_contract_factory(address=parity_fixture_data['revert_address'])
| [] | [] | ["PARITY_VERSION", "PARITY_BINARY"] | [] | ["PARITY_VERSION", "PARITY_BINARY"] | python | 2 | 0 |
ezsub/const.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import platform
from pathlib import Path
from datetime import datetime
# setup.py and PyPI strip the leading zero from the month and day,
# but we keep leading zeros in the CalVer string (YYYY.MM.DD), as well as in the
# changelog and tags. The "what's new" check tests this format.
__version__ = '2021.09.07'
SEMVER = '0.4.3'
CALVER = __version__
__author__ = 'Zaman'
__contact__ = '[email protected]'
__url__ = 'http://github.com/7aman/ezsub'
__license__ = 'MIT'
def get_root(OS):
if OS == 'Windows':
root = Path(os.environ['PROGRAMDATA']).joinpath('ezsub')
elif OS == 'Linux':
root = Path().home().joinpath('.ezsub')
else:
root = Path().home().joinpath('.ezsub')
root.mkdir(parents=True, exist_ok=True)
return root
def get_destination():
return Path().home().joinpath('Downloads', 'ezsub').resolve()
PROGRAMNAME = 'ezsub'
OS = platform.system()
ROOT = get_root(OS)
HISTORY = ROOT.joinpath('history.txt')
CONFIGFILE = ROOT.joinpath('user.conf')
LOGFILE = ROOT.joinpath('ezsub.log')
LOGLEVEL = 'INFO'
LOGFILEMODE = 'a'
LOGFORMAT = "[%(asctime)s][%(levelname)s]{%(name)s:%(lineno)d}# %(message)s"
DESTINATION = get_destination()
MIRRORS = ['subscene', 'hasti', 'subf2m', 'delta']
SITE = MIRRORS[0]
AUTO_SELECT = False
OPEN_AFTER = True
GROUP = True
LNGS = 'en'
PERIOD = 7 # days delay to remind for update
BOOLEAN_STATES = {
'0': False,
'false': False,
'False': False,
'no': False,
'No': False,
'1': True,
'true': True,
'True': True,
'yes': True,
'Yes': True,
}
LANGUAGE_PAIRS = {
"ar": "arabic",
"da": "danish",
"en": "english",
"es": "spanish",
"fa": "farsi_persian",
"fr": "french",
"he": "hebrew",
"id": "indonesian",
"it": "italian",
"no": "norwegian",
"sv": "swedish",
"vi": "vietnamese",
"big5": "big_5_code",
}
SUPPORTED_LNGS = ["*",] + list(LANGUAGE_PAIRS.keys())[:]
TODAY = datetime.now()
TODAY_STAMP = str(int(TODAY.timestamp()))
TODAY_TIMEADD = TODAY.strftime('%Y%m%d-%H%M')
def valid_boolean(value):
if value in BOOLEAN_STATES.keys():
return True
return False
def valid_site(value):
"""site value can be space separated site names"""
sites = value.split()
for site in sites:
if site not in MIRRORS:
return False
return True
def valid_lngs_string(value):
for lng in value.split():
if lng not in SUPPORTED_LNGS:
return False
return True
def valid_destination(value):
path = Path(value).resolve()
if path.exists():
if not path.is_dir():
return False
return True
def valid_captcha(value):
return True
def valid_reminder(value):
try:
value = int(value)
# zero means never
return value > -1
except ValueError:
return False
def valid_timestamp(value):
try:
value = int(value)
return value > 0
except ValueError:
return False
SETTINGS_SKELETON = {
'Defaults': {
'open_after': valid_boolean,
'auto_select': valid_boolean,
'group': valid_boolean,
'site': valid_site,
'lngs': valid_lngs_string,
'destination': valid_destination
},
'Login': {
'captcha': valid_captcha
},
'Update': {
"remind_every": valid_reminder,
"last_check": valid_timestamp
}
}
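# Illustrative only (the INI layout is an assumption; see how CONFIGFILE is parsed):
# a user.conf satisfying SETTINGS_SKELETON above could look like
#   [Defaults]
#   open_after = yes
#   auto_select = no
#   site = subscene hasti
#   lngs = en fa
#   destination = /home/user/Downloads/ezsub
#   [Update]
#   remind_every = 7
# where every value must pass the corresponding valid_* function registered above.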
# requests timeout
TIMEOUT = 5
RETRY = 3
SIGNS = [
"Temporary unavailable",
"Request Timeout",
"please retry a few minutes later.",
'Backend server error',
'many requests'
]
BAD = [
"Bad request"
]
MAX_WORKERS = 8
# colors and text formats
class Style:
END = '\033[0m'
BOLD = '\033[1m'
ITALIC = '\033[3m'
UNDERSCORE = '\033[4m'
URL = UNDERSCORE
REVERSE = '\033[7m'
TITLE = REVERSE
CROSSED = '\033[9m'
OVERLINED = '\033[53m'
BLUE = '\033[34m'
RED = '\033[91m'
FAIL = RED
ERROR = RED
GREEN = '\033[92m'
SUCCESS = GREEN
OK = GREEN
YELLOW = '\033[93m'
WARNING = YELLOW
WARN = YELLOW
INFO = '\033[94m'
def __init__(self):
if OS == "Windows":
os.system('color')
def demo(self):
print("------start demo----")
variables = dir(self)
for a in variables:
try:
print(getattr(self, a) + a + self.END)
print()
except:
break
print("------stop demo----")
def render(self, styles, text):
ss = []
for s in styles.split(';'):
if hasattr(self, s.upper()):
ss.append(getattr(self, s.upper()))
ss.append(str(text))
ss.append(self.END)
return "".join(ss)
class Curser:
LU = '\033[F'
SAVE = '\033[s'
RESTORE = '\033[u'
CFH = '\033[0K'
CTH = '\033[1K'
CL = '\033[2K'
| [] | [] | ["PROGRAMDATA"] | [] | ["PROGRAMDATA"] | python | 1 | 0 |
venv/Lib/site-packages/matplotlib/backends/backend_qt5.py
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import functools
import os
import re
import signal
import sys
from six import unichr
import traceback
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
TimerBase, cursors, ToolContainerBase, StatusbarBase)
import matplotlib.backends.qt_editor.figureoptions as figureoptions
from matplotlib.backends.qt_editor.formsubplottool import UiSubplotTool
from matplotlib.figure import Figure
from matplotlib.backend_managers import ToolManager
from matplotlib import backend_tools
from .qt_compat import (
QtCore, QtGui, QtWidgets, _getSaveFileName, is_pyqt5, __version__, QT_API)
backend_version = __version__
# SPECIAL_KEYS are keys that do *not* return their unicode name
# instead they have manually specified names
SPECIAL_KEYS = {QtCore.Qt.Key_Control: 'control',
QtCore.Qt.Key_Shift: 'shift',
QtCore.Qt.Key_Alt: 'alt',
QtCore.Qt.Key_Meta: 'super',
QtCore.Qt.Key_Return: 'enter',
QtCore.Qt.Key_Left: 'left',
QtCore.Qt.Key_Up: 'up',
QtCore.Qt.Key_Right: 'right',
QtCore.Qt.Key_Down: 'down',
QtCore.Qt.Key_Escape: 'escape',
QtCore.Qt.Key_F1: 'f1',
QtCore.Qt.Key_F2: 'f2',
QtCore.Qt.Key_F3: 'f3',
QtCore.Qt.Key_F4: 'f4',
QtCore.Qt.Key_F5: 'f5',
QtCore.Qt.Key_F6: 'f6',
QtCore.Qt.Key_F7: 'f7',
QtCore.Qt.Key_F8: 'f8',
QtCore.Qt.Key_F9: 'f9',
QtCore.Qt.Key_F10: 'f10',
QtCore.Qt.Key_F11: 'f11',
QtCore.Qt.Key_F12: 'f12',
QtCore.Qt.Key_Home: 'home',
QtCore.Qt.Key_End: 'end',
QtCore.Qt.Key_PageUp: 'pageup',
QtCore.Qt.Key_PageDown: 'pagedown',
QtCore.Qt.Key_Tab: 'tab',
QtCore.Qt.Key_Backspace: 'backspace',
QtCore.Qt.Key_Enter: 'enter',
QtCore.Qt.Key_Insert: 'insert',
QtCore.Qt.Key_Delete: 'delete',
QtCore.Qt.Key_Pause: 'pause',
QtCore.Qt.Key_SysReq: 'sysreq',
QtCore.Qt.Key_Clear: 'clear', }
# define which modifier keys are collected on keyboard events.
# elements are (mpl names, Modifier Flag, Qt Key) tuples
SUPER = 0
ALT = 1
CTRL = 2
SHIFT = 3
MODIFIER_KEYS = [('super', QtCore.Qt.MetaModifier, QtCore.Qt.Key_Meta),
('alt', QtCore.Qt.AltModifier, QtCore.Qt.Key_Alt),
('ctrl', QtCore.Qt.ControlModifier, QtCore.Qt.Key_Control),
('shift', QtCore.Qt.ShiftModifier, QtCore.Qt.Key_Shift),
]
if sys.platform == 'darwin':
# in OSX, the control and super (aka cmd/apple) keys are switched, so
# switch them back.
SPECIAL_KEYS.update({QtCore.Qt.Key_Control: 'cmd', # cmd/apple key
QtCore.Qt.Key_Meta: 'control',
})
MODIFIER_KEYS[0] = ('cmd', QtCore.Qt.ControlModifier,
QtCore.Qt.Key_Control)
MODIFIER_KEYS[2] = ('ctrl', QtCore.Qt.MetaModifier,
QtCore.Qt.Key_Meta)
cursord = {
cursors.MOVE: QtCore.Qt.SizeAllCursor,
cursors.HAND: QtCore.Qt.PointingHandCursor,
cursors.POINTER: QtCore.Qt.ArrowCursor,
cursors.SELECT_REGION: QtCore.Qt.CrossCursor,
cursors.WAIT: QtCore.Qt.WaitCursor,
}
# make place holder
qApp = None
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one.
"""
global qApp
if qApp is None:
app = QtWidgets.QApplication.instance()
if app is None:
# check for DISPLAY env variable on X11 build of Qt
if is_pyqt5():
try:
from PyQt5 import QtX11Extras
is_x11_build = True
except ImportError:
is_x11_build = False
else:
is_x11_build = hasattr(QtGui, "QX11Info")
if is_x11_build:
display = os.environ.get('DISPLAY')
if display is None or not re.search(r':\d', display):
raise RuntimeError('Invalid DISPLAY variable')
qApp = QtWidgets.QApplication([b"matplotlib"])
qApp.lastWindowClosed.connect(qApp.quit)
else:
qApp = app
if is_pyqt5():
try:
qApp.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
qApp.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
except AttributeError:
pass
def _allow_super_init(__init__):
"""
Decorator for ``__init__`` to allow ``super().__init__`` on PyQt4/PySide2.
"""
if QT_API == "PyQt5":
return __init__
else:
# To work around lack of cooperative inheritance in PyQt4, PySide,
# and PySide2, when calling FigureCanvasQT.__init__, we temporarily
# patch QWidget.__init__ by a cooperative version, that first calls
# QWidget.__init__ with no additional arguments, and then finds the
# next class in the MRO with an __init__ that does support cooperative
# inheritance (i.e., not defined by the PyQt4, PySide, PySide2, sip
# or Shiboken packages), and manually call its `__init__`, once again
# passing the additional arguments.
qwidget_init = QtWidgets.QWidget.__init__
def cooperative_qwidget_init(self, *args, **kwargs):
qwidget_init(self)
mro = type(self).__mro__
next_coop_init = next(
cls for cls in mro[mro.index(QtWidgets.QWidget) + 1:]
if cls.__module__.split(".")[0] not in [
"PyQt4", "sip", "PySide", "PySide2", "Shiboken"])
next_coop_init.__init__(self, *args, **kwargs)
@functools.wraps(__init__)
def wrapper(self, **kwargs):
try:
QtWidgets.QWidget.__init__ = cooperative_qwidget_init
__init__(self, **kwargs)
finally:
# Restore __init__
QtWidgets.QWidget.__init__ = qwidget_init
return wrapper
class TimerQT(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses Qt timer events.
Attributes
----------
interval : int
The time between timer events in milliseconds. Default is 1000 ms.
single_shot : bool
Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
callbacks : list
Stores list of (func, args) tuples that will be called upon timer
events. This list can be manipulated directly, or the functions
`add_callback` and `remove_callback` can be used.
'''
def __init__(self, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
# Create a new timer and connect the timeout() signal to the
# _on_timer method.
self._timer = QtCore.QTimer()
self._timer.timeout.connect(self._on_timer)
self._timer_set_interval()
def _timer_set_single_shot(self):
self._timer.setSingleShot(self._single)
def _timer_set_interval(self):
self._timer.setInterval(self._interval)
def _timer_start(self):
self._timer.start()
def _timer_stop(self):
self._timer.stop()
class FigureCanvasQT(QtWidgets.QWidget, FigureCanvasBase):
# map Qt button codes to MouseEvent's ones:
buttond = {QtCore.Qt.LeftButton: 1,
QtCore.Qt.MidButton: 2,
QtCore.Qt.RightButton: 3,
# QtCore.Qt.XButton1: None,
# QtCore.Qt.XButton2: None,
}
@_allow_super_init
def __init__(self, figure):
_create_qApp()
super(FigureCanvasQT, self).__init__(figure=figure)
self.figure = figure
# We don't want to scale up the figure DPI more than once.
# Note, we don't handle a signal for changing DPI yet.
figure._original_dpi = figure.dpi
self._update_figure_dpi()
# In cases with mixed resolution displays, we need to be careful if the
# dpi_ratio changes - in this case we need to resize the canvas
# accordingly. We could watch for screenChanged events from Qt, but
# the issue is that we can't guarantee this will be emitted *before*
# the first paintEvent for the canvas, so instead we keep track of the
# dpi_ratio value here and in paintEvent we resize the canvas if
# needed.
self._dpi_ratio_prev = None
self._draw_pending = False
self._is_drawing = False
self._draw_rect_callback = lambda painter: None
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
self.setMouseTracking(True)
self.resize(*self.get_width_height())
# Key auto-repeat enabled by default
self._keyautorepeat = True
palette = QtGui.QPalette(QtCore.Qt.white)
self.setPalette(palette)
def _update_figure_dpi(self):
dpi = self._dpi_ratio * self.figure._original_dpi
self.figure._set_dpi(dpi, forward=False)
@property
def _dpi_ratio(self):
# Not available on Qt4 or some older Qt5.
try:
# self.devicePixelRatio() returns 0 in rare cases
return self.devicePixelRatio() or 1
except AttributeError:
return 1
def _update_dpi(self):
# As described in __init__ above, we need to be careful in cases with
# mixed resolution displays if dpi_ratio is changing between painting
# events.
# Return whether we triggered a resizeEvent (and thus a paintEvent)
# from within this function.
if self._dpi_ratio != self._dpi_ratio_prev:
# We need to update the figure DPI.
self._update_figure_dpi()
self._dpi_ratio_prev = self._dpi_ratio
# The easiest way to resize the canvas is to emit a resizeEvent
# since we implement all the logic for resizing the canvas for
# that event.
event = QtGui.QResizeEvent(self.size(), self.size())
self.resizeEvent(event)
# resizeEvent triggers a paintEvent itself, so we exit this one
# (after making sure that the event is immediately handled).
return True
return False
def get_width_height(self):
w, h = FigureCanvasBase.get_width_height(self)
return int(w / self._dpi_ratio), int(h / self._dpi_ratio)
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, guiEvent=event)
def leaveEvent(self, event):
QtWidgets.QApplication.restoreOverrideCursor()
FigureCanvasBase.leave_notify_event(self, guiEvent=event)
def mouseEventCoords(self, pos):
"""Calculate mouse coordinates in physical pixels
Qt5 use logical pixels, but the figure is scaled to physical
pixels for rendering. Transform to physical pixels so that
all of the down-stream transforms work as expected.
Also, the origin is different and needs to be corrected.
"""
dpi_ratio = self._dpi_ratio
x = pos.x()
# flip y so y=0 is bottom of canvas
y = self.figure.bbox.height / dpi_ratio - pos.y()
return x * dpi_ratio, y * dpi_ratio
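# Illustrative only: on a 2x HiDPI screen (dpi_ratio == 2) with a figure bbox height of
# 960 physical pixels, a Qt position of (100, 50) logical pixels maps to matplotlib
# coordinates (200, 860) - x is scaled by dpi_ratio and y is flipped so y == 0 is the
# bottom of the canvas.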
def mousePressEvent(self, event):
x, y = self.mouseEventCoords(event.pos())
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y, button,
guiEvent=event)
def mouseDoubleClickEvent(self, event):
x, y = self.mouseEventCoords(event.pos())
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y,
button, dblclick=True,
guiEvent=event)
def mouseMoveEvent(self, event):
x, y = self.mouseEventCoords(event)
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def mouseReleaseEvent(self, event):
x, y = self.mouseEventCoords(event)
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_release_event(self, x, y, button,
guiEvent=event)
if is_pyqt5():
def wheelEvent(self, event):
x, y = self.mouseEventCoords(event)
# from QWheelEvent::delta doc
if event.pixelDelta().x() == 0 and event.pixelDelta().y() == 0:
steps = event.angleDelta().y() / 120
else:
steps = event.pixelDelta().y()
if steps:
FigureCanvasBase.scroll_event(
self, x, y, steps, guiEvent=event)
else:
def wheelEvent(self, event):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
# from QWheelEvent::delta doc
steps = event.delta() / 120
if event.orientation() == QtCore.Qt.Vertical:
FigureCanvasBase.scroll_event(
self, x, y, steps, guiEvent=event)
def keyPressEvent(self, event):
key = self._get_key(event)
if key is not None:
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def keyReleaseEvent(self, event):
key = self._get_key(event)
if key is not None:
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
@property
def keyAutoRepeat(self):
"""
If True, enable auto-repeat for key events.
"""
return self._keyautorepeat
@keyAutoRepeat.setter
def keyAutoRepeat(self, val):
self._keyautorepeat = bool(val)
def resizeEvent(self, event):
# _dpi_ratio_prev will be set the first time the canvas is painted, and
# the rendered buffer is useless before anyways.
if self._dpi_ratio_prev is None:
return
w = event.size().width() * self._dpi_ratio
h = event.size().height() * self._dpi_ratio
dpival = self.figure.dpi
winch = w / dpival
hinch = h / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
# pass back into Qt to let it finish
QtWidgets.QWidget.resizeEvent(self, event)
# emit our resize events
FigureCanvasBase.resize_event(self)
def sizeHint(self):
w, h = self.get_width_height()
return QtCore.QSize(w, h)
def minimumSizeHint(self):
return QtCore.QSize(10, 10)
def _get_key(self, event):
if not self._keyautorepeat and event.isAutoRepeat():
return None
event_key = event.key()
event_mods = int(event.modifiers()) # actually a bitmask
# get names of the pressed modifier keys
# bit twiddling to pick out modifier keys from event_mods bitmask,
# if event_key is a MODIFIER, it should not be duplicated in mods
mods = [name for name, mod_key, qt_key in MODIFIER_KEYS
if event_key != qt_key and (event_mods & mod_key) == mod_key]
try:
# for certain keys (enter, left, backspace, etc) use a word for the
# key, rather than unicode
key = SPECIAL_KEYS[event_key]
except KeyError:
# unicode defines code points up to 0x0010ffff
# QT will use Key_Codes larger than that for keyboard keys that are
# are not unicode characters (like multimedia keys)
# skip these
# if you really want them, you should add them to SPECIAL_KEYS
MAX_UNICODE = 0x10ffff
if event_key > MAX_UNICODE:
return None
key = unichr(event_key)
# qt delivers capitalized letters. fix capitalization
# note that capslock is ignored
if 'shift' in mods:
mods.remove('shift')
else:
key = key.lower()
mods.reverse()
return '+'.join(mods + [key])
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting
periodic events through the backend's native event
loop. Implemented only for backends with GUIs.
Other Parameters
----------------
interval : scalar
Timer interval in milliseconds
callbacks : list
Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``
will be executed by the timer every *interval*.
"""
return TimerQT(*args, **kwargs)
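# Illustrative only (hypothetical callback): timers created here run on Qt's event
# loop, e.g.
#     timer = canvas.new_timer(interval=1000,
#                              callbacks=[(canvas.draw_idle, [], {})])
#     timer.start()
# where `canvas` is an existing FigureCanvasQT instance.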
def flush_events(self):
qApp.processEvents()
def start_event_loop(self, timeout=0):
if hasattr(self, "_event_loop") and self._event_loop.isRunning():
raise RuntimeError("Event loop already running")
self._event_loop = event_loop = QtCore.QEventLoop()
if timeout:
timer = QtCore.QTimer.singleShot(timeout * 1000, event_loop.quit)
event_loop.exec_()
def stop_event_loop(self, event=None):
if hasattr(self, "_event_loop"):
self._event_loop.quit()
def draw(self):
"""Render the figure, and queue a request for a Qt draw.
"""
# The renderer draw is done here; delaying causes problems with code
# that uses the result of the draw() to update plot elements.
if self._is_drawing:
return
self._is_drawing = True
try:
super(FigureCanvasQT, self).draw()
finally:
self._is_drawing = False
self.update()
def draw_idle(self):
"""Queue redraw of the Agg buffer and request Qt paintEvent.
"""
# The Agg draw needs to be handled by the same thread matplotlib
# modifies the scene graph from. Post Agg draw request to the
# current event loop in order to ensure thread affinity and to
# accumulate multiple draw requests from event handling.
# TODO: queued signal connection might be safer than singleShot
if not (self._draw_pending or self._is_drawing):
self._draw_pending = True
QtCore.QTimer.singleShot(0, self._draw_idle)
def _draw_idle(self):
if self.height() < 0 or self.width() < 0:
self._draw_pending = False
if not self._draw_pending:
return
try:
self.draw()
except Exception:
# Uncaught exceptions are fatal for PyQt5, so catch them instead.
traceback.print_exc()
finally:
self._draw_pending = False
def drawRectangle(self, rect):
# Draw the zoom rectangle to the QPainter. _draw_rect_callback needs
# to be called at the end of paintEvent.
if rect is not None:
def _draw_rect_callback(painter):
pen = QtGui.QPen(QtCore.Qt.black, 1 / self._dpi_ratio,
QtCore.Qt.DotLine)
painter.setPen(pen)
painter.drawRect(*(pt / self._dpi_ratio for pt in rect))
else:
def _draw_rect_callback(painter):
return
self._draw_rect_callback = _draw_rect_callback
self.update()
class MainWindow(QtWidgets.QMainWindow):
closing = QtCore.Signal()
def closeEvent(self, event):
self.closing.emit()
QtWidgets.QMainWindow.closeEvent(self, event)
class FigureManagerQT(FigureManagerBase):
"""
Attributes
----------
canvas : `FigureCanvas`
The FigureCanvas instance
num : int or str
The Figure number
toolbar : qt.QToolBar
The qt.QToolBar
window : qt.QMainWindow
The qt.QMainWindow
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
self.canvas = canvas
self.window = MainWindow()
self.window.closing.connect(canvas.close_event)
self.window.closing.connect(self._widgetclosed)
self.window.setWindowTitle("Figure %d" % num)
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.svg')
self.window.setWindowIcon(QtGui.QIcon(image))
# Give the keyboard focus to the figure instead of the
# manager; StrongFocus accepts both tab and click to focus and
# will enable the canvas to process events w/o clicking.
# ClickFocus only takes the focus if the window has been
# clicked
# on. http://qt-project.org/doc/qt-4.8/qt.html#FocusPolicy-enum or
# http://doc.qt.digia.com/qt/qt.html#FocusPolicy-enum
self.canvas.setFocusPolicy(QtCore.Qt.StrongFocus)
self.canvas.setFocus()
self.window._destroying = False
self.toolmanager = self._get_toolmanager()
self.toolbar = self._get_toolbar(self.canvas, self.window)
self.statusbar = None
if self.toolmanager:
backend_tools.add_tools_to_manager(self.toolmanager)
if self.toolbar:
backend_tools.add_tools_to_container(self.toolbar)
self.statusbar = StatusbarQt(self.window, self.toolmanager)
if self.toolbar is not None:
self.window.addToolBar(self.toolbar)
if not self.toolmanager:
# add text label to status bar
statusbar_label = QtWidgets.QLabel()
self.window.statusBar().addWidget(statusbar_label)
self.toolbar.message.connect(statusbar_label.setText)
tbs_height = self.toolbar.sizeHint().height()
else:
tbs_height = 0
# resize the main window so it will display the canvas with the
# requested size:
cs = canvas.sizeHint()
sbs = self.window.statusBar().sizeHint()
self._status_and_tool_height = tbs_height + sbs.height()
height = cs.height() + self._status_and_tool_height
self.window.resize(cs.width(), height)
self.window.setCentralWidget(self.canvas)
if matplotlib.is_interactive():
self.window.show()
self.canvas.draw_idle()
def notify_axes_change(fig):
# This will be called whenever the current axes is changed
if self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.window.raise_()
def full_screen_toggle(self):
if self.window.isFullScreen():
self.window.showNormal()
else:
self.window.showFullScreen()
def _widgetclosed(self):
if self.window._destroying:
return
self.window._destroying = True
try:
Gcf.destroy(self.num)
except AttributeError:
pass
# It seems that when the python session is killed,
# Gcf can get destroyed before the Gcf.destroy
# line is run, leading to a useless AttributeError.
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent, False)
elif matplotlib.rcParams['toolbar'] == 'toolmanager':
toolbar = ToolbarQt(self.toolmanager, self.window)
else:
toolbar = None
return toolbar
def _get_toolmanager(self):
if matplotlib.rcParams['toolbar'] == 'toolmanager':
toolmanager = ToolManager(self.canvas.figure)
else:
toolmanager = None
return toolmanager
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height + self._status_and_tool_height)
def show(self):
self.window.show()
self.window.activateWindow()
self.window.raise_()
def destroy(self, *args):
# check for qApp first, as PySide deletes it in its atexit handler
if QtWidgets.QApplication.instance() is None:
return
if self.window._destroying:
return
self.window._destroying = True
if self.toolbar:
self.toolbar.destroy()
self.window.close()
def get_window_title(self):
return six.text_type(self.window.windowTitle())
def set_window_title(self, title):
self.window.setWindowTitle(title)
class NavigationToolbar2QT(NavigationToolbar2, QtWidgets.QToolBar):
message = QtCore.Signal(str)
def __init__(self, canvas, parent, coordinates=True):
""" coordinates: should we show the coordinates on the right? """
self.canvas = canvas
self.parent = parent
self.coordinates = coordinates
self._actions = {}
"""A mapping of toolitem method names to their QActions"""
QtWidgets.QToolBar.__init__(self, parent)
NavigationToolbar2.__init__(self, canvas)
def _icon(self, name):
if is_pyqt5():
name = name.replace('.png', '_large.png')
pm = QtGui.QPixmap(os.path.join(self.basedir, name))
if hasattr(pm, 'setDevicePixelRatio'):
pm.setDevicePixelRatio(self.canvas._dpi_ratio)
return QtGui.QIcon(pm)
def _init_toolbar(self):
self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.addSeparator()
else:
a = self.addAction(self._icon(image_file + '.png'),
text, getattr(self, callback))
self._actions[callback] = a
if callback in ['zoom', 'pan']:
a.setCheckable(True)
if tooltip_text is not None:
a.setToolTip(tooltip_text)
if text == 'Subplots':
a = self.addAction(self._icon("qt4_editor_options.png"),
'Customize', self.edit_parameters)
a.setToolTip('Edit axis, curve and image parameters')
self.buttons = {}
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
if self.coordinates:
self.locLabel = QtWidgets.QLabel("", self)
self.locLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
self.locLabel.setSizePolicy(
QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Ignored))
labelAction = self.addWidget(self.locLabel)
labelAction.setVisible(True)
# reference holder for subplots_adjust window
self.adj_window = None
# Esthetic adjustments - we need to set these explicitly in PyQt5
# otherwise the layout looks different - but we don't want to set it if
# not using HiDPI icons otherwise they look worse than before.
if is_pyqt5():
self.setIconSize(QtCore.QSize(24, 24))
self.layout().setSpacing(12)
if is_pyqt5():
# For some reason, self.setMinimumHeight doesn't seem to carry over to
# the actual sizeHint, so override it instead in order to make the
# aesthetic adjustments noted above.
def sizeHint(self):
size = super(NavigationToolbar2QT, self).sizeHint()
size.setHeight(max(48, size.height()))
return size
def edit_parameters(self):
allaxes = self.canvas.figure.get_axes()
if not allaxes:
QtWidgets.QMessageBox.warning(
self.parent, "Error", "There are no axes to edit.")
return
elif len(allaxes) == 1:
axes, = allaxes
else:
titles = []
for axes in allaxes:
name = (axes.get_title() or
" - ".join(filter(None, [axes.get_xlabel(),
axes.get_ylabel()])) or
"<anonymous {} (id: {:#x})>".format(
type(axes).__name__, id(axes)))
titles.append(name)
item, ok = QtWidgets.QInputDialog.getItem(
self.parent, 'Customize', 'Select axes:', titles, 0, False)
if ok:
axes = allaxes[titles.index(six.text_type(item))]
else:
return
figureoptions.figure_edit(axes, self)
def _update_buttons_checked(self):
# sync button checkstates to match active mode
self._actions['pan'].setChecked(self._active == 'PAN')
self._actions['zoom'].setChecked(self._active == 'ZOOM')
def pan(self, *args):
super(NavigationToolbar2QT, self).pan(*args)
self._update_buttons_checked()
def zoom(self, *args):
super(NavigationToolbar2QT, self).zoom(*args)
self._update_buttons_checked()
def set_message(self, s):
self.message.emit(s)
if self.coordinates:
self.locLabel.setText(s)
def set_cursor(self, cursor):
self.canvas.setCursor(cursord[cursor])
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
rect = [int(val) for val in (x0, y0, x1 - x0, y1 - y0)]
self.canvas.drawRectangle(rect)
def remove_rubberband(self):
self.canvas.drawRectangle(None)
def configure_subplots(self):
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.png')
dia = SubplotToolQt(self.canvas.figure, self.parent)
dia.setWindowIcon(QtGui.QIcon(image))
dia.exec_()
def save_figure(self, *args):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = sorted(six.iteritems(filetypes))
default_filetype = self.canvas.get_default_filetype()
startpath = os.path.expanduser(
matplotlib.rcParams['savefig.directory'])
start = os.path.join(startpath, self.canvas.get_default_filename())
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname, filter = _getSaveFileName(self.parent,
"Choose a filename to save to",
start, filters, selectedFilter)
if fname:
# Save dir for next time, unless empty str (i.e., use cwd).
if startpath != "":
matplotlib.rcParams['savefig.directory'] = (
os.path.dirname(six.text_type(fname)))
try:
self.canvas.figure.savefig(six.text_type(fname))
except Exception as e:
QtWidgets.QMessageBox.critical(
self, "Error saving file", six.text_type(e),
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
class SubplotToolQt(UiSubplotTool):
def __init__(self, targetfig, parent):
UiSubplotTool.__init__(self, None)
self._figure = targetfig
for lower, higher in [("bottom", "top"), ("left", "right")]:
self._widgets[lower].valueChanged.connect(
lambda val: self._widgets[higher].setMinimum(val + .001))
self._widgets[higher].valueChanged.connect(
lambda val: self._widgets[lower].setMaximum(val - .001))
self._attrs = ["top", "bottom", "left", "right", "hspace", "wspace"]
self._defaults = {attr: vars(self._figure.subplotpars)[attr]
for attr in self._attrs}
# Set values after setting the range callbacks, but before setting up
# the redraw callbacks.
self._reset()
for attr in self._attrs:
self._widgets[attr].valueChanged.connect(self._on_value_changed)
for action, method in [("Export values", self._export_values),
("Tight layout", self._tight_layout),
("Reset", self._reset),
("Close", self.close)]:
self._widgets[action].clicked.connect(method)
def _export_values(self):
# Explicitly round to 3 decimals (which is also the spinbox precision)
# to avoid numbers of the form 0.100...001.
dialog = QtWidgets.QDialog()
layout = QtWidgets.QVBoxLayout()
dialog.setLayout(layout)
text = QtWidgets.QPlainTextEdit()
text.setReadOnly(True)
layout.addWidget(text)
text.setPlainText(
",\n".join("{}={:.3}".format(attr, self._widgets[attr].value())
for attr in self._attrs))
# Adjust the height of the text widget to fit the whole text, plus
# some padding.
size = text.maximumSize()
size.setHeight(
QtGui.QFontMetrics(text.document().defaultFont())
.size(0, text.toPlainText()).height() + 20)
text.setMaximumSize(size)
dialog.exec_()
def _on_value_changed(self):
self._figure.subplots_adjust(**{attr: self._widgets[attr].value()
for attr in self._attrs})
self._figure.canvas.draw_idle()
def _tight_layout(self):
self._figure.tight_layout()
for attr in self._attrs:
widget = self._widgets[attr]
widget.blockSignals(True)
widget.setValue(vars(self._figure.subplotpars)[attr])
widget.blockSignals(False)
self._figure.canvas.draw_idle()
def _reset(self):
for attr, value in self._defaults.items():
self._widgets[attr].setValue(value)
class ToolbarQt(ToolContainerBase, QtWidgets.QToolBar):
def __init__(self, toolmanager, parent):
ToolContainerBase.__init__(self, toolmanager)
QtWidgets.QToolBar.__init__(self, parent)
self._toolitems = {}
self._groups = {}
self._last = None
@property
def _icon_extension(self):
if is_pyqt5():
return '_large.png'
return '.png'
def add_toolitem(
self, name, group, position, image_file, description, toggle):
button = QtWidgets.QToolButton(self)
button.setIcon(self._icon(image_file))
button.setText(name)
if description:
button.setToolTip(description)
def handler():
self.trigger_tool(name)
if toggle:
button.setCheckable(True)
button.toggled.connect(handler)
else:
button.clicked.connect(handler)
self._last = button
self._toolitems.setdefault(name, [])
self._add_to_group(group, name, button, position)
self._toolitems[name].append((button, handler))
def _add_to_group(self, group, name, button, position):
gr = self._groups.get(group, [])
if not gr:
sep = self.addSeparator()
gr.append(sep)
before = gr[position]
widget = self.insertWidget(before, button)
gr.insert(position, widget)
self._groups[group] = gr
def _icon(self, name):
pm = QtGui.QPixmap(name)
if hasattr(pm, 'setDevicePixelRatio'):
pm.setDevicePixelRatio(self.toolmanager.canvas._dpi_ratio)
return QtGui.QIcon(pm)
def toggle_toolitem(self, name, toggled):
if name not in self._toolitems:
return
for button, handler in self._toolitems[name]:
button.toggled.disconnect(handler)
button.setChecked(toggled)
button.toggled.connect(handler)
def remove_toolitem(self, name):
for button, handler in self._toolitems[name]:
button.setParent(None)
del self._toolitems[name]
class StatusbarQt(StatusbarBase, QtWidgets.QLabel):
def __init__(self, window, *args, **kwargs):
StatusbarBase.__init__(self, *args, **kwargs)
QtWidgets.QLabel.__init__(self)
window.statusBar().addWidget(self)
def set_message(self, s):
self.setText(s)
class ConfigureSubplotsQt(backend_tools.ConfigureSubplotsBase):
def trigger(self, *args):
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.png')
parent = self.canvas.manager.window
dia = SubplotToolQt(self.figure, parent)
dia.setWindowIcon(QtGui.QIcon(image))
dia.exec_()
class SaveFigureQt(backend_tools.SaveFigureBase):
def trigger(self, *args):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = sorted(six.iteritems(filetypes))
default_filetype = self.canvas.get_default_filetype()
startpath = os.path.expanduser(
matplotlib.rcParams['savefig.directory'])
start = os.path.join(startpath, self.canvas.get_default_filename())
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
parent = self.canvas.manager.window
fname, filter = _getSaveFileName(parent,
"Choose a filename to save to",
start, filters, selectedFilter)
if fname:
# Save dir for next time, unless empty str (i.e., use cwd).
if startpath != "":
matplotlib.rcParams['savefig.directory'] = (
os.path.dirname(six.text_type(fname)))
try:
self.canvas.figure.savefig(six.text_type(fname))
except Exception as e:
QtWidgets.QMessageBox.critical(
self, "Error saving file", six.text_type(e),
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
class SetCursorQt(backend_tools.SetCursorBase):
def set_cursor(self, cursor):
self.canvas.setCursor(cursord[cursor])
class RubberbandQt(backend_tools.RubberbandBase):
def draw_rubberband(self, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
rect = [int(val) for val in (x0, y0, x1 - x0, y1 - y0)]
self.canvas.drawRectangle(rect)
def remove_rubberband(self):
self.canvas.drawRectangle(None)
backend_tools.ToolSaveFigure = SaveFigureQt
backend_tools.ToolConfigureSubplots = ConfigureSubplotsQt
backend_tools.ToolSetCursor = SetCursorQt
backend_tools.ToolRubberband = RubberbandQt
def error_msg_qt(msg, parent=None):
if not isinstance(msg, six.string_types):
msg = ','.join(map(str, msg))
    QtWidgets.QMessageBox.warning(parent, "Matplotlib",
                                  msg, QtWidgets.QMessageBox.Ok)
def exception_handler(type, value, tb):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename is not None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror is not None:
msg += value.strerror
else:
msg += six.text_type(value)
if len(msg):
error_msg_qt(msg)
@_Backend.export
class _BackendQT5(_Backend):
FigureCanvas = FigureCanvasQT
FigureManager = FigureManagerQT
@staticmethod
def trigger_manager_draw(manager):
manager.canvas.draw_idle()
@staticmethod
def mainloop():
# allow KeyboardInterrupt exceptions to close the plot window.
signal.signal(signal.SIGINT, signal.SIG_DFL)
qApp.exec_()
| []
| []
| ["DISPLAY"]
| [] | ["DISPLAY"] | python | 1 | 0 | |
example_wsgi.py | #!/home1/USERNAME/python/2.7/bin/python
# ^^ This is a custom install of python, change it to reflect your server ^^
# Put this file in your public_html root, e.g. ~/public_html/flask/huzzah
# Below, set APPLICATION_ROOT to the path of this file, including the filename (here ``huzzah``)
import os
import sys
# Add the path where you checked out this repo, below it's ``flask_mold``
sys.path.insert(0, '/home1/USERNAME/projects/flask_mold/')
# You can add environment variables here. Just don't check them into your repo.
os.environ['SECRET_KEY'] = 'make me super secret yo'
from flup.server.fcgi import WSGIServer
# Change this line to run if 'baseapp' isn't the folder you're using.
from baseapp import create_app
# Because this is production, load the ProdConfig. Change to DevConfig for development.
from config.default_server import ProdConfig as config
config.APPLICATION_ROOT = '/flask/huzzah/' # <-- set this to this file path (see above)
app = create_app(config)
if __name__ == '__main__':
WSGIServer(app).run()
| []
| []
| ["SECRET_KEY"]
| [] | ["SECRET_KEY"] | python | 1 | 0 | |
relogic/main.py | from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import random
from types import SimpleNamespace
import numpy as np
import torch
from relogic.logickit.base import utils
from relogic.logickit.base.configure import configure, update_configure
from relogic.logickit.training import trainer, training_progress
from relogic.logickit.serving import Server
from relogic.logickit.analyzer.heads_importance import compute_heads_importance, mask_heads
if "PUDB" not in os.environ or os.environ["PUDB"] == "false":
import relogic.utils.crash_on_ipy
def train(config):
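  """Builds a Trainer (optionally restoring an external teacher for distillation) and runs training."""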
if config.use_external_teacher:
teacher_model_path = config.teacher_model_path
teacher_config = os.path.join(teacher_model_path, "general_config.json")
with open(teacher_config) as f:
teacher_config = SimpleNamespace(**json.load(f))
teacher_config.local_rank = config.local_rank
teacher_config.no_cuda = config.no_cuda
else:
teacher_config = None
# A quick fix for loading external teacher
model_trainer = trainer.Trainer(
config=config, teacher_config=teacher_config)
# A quick fix for version migration
progress = training_progress.TrainingProgress(config=config)
if config.use_external_teacher:
model_path = os.path.join(teacher_model_path,
teacher_config.model_name + ".ckpt")
model_trainer.restore(model_path)
model_trainer.restore_teacher(model_path)
model_trainer.train(progress)
def finetune(config):
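  """Restores a previously trained checkpoint and continues training with the current config."""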
general_config_path = os.path.join(config.finetune_restore_path,
"general_config.json")
with open(general_config_path) as f:
restore_config = SimpleNamespace(**json.load(f))
if config.model_name:
model_path = os.path.join(config.finetune_restore_path,
config.model_name + ".ckpt")
else:
model_path = os.path.join(config.finetune_restore_path,
restore_config.model_name + ".ckpt")
model_trainer = trainer.Trainer(config)
model_trainer.restore(model_path)
progress = training_progress.TrainingProgress(config=config)
model_trainer.train(progress)
def eval(config):
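  """Restores a checkpoint and dispatches to evaluation, serving, analysis, or feature extraction based on the mode."""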
general_config_path = os.path.join(config.restore_path,
"general_config.json")
with open(general_config_path) as f:
restore_config = SimpleNamespace(**json.load(f))
if config.model_name:
model_path = os.path.join(config.restore_path,
config.model_name + ".ckpt")
else:
model_path = os.path.join(config.restore_path,
restore_config.model_name + ".ckpt")
restore_config.mode = config.mode
restore_config.output_features = config.output_features
restore_config.local_rank = config.local_rank
restore_config.no_cuda = config.no_cuda
restore_config.buckets = config.buckets
restore_config.gold_answer_file = config.gold_answer_file
restore_config.null_score_diff_threshold = config.null_score_diff_threshold
restore_config.output_attentions = config.output_attentions
restore_config.use_external_teacher = False
if not hasattr(restore_config, "branching_encoder"):
restore_config.branching_encoder = False
# Update the evaluation dataset
update_configure(restore_config, config)
print(restore_config)
utils.heading("RUN {} ({:})".format(config.mode.upper(),
restore_config.task_names))
model_trainer = trainer.Trainer(restore_config)
model_trainer.restore(model_path)
if config.mode == "serving":
server = Server(model_trainer)
server.start()
elif config.mode == "analysis":
analyze(config, model_trainer)
elif config.mode == "feature_extraction":
task_ = None
for task in model_trainer.tasks:
if task.name == config.selected_task:
task_ = task
model_trainer.feature_extraction(
task=task_, dump_file=config.feature_dump_file)
else:
model_trainer.evaluate_all_tasks()
def analyze(config, model_trainer):
# compute_heads_importance(config, model_trainer)
mask_heads(config, model_trainer)
def main():
utils.heading("SETUP")
parser = argparse.ArgumentParser()
# IO
parser.add_argument(
"--mode", default=None, choices=["train", "valid", "eval", "finetune", "analysis", "feature_extraction"])
parser.add_argument("--output_dir", type=str, default="data/models")
parser.add_argument("--max_seq_length", type=int, default=450)
parser.add_argument("--max_query_length", type=int, default=64)
parser.add_argument("--doc_stride", type=int, default=128)
parser.add_argument("--do_lower_case", default=False, action="store_true")
parser.add_argument("--model_name", type=str)
parser.add_argument("--restore_path", type=str)
parser.add_argument("--finetune_restore_path", type=str)
parser.add_argument("--train_file", type=str, default="train.json")
parser.add_argument("--dev_file", type=str, default="dev.json")
parser.add_argument("--test_file", type=str, default="test.json")
# Task Definition
parser.add_argument("--task_names", type=str)
parser.add_argument("--raw_data_path", type=str)
parser.add_argument("--label_mapping_path", type=str)
parser.add_argument("--unsupervised_data", type=str)
parser.add_argument("--lang", type=str, default="en")
parser.add_argument("--pretokenized", action="store_true", default=False)
parser.add_argument("--topk", default=1)
parser.add_argument("--gold_answer_file", default="data/preprocessed_data/squad20.json")
parser.add_argument("--dump_to_files_dict", default="")
parser.add_argument("--output_attentions", default=False, action="store_true")
parser.add_argument("--span_inference", default=False, action="store_true")
parser.add_argument("--metrics", default="", type=str)
# Task related configuration
# Sequence Labeling
parser.add_argument("--sequence_labeling_use_cls", default=False, action="store_true")
# Relation Extraction
parser.add_argument("--no_entity_surface", dest="entity_surface_aware", default=True, action="store_false")
parser.add_argument("--use_dependency_feature", dest="use_dependency_feature", default=False, action="store_true")
parser.add_argument("--rel_extraction_module_type", type=str, default="hybrid")
# Semantic Role Labeling
parser.add_argument("--no_predicate_surface", dest="predicate_surface_aware", default=True, action="store_false")
parser.add_argument("--no_span_annotation", dest="use_span_annotation", default=True, action="store_false")
parser.add_argument("--use_span_candidates", default=False, action="store_true")
parser.add_argument("--srl_module_type", type=str, default="sequence_labeling")
parser.add_argument("--label_embed_dim", type=int, default=100)
parser.add_argument("--external_vocab_embed_dim", type=int, default=300)
parser.add_argument("--external_embeddings", type=str)
parser.add_argument("--use_description", default=False, action="store_true")
parser.add_argument("--srl_label_format", default="srl_label_span_based", type=str)
parser.add_argument("--num_width_embeddings", type=int, default=300)
parser.add_argument("--span_width_embedding_dim", type=int, default=100)
parser.add_argument("--srl_candidate_loss", default=False, action="store_true")
parser.add_argument("--srl_arg_span_repr", default="ave")
parser.add_argument("--srl_pred_span_repr", default="ave")
parser.add_argument("--srl_use_label_embedding", default=False, action="store_true")
parser.add_argument("--srl_compute_pos_tag_loss", default=False, action="store_true")
parser.add_argument("--srl_use_gold_predicate", default=False, action="store_true")
parser.add_argument("--srl_use_gold_argument", default=False, action="store_true")
parser.add_argument("--predicate_reveal_method", default=None, type=str)
parser.add_argument("--indicator_embedding_size", default=10, type=int)
# Dependency Parsing
parser.add_argument("--dep_parsing_mlp_dim", default=300, type=int)
parser.add_argument("--dropout", default=0.3, type=float)
# Parallel Mapping
parser.add_argument("--parallel_mapping_mode", default="alignment", type=str)
# Reading Comprehension
parser.add_argument("--null_score_diff_threshold", default=1.0)
# Information Retrieval
parser.add_argument("--qrels_file_path", type=str, default=None)
parser.add_argument("--regression", default=False, action="store_true")
parser.add_argument("--word_level_interaction", default=False, action="store_true")
parser.add_argument("--ir_siamese", default=False, action="store_true")
# CNN model
parser.add_argument("--output_channel", type=int, default=150)
parser.add_argument("--kernel_size", type=int, default=2)
parser.add_argument("--word_embed_dim", type=int, default=300)
# Modeling
parser.add_argument("--use_gcn", dest="use_gcn", default=False, action="store_true")
parser.add_argument("--fix_embedding", default=False, action="store_true")
# Model
parser.add_argument("--bert_model", type=str)
parser.add_argument("--encoder_type", type=str, default="bert", choices=["bert", "xlm", "xlmr", "lstm", "embedding"])
parser.add_argument("--hidden_size", type=int, default=768)
parser.add_argument("--projection_size", type=int, default=300)
parser.add_argument(
"--initializer_range", type=float,
default=0.02) # initialization for task module
# follow the initialization range of bert
parser.add_argument("--no_bilstm", default=True, dest="use_bilstm", action="store_false")
parser.add_argument("--repr_size", default=300, type=int)
parser.add_argument("--branching_encoder", default=False, action="store_true")
parser.add_argument("--routing_config_file", type=str)
parser.add_argument("--selected_non_final_layers", type=str, default="none", help="split by ; among tasks")
parser.add_argument("--dataset_type", type=str, default="bucket")
parser.add_argument("--language_id_file", type=str, default=None)
# Semi-Supervised
parser.add_argument("--is_semisup", default=False, action="store_true")
parser.add_argument("--partial_view_sources", type=str)
parser.add_argument("--use_external_teacher", default=False, action="store_true")
parser.add_argument("--teacher_model_path", default=None, type=str)
# Training
parser.add_argument("--seed", type=int, default=3435)
parser.add_argument("--no_cuda", action="store_true")
parser.add_argument("--local_rank", type=int, default=-1)
parser.add_argument("--learning_rate", type=float, default=5e-5)
parser.add_argument("--warmup_proportion", type=float, default=0.1)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help=
"Number of updates steps to accumulate before performing a backward/update pass"
)
parser.add_argument("--print_every", type=int, default=25)
parser.add_argument("--eval_dev_every", default=2000, type=int)
parser.add_argument("--train_batch_size", type=str, default="8")
parser.add_argument("--test_batch_size", type=str, default="8")
parser.add_argument("--grad_clip", type=float, default=1.0)
parser.add_argument("--epoch_number", type=int, default=20)
parser.add_argument("--self_attention_head_size", default=64, type=int)
parser.add_argument("--schedule_method", default="warmup_linear")
parser.add_argument(
"--no_schedule_lr", dest="schedule_lr", default=True, action="store_false")
parser.add_argument("--word_dropout", default=False, action="store_true")
parser.add_argument("--word_dropout_prob", default=0.6, type=float)
parser.add_argument("--max_margin", type=float, default=3)
parser.add_argument("--warmup_epoch_number", type=int, default=0)
parser.add_argument("--sgd_learning_rate", type=float, default=0.1)
parser.add_argument("--adam_learning_rate", type=float, default=0.003)
parser.add_argument("--sep_optim", dest="sep_optim", default=False, action="store_true")
parser.add_argument("--multi_gpu", dest="multi_gpu", default=False, action="store_true")
parser.add_argument("--ignore_parameters", default="", type=str)
parser.add_argument("--fix_bert", default=False, action="store_true")
parser.add_argument("--two_stage_optim", default=False, action="store_true")
parser.add_argument("--training_scheme", default=None, type=str)
parser.add_argument("--training_scheme_file", default=None, type=str)
parser.add_argument("--num_train_optimization_steps", default=0, type=int)
parser.add_argument("--early_stop_at", default=0, type=int)
parser.add_argument("--loss_weight", type=str, default='1')
parser.add_argument("--select_index_method", type=str, default="cls")
parser.add_argument("--use_cosine_loss", default=False, action="store_true")
parser.add_argument("--adversarial_training", default=None, type=str)
parser.add_argument("--no_bucket", default=False, action="store_true")
parser.add_argument("--param_initialization", default=None, type=str)
  # We allow setting the same number of training steps for different datasets.
  # This needs to be combined with CUDA_VISIBLE_DEVICES.
parser.add_argument("--only_adam", default=False, action="store_true")
# Analysis
parser.add_argument("--head_to_mask_file", type=str, default="")
# Configuration
parser.add_argument("--config_file", type=str, default=None)
parser.add_argument("--trainer_config", type=str, default=None)
parser.add_argument("--module_config", type=str, default=None)
parser.add_argument("--task_config", type=str, default=None)
#
parser.add_argument("--selected_task", type=str)
parser.add_argument("--feature_dump_file", type=str)
parser.add_argument("--output_features", default=False, action="store_true")
args = parser.parse_args()
if not args.mode:
raise ValueError("You need to specify the mode")
if args.output_dir:
if os.path.exists(args.output_dir) and os.listdir(
args.output_dir) and args.mode == "train":
raise ValueError(
"Output directory ({}) already exists and is not empty.".format(
args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
if args.gradient_accumulation_steps < 1:
raise ValueError(
"Invalid gradient_accumulation_steps parameter: {}, should be >= 1".
format(args.gradient_accumulation_steps))
# args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
# num_train_optimization_steps = len(train_examples) / batch_size * epoch_number
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
configure(args)
print(args)
if args.mode == "train":
utils.heading("START TRAINING ({:})".format(args.task_names))
train(args)
elif args.mode == "valid":
eval(args)
elif args.mode == "eval":
eval(args)
elif args.mode == "finetune":
finetune(args)
elif args.mode == "serving":
eval(args)
elif args.mode == "analysis":
eval(args)
elif args.mode == "feature_extraction":
eval(args)
if __name__ == "__main__":
main()
| []
| []
| ["PUDB"]
| [] | ["PUDB"] | python | 1 | 0 | |
detection/utils.py | import os
import logging
import sys
import random
import cv2
import numpy as np
import torch
import torch.cuda
import torch.distributed as dist
from matplotlib import pyplot as plt, patches
from matplotlib.colors import SymLogNorm
from skimage.util import view_as_windows
def initialise_distributed(args):
"""Initialises environment for distributed training on GPUs."""
if "RANK" not in os.environ or "WORLD_SIZE" not in os.environ:
logging.info("Not using distributed mode")
args.distributed = False
return
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ["WORLD_SIZE"])
args.gpu = int(os.environ["LOCAL_RANK"])
args.distributed = True
torch.cuda.set_device(args.gpu)
dist.init_process_group(backend='nccl', init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
torch.distributed.barrier(device_ids=[args.gpu])
def get_world_size():
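    """Returns the world size from the WORLD_SIZE environment variable, or 1 when it is not set."""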
if "WORLD_SIZE" in os.environ:
return int(os.environ["WORLD_SIZE"])
return 1
def initialise_logging(args):
"""Initialise logging environment."""
if not is_master_process():
logging.disable()
return
level = getattr(logging, args.log_level.upper(), None)
if not isinstance(level, int):
raise ValueError(f"Invalid log level: {args.log_level}")
if args.log_file:
logging.basicConfig(filename=args.log_file, filemode='w', level=level, format='[%(asctime)s] %(message)s',
datefmt='%I:%M:%S %p')
else:
logging.basicConfig(stream=sys.stdout, level=level, format='[%(asctime)s] %(message)s', datefmt='%I:%M:%S %p')
def collate_fn(batch):
return tuple(zip(*batch))
def is_master_process():
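    """Returns True unless distributed training is initialised and this process has a non-zero rank."""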
if dist.is_available() and dist.is_initialized() and dist.get_rank() != 0:
return False
return True
def save_state(checkpoint, output_dir, epoch):
torch.save(checkpoint, os.path.join(output_dir, 'checkpoint.file'))
torch.save(checkpoint, os.path.join(output_dir, f'model_{epoch}.file'))
def tensor_encode_id(img_id):
"""
Encodes a FathomNet image id like '00a6db92-5277-4772-b019-5b89c6af57c3' as a tensor
of shape torch.Size([4]) of four integers in the range [0, 2^32-1].
"""
hex_str = img_id.replace('-', '')
length = len(hex_str) // 4
img_id_enc = tuple(int(hex_str[i * length: (i + 1) * length], 16) for i in range(4))
return torch.tensor(img_id_enc)
def tensor_decode_id(img_id_enc):
"""Inverse function of tensor_encode_id"""
ints = img_id_enc.tolist()
img_id = ''.join([hex(part)[2:].zfill(8) for part in ints])
for ind in [8, 13, 18, 23]:
img_id = img_id[:ind] + '-' + img_id[ind:]
return img_id
def sliding_window(image, win_size, step_size):
"""Implements sliding window over an image."""
for j, row in enumerate(view_as_windows(image, win_size, step_size)):
for i, col in enumerate(row):
x, y = i * step_size[1], j * step_size[0]
for window in col:
yield x, y, window
def iou_one_to_many(box, boxes):
"""Computes one-to-many intersection over union"""
x0y0 = np.maximum(boxes[:, :2], box[:2])
x1y1 = np.minimum(boxes[:, 2:], box[2:])
intersection = (x1y1[:, 0] - x0y0[:, 0]) * (x1y1[:, 1] - x0y0[:, 1])
intersection[(x0y0[:, 0] > x1y1[:, 0]) | (x0y0[:, 1] > x1y1[:, 1])] = 0
area = (box[2] - box[0]) * (box[3] - box[1])
areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
ious = intersection / (area + areas - intersection)
return ious
def non_maxima_suppression(boxes, confidence_scores, threshold=0.5):
"""Performes non-maxima suppression filtering on bounding boxes."""
filtered_boxes = []
filtered_confidence = []
while boxes.size > 0:
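        # Greedily keep the highest-scoring box, then drop all remaining boxes whose IoU with it exceeds the threshold.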
i = confidence_scores.argmax()
filtered_boxes.append(boxes[i])
filtered_confidence.append(confidence_scores[i])
ious = iou_one_to_many(boxes[i], boxes)
boxes = boxes[ious <= threshold]
confidence_scores = confidence_scores[ious <= threshold]
return filtered_boxes, filtered_confidence
def plot_confusion_matrix(conf_mat, filename, labels=None, log_scale=False, show_values=False, cmap='viridis'):
"""Plots a confusion matrix with or without labels."""
fig, ax = plt.subplots(figsize=(6, 6))
im = ax.imshow(conf_mat, cmap=cmap, norm=SymLogNorm(10) if log_scale else None, extent=[0, 1, 0, 1], origin='lower',
interpolation="nearest")
fig.colorbar(im, ax=ax)
if labels:
ax.set_xticks(np.linspace(0.5 / len(labels), 1 - 0.5 / len(labels), len(labels)))
ax.set_xticklabels(labels, rotation='vertical', fontsize=8)
ax.set_yticks(np.linspace(0.5 / len(labels), 1 - 0.5 / len(labels), len(labels)))
ax.set_yticklabels(labels, fontsize=8)
else:
ax.set_xticks([])
ax.set_yticks([])
if show_values:
for (j, i), label in np.ndenumerate(conf_mat):
ax.text((i + 0.5) / len(conf_mat), (j + 0.5) / len(conf_mat[0]), f"{label:.02f}", ha='center', va='center',
fontsize=8)
ax.set_xlabel('Predicted')
ax.set_ylabel('Actual')
plt.savefig(filename, facecolor='w', bbox_inches='tight', dpi=200)
def make_deterministic(seed):
"""Forces a deterministic run."""
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
cv2.setRNGSeed(0)
def visualise_image(image, predictions=None, ground_truths=None, name_mapping=None, conf_thresh=None, c_pred='red',
c_gt='yellow'):
"""Visualises image with predicted and ground truth bounding boxes"""
fig, ax = plt.subplots(figsize=(10, 10))
if isinstance(image, torch.Tensor):
ax.imshow(image.permute(1, 2, 0))
else:
ax.imshow(image)
if predictions:
for box, label, score in zip(predictions['boxes'], predictions['labels'], predictions['scores']):
if conf_thresh and score < conf_thresh:
continue
# Create a Rectangle patch
rect = patches.Rectangle((box[0], box[1]), box[2] - box[0], box[3] - box[1], linewidth=1, edgecolor=c_pred,
facecolor='none')
# Add the patch to the Axes
ax.add_patch(rect)
label = label.item()
if name_mapping:
label = name_mapping(label)
plt.text(box[0] + 10, box[3] + 10, label, size=10,
ha="left", va="top",
bbox=dict(boxstyle="square",
ec=c_pred,
fc=c_pred,
alpha=0.2
)
)
if ground_truths:
for box, label in zip(ground_truths['boxes'], ground_truths['labels']):
# Create a Rectangle patch
rect = patches.Rectangle((box[0], box[1]), box[2] - box[0], box[3] - box[1], linewidth=1, edgecolor=c_gt,
facecolor='none')
# Add the patch to the Axes
ax.add_patch(rect)
label = label.item()
if name_mapping:
label = name_mapping(label)
plt.text(box[0] + 10, box[3] + 10, label, size=10,
ha="left", va="top",
bbox=dict(boxstyle="square",
ec=c_gt,
fc=c_gt,
alpha=0.2
)
)
| []
| []
| ["LOCAL_RANK", "WORLD_SIZE", "RANK"]
| [] | ["LOCAL_RANK", "WORLD_SIZE", "RANK"] | python | 3 | 0 | |
server.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// HTTP server. See RFC 7230 through 7235.
package http
import (
"bufio"
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/textproto"
"net/url"
urlpkg "net/url"
"os"
"path"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"golang.org/x/net/http/httpguts"
)
// Errors used by the HTTP server.
var (
// ErrBodyNotAllowed is returned by ResponseWriter.Write calls
// when the HTTP method or response code does not permit a
// body.
ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")
// ErrHijacked is returned by ResponseWriter.Write calls when
// the underlying connection has been hijacked using the
// Hijacker interface. A zero-byte write on a hijacked
// connection will return ErrHijacked without any other side
// effects.
ErrHijacked = errors.New("http: connection has been hijacked")
// ErrContentLength is returned by ResponseWriter.Write calls
// when a Handler set a Content-Length response header with a
// declared size and then attempted to write more bytes than
// declared.
ErrContentLength = errors.New("http: wrote more than the declared Content-Length")
// Deprecated: ErrWriteAfterFlush is no longer returned by
// anything in the net/http package. Callers should not
// compare errors against this variable.
ErrWriteAfterFlush = errors.New("unused")
)
// A Handler responds to an HTTP request.
//
// ServeHTTP should write reply headers and data to the ResponseWriter
// and then return. Returning signals that the request is finished; it
// is not valid to use the ResponseWriter or read from the
// Request.Body after or concurrently with the completion of the
// ServeHTTP call.
//
// Depending on the HTTP client software, HTTP protocol version, and
// any intermediaries between the client and the Go server, it may not
// be possible to read from the Request.Body after writing to the
// ResponseWriter. Cautious handlers should read the Request.Body
// first, and then reply.
//
// Except for reading the body, handlers should not modify the
// provided Request.
//
// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
// that the effect of the panic was isolated to the active request.
// It recovers the panic, logs a stack trace to the server error log,
// and either closes the network connection or sends an HTTP/2
// RST_STREAM, depending on the HTTP protocol. To abort a handler so
// the client sees an interrupted response but the server doesn't log
// an error, panic with the value ErrAbortHandler.
type Handler interface {
ServeHTTP(ResponseWriter, *Request)
}
// A ResponseWriter interface is used by an HTTP handler to
// construct an HTTP response.
//
// A ResponseWriter may not be used after the Handler.ServeHTTP method
// has returned.
type ResponseWriter interface {
// Header returns the header map that will be sent by
// WriteHeader. The Header map also is the mechanism with which
// Handlers can set HTTP trailers.
//
// Changing the header map after a call to WriteHeader (or
// Write) has no effect unless the modified headers are
// trailers.
//
// There are two ways to set Trailers. The preferred way is to
// predeclare in the headers which trailers you will later
// send by setting the "Trailer" header to the names of the
// trailer keys which will come later. In this case, those
// keys of the Header map are treated as if they were
// trailers. See the example. The second way, for trailer
// keys not known to the Handler until after the first Write,
// is to prefix the Header map keys with the TrailerPrefix
// constant value. See TrailerPrefix.
//
// To suppress automatic response headers (such as "Date"), set
// their value to nil.
Header() Header
// Write writes the data to the connection as part of an HTTP reply.
//
// If WriteHeader has not yet been called, Write calls
// WriteHeader(http.StatusOK) before writing the data. If the Header
// does not contain a Content-Type line, Write adds a Content-Type set
// to the result of passing the initial 512 bytes of written data to
// DetectContentType. Additionally, if the total size of all written
// data is under a few KB and there are no Flush calls, the
// Content-Length header is added automatically.
//
// Depending on the HTTP protocol version and the client, calling
// Write or WriteHeader may prevent future reads on the
// Request.Body. For HTTP/1.x requests, handlers should read any
// needed request body data before writing the response. Once the
// headers have been flushed (due to either an explicit Flusher.Flush
// call or writing enough data to trigger a flush), the request body
// may be unavailable. For HTTP/2 requests, the Go HTTP server permits
// handlers to continue to read the request body while concurrently
// writing the response. However, such behavior may not be supported
// by all HTTP/2 clients. Handlers should read before writing if
// possible to maximize compatibility.
Write([]byte) (int, error)
// WriteHeader sends an HTTP response header with the provided
// status code.
//
// If WriteHeader is not called explicitly, the first call to Write
// will trigger an implicit WriteHeader(http.StatusOK).
// Thus explicit calls to WriteHeader are mainly used to
// send error codes.
//
// The provided code must be a valid HTTP 1xx-5xx status code.
// Only one header may be written. Go does not currently
// support sending user-defined 1xx informational headers,
// with the exception of 100-continue response header that the
// Server sends automatically when the Request.Body is read.
WriteHeader(statusCode int)
}
// The Flusher interface is implemented by ResponseWriters that allow
// an HTTP handler to flush buffered data to the client.
//
// The default HTTP/1.x and HTTP/2 ResponseWriter implementations
// support Flusher, but ResponseWriter wrappers may not. Handlers
// should always test for this ability at runtime.
//
// Note that even for ResponseWriters that support Flush,
// if the client is connected through an HTTP proxy,
// the buffered data may not reach the client until the response
// completes.
type Flusher interface {
// Flush sends any buffered data to the client.
Flush()
}
// The Hijacker interface is implemented by ResponseWriters that allow
// an HTTP handler to take over the connection.
//
// The default ResponseWriter for HTTP/1.x connections supports
// Hijacker, but HTTP/2 connections intentionally do not.
// ResponseWriter wrappers may also not support Hijacker. Handlers
// should always test for this ability at runtime.
type Hijacker interface {
// Hijack lets the caller take over the connection.
// After a call to Hijack the HTTP server library
// will not do anything else with the connection.
//
// It becomes the caller's responsibility to manage
// and close the connection.
//
// The returned net.Conn may have read or write deadlines
// already set, depending on the configuration of the
// Server. It is the caller's responsibility to set
// or clear those deadlines as needed.
//
// The returned bufio.Reader may contain unprocessed buffered
// data from the client.
//
// After a call to Hijack, the original Request.Body must not
// be used. The original Request's Context remains valid and
// is not canceled until the Request's ServeHTTP method
// returns.
Hijack() (net.Conn, *bufio.ReadWriter, error)
}
// The CloseNotifier interface is implemented by ResponseWriters which
// allow detecting when the underlying connection has gone away.
//
// This mechanism can be used to cancel long operations on the server
// if the client has disconnected before the response is ready.
//
// Deprecated: the CloseNotifier interface predates Go's context package.
// New code should use Request.Context instead.
type CloseNotifier interface {
// CloseNotify returns a channel that receives at most a
// single value (true) when the client connection has gone
// away.
//
// CloseNotify may wait to notify until Request.Body has been
// fully read.
//
// After the Handler has returned, there is no guarantee
// that the channel receives a value.
//
// If the protocol is HTTP/1.1 and CloseNotify is called while
// processing an idempotent request (such a GET) while
// HTTP/1.1 pipelining is in use, the arrival of a subsequent
// pipelined request may cause a value to be sent on the
// returned channel. In practice HTTP/1.1 pipelining is not
// enabled in browsers and not seen often in the wild. If this
// is a problem, use HTTP/2 or only use CloseNotify on methods
// such as POST.
CloseNotify() <-chan bool
}
var (
// ServerContextKey is a context key. It can be used in HTTP
// handlers with Context.Value to access the server that
// started the handler. The associated value will be of
// type *Server.
ServerContextKey = &contextKey{"http-server"}
// LocalAddrContextKey is a context key. It can be used in
// HTTP handlers with Context.Value to access the local
// address the connection arrived on.
// The associated value will be of type net.Addr.
LocalAddrContextKey = &contextKey{"local-addr"}
)
// A conn represents the server side of an HTTP connection.
type conn struct {
// server is the server on which the connection arrived.
// Immutable; never nil.
server *Server
// cancelCtx cancels the connection-level context.
cancelCtx context.CancelFunc
// rwc is the underlying network connection.
// This is never wrapped by other types and is the value given out
// to CloseNotifier callers. It is usually of type *net.TCPConn or
// *tls.Conn.
rwc net.Conn
// remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously
// inside the Listener's Accept goroutine, as some implementations block.
// It is populated immediately inside the (*conn).serve goroutine.
// This is the value of a Handler's (*Request).RemoteAddr.
remoteAddr string
// tlsState is the TLS connection state when using TLS.
// nil means not TLS.
tlsState *tls.ConnectionState
// werr is set to the first write error to rwc.
// It is set via checkConnErrorWriter{w}, where bufw writes.
werr error
// r is bufr's read source. It's a wrapper around rwc that provides
// io.LimitedReader-style limiting (while reading request headers)
// and functionality to support CloseNotifier. See *connReader docs.
r *connReader
// bufr reads from r.
bufr *bufio.Reader
// bufw writes to checkConnErrorWriter{c}, which populates werr on error.
bufw *bufio.Writer
// lastMethod is the method of the most recent request
// on this connection, if any.
lastMethod string
curReq atomic.Value // of *response (which has a Request in it)
curState struct{ atomic uint64 } // packed (unixtime<<8|uint8(ConnState))
// mu guards hijackedv
mu sync.Mutex
// hijackedv is whether this connection has been hijacked
// by a Handler with the Hijacker interface.
// It is guarded by mu.
hijackedv bool
}
func (c *conn) hijacked() bool {
c.mu.Lock()
defer c.mu.Unlock()
return c.hijackedv
}
// c.mu must be held.
func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
if c.hijackedv {
return nil, nil, ErrHijacked
}
c.r.abortPendingRead()
c.hijackedv = true
rwc = c.rwc
rwc.SetDeadline(time.Time{})
buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc))
if c.r.hasByte {
if _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil {
return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err)
}
}
c.setState(rwc, StateHijacked)
return
}
// This should be >= 512 bytes for DetectContentType,
// but otherwise it's somewhat arbitrary.
const bufferBeforeChunkingSize = 2048
// chunkWriter writes to a response's conn buffer, and is the writer
// wrapped by the response.bufw buffered writer.
//
// chunkWriter also is responsible for finalizing the Header, including
// conditionally setting the Content-Type and setting a Content-Length
// in cases where the handler's final output is smaller than the buffer
// size. It also conditionally adds chunk headers, when in chunking mode.
//
// See the comment above (*response).Write for the entire write flow.
type chunkWriter struct {
res *response
// header is either nil or a deep clone of res.handlerHeader
// at the time of res.writeHeader, if res.writeHeader is
// called and extra buffering is being done to calculate
// Content-Type and/or Content-Length.
header Header
// wroteHeader tells whether the header's been written to "the
// wire" (or rather: w.conn.buf). this is unlike
// (*response).wroteHeader, which tells only whether it was
// logically written.
wroteHeader bool
// set by the writeHeader method:
chunking bool // using chunked transfer encoding for reply body
}
var (
crlf = []byte("\r\n")
colonSpace = []byte(": ")
)
func (cw *chunkWriter) Write(p []byte) (n int, err error) {
if !cw.wroteHeader {
cw.writeHeader(p)
}
if cw.res.req.Method == "HEAD" {
// Eat writes.
return len(p), nil
}
if cw.chunking {
_, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p))
if err != nil {
cw.res.conn.rwc.Close()
return
}
}
n, err = cw.res.conn.bufw.Write(p)
if cw.chunking && err == nil {
_, err = cw.res.conn.bufw.Write(crlf)
}
if err != nil {
cw.res.conn.rwc.Close()
}
return
}
func (cw *chunkWriter) flush() {
if !cw.wroteHeader {
cw.writeHeader(nil)
}
cw.res.conn.bufw.Flush()
}
func (cw *chunkWriter) close() {
if !cw.wroteHeader {
cw.writeHeader(nil)
}
if cw.chunking {
bw := cw.res.conn.bufw // conn's bufio writer
// zero chunk to mark EOF
bw.WriteString("0\r\n")
if trailers := cw.res.finalTrailers(); trailers != nil {
trailers.Write(bw) // the writer handles noting errors
}
// final blank line after the trailers (whether
// present or not)
bw.WriteString("\r\n")
}
}
// A response represents the server side of an HTTP response.
type response struct {
conn *conn
req *Request // request for this response
reqBody io.ReadCloser
cancelCtx context.CancelFunc // when ServeHTTP exits
wroteHeader bool // reply header has been (logically) written
wroteContinue bool // 100 Continue response was written
wants10KeepAlive bool // HTTP/1.0 w/ Connection "keep-alive"
wantsClose bool // HTTP request has Connection "close"
w *bufio.Writer // buffers output in chunks to chunkWriter
cw chunkWriter
// handlerHeader is the Header that Handlers get access to,
// which may be retained and mutated even after WriteHeader.
// handlerHeader is copied into cw.header at WriteHeader
// time, and privately mutated thereafter.
handlerHeader Header
calledHeader bool // handler accessed handlerHeader via Header
written int64 // number of bytes written in body
contentLength int64 // explicitly-declared Content-Length; or -1
status int // status code passed to WriteHeader
// close connection after this reply. set on request and
// updated after response from handler if there's a
// "Connection: keep-alive" response header and a
// Content-Length.
closeAfterReply bool
// requestBodyLimitHit is set by requestTooLarge when
// maxBytesReader hits its max size. It is checked in
// WriteHeader, to make sure we don't consume the
// remaining request body to try to advance to the next HTTP
// request. Instead, when this is set, we stop reading
// subsequent requests on this connection and stop reading
// input from it.
requestBodyLimitHit bool
// trailers are the headers to be sent after the handler
// finishes writing the body. This field is initialized from
// the Trailer response header when the response header is
// written.
trailers []string
handlerDone atomicBool // set true when the handler exits
// Buffers for Date, Content-Length, and status code
dateBuf [len(TimeFormat)]byte
clenBuf [10]byte
statusBuf [3]byte
// closeNotifyCh is the channel returned by CloseNotify.
// TODO(bradfitz): this is currently (for Go 1.8) always
// non-nil. Make this lazily-created again as it used to be?
closeNotifyCh chan bool
didCloseNotify int32 // atomic (only 0->1 winner should send)
}
// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
// that, if present, signals that the map entry is actually for
// the response trailers, and not the response headers. The prefix
// is stripped after the ServeHTTP call finishes and the values are
// sent in the trailers.
//
// This mechanism is intended only for trailers that are not known
// prior to the headers being written. If the set of trailers is fixed
// or known before the header is written, the normal Go trailers mechanism
// is preferred:
// https://golang.org/pkg/net/http/#ResponseWriter
// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
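//
// For example, a handler that only knows a body checksum after writing
// the body could declare it through the prefixed key:
//
//	w.Header().Set(TrailerPrefix+"X-Body-Checksum", checksum)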
const TrailerPrefix = "Trailer:"
// finalTrailers is called after the Handler exits and returns a non-nil
// value if the Handler set any trailers.
func (w *response) finalTrailers() Header {
var t Header
for k, vv := range w.handlerHeader {
if strings.HasPrefix(k, TrailerPrefix) {
if t == nil {
t = make(Header)
}
t[strings.TrimPrefix(k, TrailerPrefix)] = vv
}
}
for _, k := range w.trailers {
if t == nil {
t = make(Header)
}
for _, v := range w.handlerHeader[k] {
t.Add(k, v)
}
}
return t
}
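// atomicBool is a boolean value that can be read and set atomically.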
type atomicBool int32
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
// declareTrailer is called for each Trailer header when the
// response header is written. It notes that a header will need to be
// written in the trailers at the end of the response.
func (w *response) declareTrailer(k string) {
k = CanonicalHeaderKey(k)
if !httpguts.ValidTrailerHeader(k) {
// Forbidden by RFC 7230, section 4.1.2
return
}
w.trailers = append(w.trailers, k)
}
// requestTooLarge is called by maxBytesReader when too much input has
// been read from the client.
func (w *response) requestTooLarge() {
w.closeAfterReply = true
w.requestBodyLimitHit = true
if !w.wroteHeader {
w.Header().Set("Connection", "close")
}
}
// needsSniff reports whether a Content-Type still needs to be sniffed.
func (w *response) needsSniff() bool {
_, haveType := w.handlerHeader["Content-Type"]
return !w.cw.wroteHeader && !haveType && w.written < sniffLen
}
// writerOnly hides an io.Writer value's optional ReadFrom method
// from io.Copy.
type writerOnly struct {
io.Writer
}
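// srcIsRegularFile reports whether src is (or wraps, via io.LimitedReader) a regular file on disk.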
func srcIsRegularFile(src io.Reader) (isRegular bool, err error) {
switch v := src.(type) {
case *os.File:
fi, err := v.Stat()
if err != nil {
return false, err
}
return fi.Mode().IsRegular(), nil
case *io.LimitedReader:
return srcIsRegularFile(v.R)
default:
return
}
}
// ReadFrom is here to optimize copying from an *os.File regular file
// to a *net.TCPConn with sendfile.
func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
// Our underlying w.conn.rwc is usually a *TCPConn (with its
// own ReadFrom method). If not, or if our src isn't a regular
// file, just fall back to the normal copy method.
rf, ok := w.conn.rwc.(io.ReaderFrom)
regFile, err := srcIsRegularFile(src)
if err != nil {
return 0, err
}
if !ok || !regFile {
bufp := copyBufPool.Get().(*[]byte)
defer copyBufPool.Put(bufp)
return io.CopyBuffer(writerOnly{w}, src, *bufp)
}
// sendfile path:
if !w.wroteHeader {
w.WriteHeader(StatusOK)
}
if w.needsSniff() {
n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen))
n += n0
if err != nil {
return n, err
}
}
w.w.Flush() // get rid of any previous writes
w.cw.flush() // make sure Header is written; flush data to rwc
// Now that cw has been flushed, its chunking field is guaranteed initialized.
if !w.cw.chunking && w.bodyAllowed() {
n0, err := rf.ReadFrom(src)
n += n0
w.written += n0
return n, err
}
n0, err := io.Copy(writerOnly{w}, src)
n += n0
return n, err
}
// debugServerConnections controls whether all server connections are wrapped
// with a verbose logging wrapper.
const debugServerConnections = false
// Create new connection from rwc.
func (srv *Server) newConn(rwc net.Conn) *conn {
c := &conn{
server: srv,
rwc: rwc,
}
if debugServerConnections {
c.rwc = newLoggingConn("server", c.rwc)
}
return c
}
type readResult struct {
n int
err error
b byte // byte read, if n == 1
}
// connReader is the io.Reader wrapper used by *conn. It combines a
// selectively-activated io.LimitedReader (to bound request header
// read sizes) with support for selectively keeping an io.Reader.Read
// call blocked in a background goroutine to wait for activity and
// trigger a CloseNotifier channel.
type connReader struct {
conn *conn
mu sync.Mutex // guards following
hasByte bool
byteBuf [1]byte
cond *sync.Cond
inRead bool
aborted bool // set true before conn.rwc deadline is set to past
remain int64 // bytes remaining
}
func (cr *connReader) lock() {
cr.mu.Lock()
if cr.cond == nil {
cr.cond = sync.NewCond(&cr.mu)
}
}
func (cr *connReader) unlock() { cr.mu.Unlock() }
func (cr *connReader) startBackgroundRead() {
cr.lock()
defer cr.unlock()
if cr.inRead {
panic("invalid concurrent Body.Read call")
}
if cr.hasByte {
return
}
cr.inRead = true
cr.conn.rwc.SetReadDeadline(time.Time{})
go cr.backgroundRead()
}
func (cr *connReader) backgroundRead() {
n, err := cr.conn.rwc.Read(cr.byteBuf[:])
cr.lock()
if n == 1 {
cr.hasByte = true
// We were past the end of the previous request's body already
// (since we wouldn't be in a background read otherwise), so
// this is a pipelined HTTP request. Prior to Go 1.11 we used to
// send on the CloseNotify channel and cancel the context here,
// but the behavior was documented as only "may", and we only
// did that because that's how CloseNotify accidentally behaved
// in very early Go releases prior to context support. Once we
// added context support, people used a Handler's
// Request.Context() and passed it along. Having that context
// cancel on pipelined HTTP requests caused problems.
// Fortunately, almost nothing uses HTTP/1.x pipelining.
// Unfortunately, apt-get does, or sometimes does.
// New Go 1.11 behavior: don't fire CloseNotify or cancel
// contexts on pipelined requests. Shouldn't affect people, but
// fixes cases like Issue 23921. This does mean that a client
// closing their TCP connection after sending a pipelined
// request won't cancel the context, but we'll catch that on any
// write failure (in checkConnErrorWriter.Write).
// If the server never writes, yes, there are still contrived
// server & client behaviors where this fails to ever cancel the
// context, but that's kinda why HTTP/1.x pipelining died
// anyway.
}
if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() {
// Ignore this error. It's the expected error from
// another goroutine calling abortPendingRead.
} else if err != nil {
cr.handleReadError(err)
}
cr.aborted = false
cr.inRead = false
cr.unlock()
cr.cond.Broadcast()
}
func (cr *connReader) abortPendingRead() {
cr.lock()
defer cr.unlock()
if !cr.inRead {
return
}
cr.aborted = true
cr.conn.rwc.SetReadDeadline(aLongTimeAgo)
for cr.inRead {
cr.cond.Wait()
}
cr.conn.rwc.SetReadDeadline(time.Time{})
}
func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain }
func (cr *connReader) setInfiniteReadLimit() { cr.remain = maxInt64 }
func (cr *connReader) hitReadLimit() bool { return cr.remain <= 0 }
// handleReadError is called whenever a Read from the client returns a
// non-nil error.
//
// The provided non-nil err is almost always io.EOF or a "use of
// closed network connection". In any case, the error is not
// particularly interesting, except perhaps for debugging during
// development. Any error means the connection is dead and we should
// down its context.
//
// It may be called from multiple goroutines.
func (cr *connReader) handleReadError(_ error) {
cr.conn.cancelCtx()
cr.closeNotify()
}
// may be called from multiple goroutines.
func (cr *connReader) closeNotify() {
res, _ := cr.conn.curReq.Load().(*response)
if res != nil && atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) {
res.closeNotifyCh <- true
}
}
func (cr *connReader) Read(p []byte) (n int, err error) {
cr.lock()
if cr.inRead {
cr.unlock()
if cr.conn.hijacked() {
panic("invalid Body.Read call. After hijacked, the original Request must not be used")
}
panic("invalid concurrent Body.Read call")
}
if cr.hitReadLimit() {
cr.unlock()
return 0, io.EOF
}
if len(p) == 0 {
cr.unlock()
return 0, nil
}
if int64(len(p)) > cr.remain {
p = p[:cr.remain]
}
if cr.hasByte {
p[0] = cr.byteBuf[0]
cr.hasByte = false
cr.unlock()
return 1, nil
}
cr.inRead = true
cr.unlock()
n, err = cr.conn.rwc.Read(p)
cr.lock()
cr.inRead = false
if err != nil {
cr.handleReadError(err)
}
cr.remain -= int64(n)
cr.unlock()
cr.cond.Broadcast()
return n, err
}
var (
bufioReaderPool sync.Pool
bufioWriter2kPool sync.Pool
bufioWriter4kPool sync.Pool
)
var copyBufPool = sync.Pool{
New: func() interface{} {
b := make([]byte, 32*1024)
return &b
},
}
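// bufioWriterPool returns the sync.Pool for the given bufio.Writer buffer size, or nil if that size is not pooled.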
func bufioWriterPool(size int) *sync.Pool {
switch size {
case 2 << 10:
return &bufioWriter2kPool
case 4 << 10:
return &bufioWriter4kPool
}
return nil
}
func newBufioReader(r io.Reader) *bufio.Reader {
if v := bufioReaderPool.Get(); v != nil {
br := v.(*bufio.Reader)
br.Reset(r)
return br
}
// Note: if this reader size is ever changed, update
// TestHandlerBodyClose's assumptions.
return bufio.NewReader(r)
}
func putBufioReader(br *bufio.Reader) {
br.Reset(nil)
bufioReaderPool.Put(br)
}
func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
pool := bufioWriterPool(size)
if pool != nil {
if v := pool.Get(); v != nil {
bw := v.(*bufio.Writer)
bw.Reset(w)
return bw
}
}
return bufio.NewWriterSize(w, size)
}
func putBufioWriter(bw *bufio.Writer) {
bw.Reset(nil)
if pool := bufioWriterPool(bw.Available()); pool != nil {
pool.Put(bw)
}
}
// DefaultMaxHeaderBytes is the maximum permitted size of the headers
// in an HTTP request.
// This can be overridden by setting Server.MaxHeaderBytes.
const DefaultMaxHeaderBytes = 1 << 20 // 1 MB
func (srv *Server) maxHeaderBytes() int {
if srv.MaxHeaderBytes > 0 {
return srv.MaxHeaderBytes
}
return DefaultMaxHeaderBytes
}
func (srv *Server) initialReadLimitSize() int64 {
return int64(srv.maxHeaderBytes()) + 4096 // bufio slop
}
// wrapper around io.ReadCloser which on first read, sends an
// HTTP/1.1 100 Continue header
type expectContinueReader struct {
resp *response
readCloser io.ReadCloser
closed bool
sawEOF bool
}
func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
if ecr.closed {
return 0, ErrBodyReadAfterClose
}
if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() {
ecr.resp.wroteContinue = true
ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
ecr.resp.conn.bufw.Flush()
}
n, err = ecr.readCloser.Read(p)
if err == io.EOF {
ecr.sawEOF = true
}
return
}
func (ecr *expectContinueReader) Close() error {
ecr.closed = true
return ecr.readCloser.Close()
}
// TimeFormat is the time format to use when generating times in HTTP
// headers. It is like time.RFC1123 but hard-codes GMT as the time
// zone. The time being formatted must be in UTC for Format to
// generate the correct format.
//
// For parsing this time format, see ParseTime.
const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
// appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat))
func appendTime(b []byte, t time.Time) []byte {
const days = "SunMonTueWedThuFriSat"
const months = "JanFebMarAprMayJunJulAugSepOctNovDec"
t = t.UTC()
yy, mm, dd := t.Date()
hh, mn, ss := t.Clock()
day := days[3*t.Weekday():]
mon := months[3*(mm-1):]
return append(b,
day[0], day[1], day[2], ',', ' ',
byte('0'+dd/10), byte('0'+dd%10), ' ',
mon[0], mon[1], mon[2], ' ',
byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ',
byte('0'+hh/10), byte('0'+hh%10), ':',
byte('0'+mn/10), byte('0'+mn%10), ':',
byte('0'+ss/10), byte('0'+ss%10), ' ',
'G', 'M', 'T')
}
var errTooLarge = errors.New("http: request too large")
// Read next request from connection.
func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
if c.hijacked() {
return nil, ErrHijacked
}
var (
wholeReqDeadline time.Time // or zero if none
hdrDeadline time.Time // or zero if none
)
t0 := time.Now()
if d := c.server.readHeaderTimeout(); d != 0 {
hdrDeadline = t0.Add(d)
}
if d := c.server.ReadTimeout; d != 0 {
wholeReqDeadline = t0.Add(d)
}
c.rwc.SetReadDeadline(hdrDeadline)
if d := c.server.WriteTimeout; d != 0 {
defer func() {
c.rwc.SetWriteDeadline(time.Now().Add(d))
}()
}
c.r.setReadLimit(c.server.initialReadLimitSize())
if c.lastMethod == "POST" {
// RFC 7230 section 3 tolerance for old buggy clients.
peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
c.bufr.Discard(numLeadingCRorLF(peek))
}
req, err := readRequest(c.bufr, keepHostHeader)
if err != nil {
if c.r.hitReadLimit() {
return nil, errTooLarge
}
return nil, err
}
if !http1ServerSupportsRequest(req) {
return nil, badRequestError("unsupported protocol version")
}
c.lastMethod = req.Method
c.r.setInfiniteReadLimit()
hosts, haveHost := req.Header["Host"]
isH2Upgrade := req.isH2Upgrade()
if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" {
return nil, badRequestError("missing required Host header")
}
if len(hosts) > 1 {
return nil, badRequestError("too many Host headers")
}
if len(hosts) == 1 && !httpguts.ValidHostHeader(hosts[0]) {
return nil, badRequestError("malformed Host header")
}
for k, vv := range req.Header {
if !httpguts.ValidHeaderFieldName(k) {
return nil, badRequestError("invalid header name")
}
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
return nil, badRequestError("invalid header value")
}
}
}
delete(req.Header, "Host")
ctx, cancelCtx := context.WithCancel(ctx)
req.ctx = ctx
req.RemoteAddr = c.remoteAddr
req.TLS = c.tlsState
if body, ok := req.Body.(*body); ok {
body.doEarlyClose = true
}
// Adjust the read deadline if necessary.
if !hdrDeadline.Equal(wholeReqDeadline) {
c.rwc.SetReadDeadline(wholeReqDeadline)
}
w = &response{
conn: c,
cancelCtx: cancelCtx,
req: req,
reqBody: req.Body,
handlerHeader: make(Header),
contentLength: -1,
closeNotifyCh: make(chan bool, 1),
// We populate these ahead of time so we're not
// reading from req.Header after their Handler starts
// and maybe mutates it (Issue 14940)
wants10KeepAlive: req.wantsHttp10KeepAlive(),
wantsClose: req.wantsClose(),
}
if isH2Upgrade {
w.closeAfterReply = true
}
w.cw.res = w
w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
return w, nil
}
// http1ServerSupportsRequest reports whether Go's HTTP/1.x server
// supports the given request.
func http1ServerSupportsRequest(req *Request) bool {
if req.ProtoMajor == 1 {
return true
}
// Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can
// wire up their own HTTP/2 upgrades.
if req.ProtoMajor == 2 && req.ProtoMinor == 0 &&
req.Method == "PRI" && req.RequestURI == "*" {
return true
}
// Reject HTTP/0.x, and all other HTTP/2+ requests (which
// aren't encoded in ASCII anyway).
return false
}
func (w *response) Header() Header {
if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
// Accessing the header between logically writing it
// and physically writing it means we need to allocate
// a clone to snapshot the logically written state.
w.cw.header = w.handlerHeader.Clone()
}
w.calledHeader = true
return w.handlerHeader
}
// maxPostHandlerReadBytes is the max number of Request.Body bytes not
// consumed by a handler that the server will read from the client
// in order to keep a connection alive. If there are more bytes than
// this, the server, to be paranoid, instead sends a "Connection:
// close" response.
//
// This number is approximately what a typical machine's TCP buffer
// size is anyway. (if we have the bytes on the machine, we might as
// well read them)
const maxPostHandlerReadBytes = 256 << 10
func checkWriteHeaderCode(code int) {
// Issue 22880: require valid WriteHeader status codes.
// For now we only enforce that it's three digits.
// In the future we might block things over 599 (600 and above aren't defined
// at https://httpwg.org/specs/rfc7231.html#status.codes)
// and we might block under 200 (once we have more mature 1xx support).
// But for now any three digits.
//
// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
// no equivalent bogus thing we can realistically send in HTTP/2,
// so we'll consistently panic instead and help people find their bugs
// early. (We can't return an error from WriteHeader even if we wanted to.)
if code < 100 || code > 999 {
panic(fmt.Sprintf("invalid WriteHeader code %v", code))
}
}
// relevantCaller searches the call stack for the first function outside of net/http.
// The purpose of this function is to provide more helpful error messages.
func relevantCaller() runtime.Frame {
pc := make([]uintptr, 16)
n := runtime.Callers(1, pc)
frames := runtime.CallersFrames(pc[:n])
var frame runtime.Frame
for {
frame, more := frames.Next()
if !strings.HasPrefix(frame.Function, "net/http.") {
return frame
}
if !more {
break
}
}
return frame
}
func (w *response) WriteHeader(code int) {
if w.conn.hijacked() {
caller := relevantCaller()
w.conn.server.logf("http: response.WriteHeader on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
return
}
if w.wroteHeader {
caller := relevantCaller()
w.conn.server.logf("http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
return
}
checkWriteHeaderCode(code)
w.wroteHeader = true
w.status = code
if w.calledHeader && w.cw.header == nil {
w.cw.header = w.handlerHeader.Clone()
}
if cl := w.handlerHeader.get("Content-Length"); cl != "" {
v, err := strconv.ParseInt(cl, 10, 64)
if err == nil && v >= 0 {
w.contentLength = v
} else {
w.conn.server.logf("http: invalid Content-Length of %q", cl)
w.handlerHeader.Del("Content-Length")
}
}
}
// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
// This type is used to avoid extra allocations from cloning and/or populating
// the response Header map and all its 1-element slices.
type extraHeader struct {
contentType string
connection string
transferEncoding string
date []byte // written if not nil
contentLength []byte // written if not nil
}
// Sorted the same as extraHeader.Write's loop.
var extraHeaderKeys = [][]byte{
[]byte("Content-Type"),
[]byte("Connection"),
[]byte("Transfer-Encoding"),
}
var (
headerContentLength = []byte("Content-Length: ")
headerDate = []byte("Date: ")
)
// Write writes the headers described in h to w.
//
// This method has a value receiver, despite the somewhat large size
// of h, because it prevents an allocation. The escape analysis isn't
// smart enough to realize this function doesn't mutate h.
func (h extraHeader) Write(w *bufio.Writer) {
if h.date != nil {
w.Write(headerDate)
w.Write(h.date)
w.Write(crlf)
}
if h.contentLength != nil {
w.Write(headerContentLength)
w.Write(h.contentLength)
w.Write(crlf)
}
for i, v := range []string{h.contentType, h.connection, h.transferEncoding} {
if v != "" {
w.Write(extraHeaderKeys[i])
w.Write(colonSpace)
w.WriteString(v)
w.Write(crlf)
}
}
}
// writeHeader finalizes the header sent to the client and writes it
// to cw.res.conn.bufw.
//
// p is not written by writeHeader, but is the first chunk of the body
// that will be written. It is sniffed for a Content-Type if none is
// set explicitly. It's also used to set the Content-Length, if the
// total body size was small and the handler has already finished
// running.
func (cw *chunkWriter) writeHeader(p []byte) {
if cw.wroteHeader {
return
}
cw.wroteHeader = true
w := cw.res
keepAlivesEnabled := w.conn.server.doKeepAlives()
isHEAD := w.req.Method == "HEAD"
// header is written out to w.conn.buf below. Depending on the
// state of the handler, we either own the map or not. If we
// don't own it, the exclude map is created lazily for
// WriteSubset to remove headers. The setHeader struct holds
// headers we need to add.
header := cw.header
owned := header != nil
if !owned {
header = w.handlerHeader
}
var excludeHeader map[string]bool
delHeader := func(key string) {
if owned {
header.Del(key)
return
}
if _, ok := header[key]; !ok {
return
}
if excludeHeader == nil {
excludeHeader = make(map[string]bool)
}
excludeHeader[key] = true
}
var setHeader extraHeader
// Don't write out the fake "Trailer:foo" keys. See TrailerPrefix.
trailers := false
for k := range cw.header {
if strings.HasPrefix(k, TrailerPrefix) {
if excludeHeader == nil {
excludeHeader = make(map[string]bool)
}
excludeHeader[k] = true
trailers = true
}
}
for _, v := range cw.header["Trailer"] {
trailers = true
foreachHeaderElement(v, cw.res.declareTrailer)
}
te := header.get("Transfer-Encoding")
hasTE := te != ""
// If the handler is done but never sent a Content-Length
// response header and this is our first (and last) write, set
// it, even to zero. This helps HTTP/1.0 clients keep their
// "keep-alive" connections alive.
// Exceptions: 304/204/1xx responses never get Content-Length, and if
// it was a HEAD request, we don't know the difference between
// 0 actual bytes and 0 bytes because the handler noticed it
// was a HEAD request and chose not to write anything. So for
// HEAD, the handler should either write the Content-Length or
// write non-zero bytes. If it's actually 0 bytes and the
// handler never looked at the Request.Method, we just don't
// send a Content-Length header.
// Further, we don't send an automatic Content-Length if they
// set a Transfer-Encoding, because they're generally incompatible.
if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
w.contentLength = int64(len(p))
setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
}
// If this was an HTTP/1.0 request with keep-alive and we sent a
// Content-Length back, we can make this a keep-alive response ...
if w.wants10KeepAlive && keepAlivesEnabled {
sentLength := header.get("Content-Length") != ""
if sentLength && header.get("Connection") == "keep-alive" {
w.closeAfterReply = false
}
}
// Check for an explicit (and valid) Content-Length header.
hasCL := w.contentLength != -1
if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) {
_, connectionHeaderSet := header["Connection"]
if !connectionHeaderSet {
setHeader.connection = "keep-alive"
}
} else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose {
w.closeAfterReply = true
}
if header.get("Connection") == "close" || !keepAlivesEnabled {
w.closeAfterReply = true
}
// If the client wanted a 100-continue but we never sent it to
// them (or, more strictly: we never finished reading their
// request body), don't reuse this connection because it's now
// in an unknown state: we might be sending this response at
// the same time the client is now sending its request body
// after a timeout. (Some HTTP clients send Expect:
// 100-continue but knowing that some servers don't support
// it, the clients set a timer and send the body later anyway)
// If we haven't seen EOF, we can't skip over the unread body
// because we don't know if the next bytes on the wire will be
// the body-following-the-timer or the subsequent request.
// See Issue 11549.
if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF {
w.closeAfterReply = true
}
// Per RFC 2616, we should consume the request body before
// replying, if the handler hasn't already done so. But we
// don't want to do an unbounded amount of reading here for
// DoS reasons, so we only try up to a threshold.
// TODO(bradfitz): where does RFC 2616 say that? See Issue 15527
// about HTTP/1.x Handlers concurrently reading and writing, like
// HTTP/2 handlers can do. Maybe this code should be relaxed?
if w.req.ContentLength != 0 && !w.closeAfterReply {
var discard, tooBig bool
switch bdy := w.req.Body.(type) {
case *expectContinueReader:
if bdy.resp.wroteContinue {
discard = true
}
case *body:
bdy.mu.Lock()
switch {
case bdy.closed:
if !bdy.sawEOF {
// Body was closed in handler with non-EOF error.
w.closeAfterReply = true
}
case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
tooBig = true
default:
discard = true
}
bdy.mu.Unlock()
default:
discard = true
}
if discard {
_, err := io.CopyN(ioutil.Discard, w.reqBody, maxPostHandlerReadBytes+1)
switch err {
case nil:
// There must be even more data left over.
tooBig = true
case ErrBodyReadAfterClose:
// Body was already consumed and closed.
case io.EOF:
// The remaining body was just consumed, close it.
err = w.reqBody.Close()
if err != nil {
w.closeAfterReply = true
}
default:
// Some other kind of error occurred, like a read timeout, or
// corrupt chunked encoding. In any case, whatever remains
// on the wire must not be parsed as another HTTP request.
w.closeAfterReply = true
}
}
if tooBig {
w.requestTooLarge()
delHeader("Connection")
setHeader.connection = "close"
}
}
code := w.status
if bodyAllowedForStatus(code) {
// If no content type, apply sniffing algorithm to body.
_, haveType := header["Content-Type"]
// If the Content-Encoding was set and is non-blank,
// we shouldn't sniff the body. See Issue 31753.
ce := header.Get("Content-Encoding")
hasCE := len(ce) > 0
if !hasCE && !haveType && !hasTE && len(p) > 0 {
setHeader.contentType = DetectContentType(p)
}
} else {
for _, k := range suppressedHeaders(code) {
delHeader(k)
}
}
if !header.has("Date") {
setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
}
if hasCL && hasTE && te != "identity" {
// TODO: return an error if WriteHeader gets a return parameter
// For now just ignore the Content-Length.
w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
te, w.contentLength)
delHeader("Content-Length")
hasCL = false
}
if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) {
// do nothing
} else if code == StatusNoContent {
delHeader("Transfer-Encoding")
} else if hasCL {
delHeader("Transfer-Encoding")
} else if w.req.ProtoAtLeast(1, 1) {
// HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no
// content-length has been provided. The connection must be closed after the
// reply is written, and no chunking is to be done. This is the setup
// recommended in the Server-Sent Events candidate recommendation 11,
// section 8.
if hasTE && te == "identity" {
cw.chunking = false
w.closeAfterReply = true
} else {
// HTTP/1.1 or greater: use chunked transfer encoding
// to avoid closing the connection at EOF.
cw.chunking = true
setHeader.transferEncoding = "chunked"
if hasTE && te == "chunked" {
// We will send the chunked Transfer-Encoding header later.
delHeader("Transfer-Encoding")
}
}
} else {
// HTTP version < 1.1: cannot do chunked transfer
// encoding and we don't know the Content-Length so
// signal EOF by closing connection.
w.closeAfterReply = true
delHeader("Transfer-Encoding") // in case already set
}
// Cannot use Content-Length with non-identity Transfer-Encoding.
if cw.chunking {
delHeader("Content-Length")
}
if !w.req.ProtoAtLeast(1, 0) {
return
}
if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) {
delHeader("Connection")
if w.req.ProtoAtLeast(1, 1) {
setHeader.connection = "close"
}
}
writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
cw.header.WriteSubset(w.conn.bufw, excludeHeader)
setHeader.Write(w.conn.bufw)
w.conn.bufw.Write(crlf)
}
// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 7230 section 7 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) {
v = textproto.TrimString(v)
if v == "" {
return
}
if !strings.Contains(v, ",") {
fn(v)
return
}
for _, f := range strings.Split(v, ",") {
if f = textproto.TrimString(f); f != "" {
fn(f)
}
}
}
// writeStatusLine writes an HTTP/1.x Status-Line (RFC 7230 Section 3.1.2)
// to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0.
// code is the response status code.
// scratch is an optional scratch buffer. If it has at least capacity 3, it's used.
func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) {
if is11 {
bw.WriteString("HTTP/1.1 ")
} else {
bw.WriteString("HTTP/1.0 ")
}
var text string
var ok bool
if customSingleResponseStatus {
code, text, ok = getStatusText(code)
} else {
text, ok = statusText[code]
}
if ok {
bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10))
bw.WriteByte(' ')
bw.WriteString(text)
bw.WriteString("\r\n")
} else {
// don't worry about performance
fmt.Fprintf(bw, "%03d status code %d\r\n", code, code)
}
}
// bodyAllowed reports whether a Write is allowed for this response type.
// It's illegal to call this before the header has been flushed.
func (w *response) bodyAllowed() bool {
if !w.wroteHeader {
panic("")
}
return bodyAllowedForStatus(w.status)
}
// The Life Of A Write is like this:
//
// Handler starts. No header has been sent. The handler can either
// write a header, or just start writing. Writing before sending a header
// sends an implicitly empty 200 OK header.
//
// If the handler didn't declare a Content-Length up front, we either
// go into chunking mode or, if the handler finishes running before
// the chunking buffer size, we compute a Content-Length and send that
// in the header instead.
//
// Likewise, if the handler didn't set a Content-Type, we sniff that
// from the initial chunk of output.
//
// The Writers are wired together like:
//
// 1. *response (the ResponseWriter) ->
// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes
// 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type)
// and which writes the chunk headers, if needed.
// 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to ->
// 5. checkConnErrorWriter{c}, which notes any non-nil error on Write
// and populates c.werr with it if so, but otherwise writes to:
// 6. the rwc, the net.Conn.
//
// TODO(bradfitz): short-circuit some of the buffering when the
// initial header contains both a Content-Type and Content-Length.
// Also short-circuit in (1) when the header's been sent and not in
// chunking mode, writing directly to (4) instead, if (2) has no
// buffered data. More generally, we could short-circuit from (1) to
// (3) even in chunking mode if the write size from (1) is over some
// threshold and nothing is in (2). The answer might be mostly making
// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal
// with this instead.
func (w *response) Write(data []byte) (n int, err error) {
return w.write(len(data), data, "")
}
func (w *response) WriteString(data string) (n int, err error) {
return w.write(len(data), nil, data)
}
// either dataB or dataS is non-zero.
func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) {
if w.conn.hijacked() {
if lenData > 0 {
caller := relevantCaller()
w.conn.server.logf("http: response.Write on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
}
return 0, ErrHijacked
}
if !w.wroteHeader {
w.WriteHeader(StatusOK)
}
if lenData == 0 {
return 0, nil
}
if !w.bodyAllowed() {
return 0, ErrBodyNotAllowed
}
w.written += int64(lenData) // ignoring errors, for errorKludge
if w.contentLength != -1 && w.written > w.contentLength {
return 0, ErrContentLength
}
if dataB != nil {
return w.w.Write(dataB)
} else {
return w.w.WriteString(dataS)
}
}
func (w *response) finishRequest() {
w.handlerDone.setTrue()
if !w.wroteHeader {
w.WriteHeader(StatusOK)
}
w.w.Flush()
putBufioWriter(w.w)
w.cw.close()
w.conn.bufw.Flush()
w.conn.r.abortPendingRead()
// Close the body (regardless of w.closeAfterReply) so we can
// re-use its bufio.Reader later safely.
w.reqBody.Close()
if w.req.MultipartForm != nil {
w.req.MultipartForm.RemoveAll()
}
}
// shouldReuseConnection reports whether the underlying TCP connection can be reused.
// It must only be called after the handler is done executing.
func (w *response) shouldReuseConnection() bool {
if w.closeAfterReply {
// The request or something set while executing the
// handler indicated we shouldn't reuse this
// connection.
return false
}
if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
// Did not write enough. Avoid getting out of sync.
return false
}
// There was some error writing to the underlying connection
// during the request, so don't re-use this conn.
if w.conn.werr != nil {
return false
}
if w.closedRequestBodyEarly() {
return false
}
return true
}
func (w *response) closedRequestBodyEarly() bool {
body, ok := w.req.Body.(*body)
return ok && body.didEarlyClose()
}
func (w *response) Flush() {
if !w.wroteHeader {
w.WriteHeader(StatusOK)
}
w.w.Flush()
w.cw.flush()
}
func (c *conn) finalFlush() {
if c.bufr != nil {
// Steal the bufio.Reader (~4KB worth of memory) and its associated
// reader for a future connection.
putBufioReader(c.bufr)
c.bufr = nil
}
if c.bufw != nil {
c.bufw.Flush()
// Steal the bufio.Writer (~4KB worth of memory) and its associated
// writer for a future connection.
putBufioWriter(c.bufw)
c.bufw = nil
}
}
// Close the connection.
func (c *conn) close() {
c.finalFlush()
c.rwc.Close()
}
// rstAvoidanceDelay is the amount of time we sleep after closing the
// write side of a TCP connection before closing the entire socket.
// By sleeping, we increase the chances that the client sees our FIN
// and processes its final data before they process the subsequent RST
// from closing a connection with known unread data.
// This RST seems to occur mostly on BSD systems. (And Windows?)
// This timeout is somewhat arbitrary (~latency around the planet).
const rstAvoidanceDelay = 500 * time.Millisecond
type closeWriter interface {
CloseWrite() error
}
var _ closeWriter = (*net.TCPConn)(nil)
// closeWrite flushes any outstanding data and sends a FIN packet (if
// client is connected via TCP), signalling that we're done. We then
// pause for a bit, hoping the client processes it before any
// subsequent RST.
//
// See https://golang.org/issue/3595
func (c *conn) closeWriteAndWait() {
c.finalFlush()
if tcp, ok := c.rwc.(closeWriter); ok {
tcp.CloseWrite()
}
time.Sleep(rstAvoidanceDelay)
}
// validNextProto reports whether the proto is not a blacklisted ALPN
// protocol name. Empty and built-in protocol types are blacklisted
// and can't be overridden with alternate implementations.
func validNextProto(proto string) bool {
switch proto {
case "", "http/1.1", "http/1.0":
return false
}
return true
}
func (c *conn) setState(nc net.Conn, state ConnState) {
srv := c.server
switch state {
case StateNew:
srv.trackConn(c, true)
case StateHijacked, StateClosed:
srv.trackConn(c, false)
}
if state > 0xff || state < 0 {
panic("internal error")
}
packedState := uint64(time.Now().Unix()<<8) | uint64(state)
atomic.StoreUint64(&c.curState.atomic, packedState)
if hook := srv.ConnState; hook != nil {
hook(nc, state)
}
}
func (c *conn) getState() (state ConnState, unixSec int64) {
packedState := atomic.LoadUint64(&c.curState.atomic)
return ConnState(packedState & 0xff), int64(packedState >> 8)
}
// badRequestError is a literal string (used by the server in HTML,
// unescaped) to tell the user why their request was bad. It should
// be plain text without user info or other embedded errors.
type badRequestError string
func (e badRequestError) Error() string { return "Bad Request: " + string(e) }
// ErrAbortHandler is a sentinel panic value to abort a handler.
// While any panic from ServeHTTP aborts the response to the client,
// panicking with ErrAbortHandler also suppresses logging of a stack
// trace to the server's error log.
var ErrAbortHandler = errors.New("net/http: abort Handler")
// isCommonNetReadError reports whether err is a common error
// encountered during reading a request off the network when the
// client has gone away or had its read fail somehow. This is used to
// determine which logs are interesting enough to log about.
func isCommonNetReadError(err error) bool {
if err == io.EOF {
return true
}
if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
return true
}
if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
return true
}
return false
}
// Serve a new connection.
func (c *conn) serve(ctx context.Context) {
c.remoteAddr = c.rwc.RemoteAddr().String()
ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr())
defer func() {
if err := recover(); err != nil && err != ErrAbortHandler {
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
}
if !c.hijacked() {
c.close()
c.setState(c.rwc, StateClosed)
}
}()
if tlsConn, ok := c.rwc.(*tls.Conn); ok {
if d := c.server.ReadTimeout; d != 0 {
c.rwc.SetReadDeadline(time.Now().Add(d))
}
if d := c.server.WriteTimeout; d != 0 {
c.rwc.SetWriteDeadline(time.Now().Add(d))
}
if err := tlsConn.Handshake(); err != nil {
// If the handshake failed due to the client not speaking
// TLS, assume they're speaking plaintext HTTP and write a
// 400 response on the TLS conn's underlying net.Conn.
if re, ok := err.(tls.RecordHeaderError); ok && re.Conn != nil && tlsRecordHeaderLooksLikeHTTP(re.RecordHeader) {
io.WriteString(re.Conn, "HTTP/1.0 400 Bad Request\r\n\r\nClient sent an HTTP request to an HTTPS server.\n")
re.Conn.Close()
return
}
c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err)
return
}
c.tlsState = new(tls.ConnectionState)
*c.tlsState = tlsConn.ConnectionState()
if proto := c.tlsState.NegotiatedProtocol; validNextProto(proto) {
if fn := c.server.TLSNextProto[proto]; fn != nil {
h := initALPNRequest{ctx, tlsConn, serverHandler{c.server}}
fn(c.server, tlsConn, h)
}
return
}
}
// HTTP/1.x from here on.
ctx, cancelCtx := context.WithCancel(ctx)
c.cancelCtx = cancelCtx
defer cancelCtx()
c.r = &connReader{conn: c}
c.bufr = newBufioReader(c.r)
c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)
for {
w, err := c.readRequest(ctx)
if c.r.remain != c.server.initialReadLimitSize() {
// If we read any bytes off the wire, we're active.
c.setState(c.rwc, StateActive)
}
if err != nil {
const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n"
switch {
case err == errTooLarge:
// Their HTTP client may or may not be
// able to read this if we're
// responding to them and hanging up
// while they're still writing their
// request. Undefined behavior.
const publicErr = "431 Request Header Fields Too Large"
fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
c.closeWriteAndWait()
return
case isUnsupportedTEError(err):
// Respond as per RFC 7230 Section 3.3.1 which says,
// A server that receives a request message with a
// transfer coding it does not understand SHOULD
// respond with 501 (Not Implemented).
code := StatusNotImplemented
// We purposefully aren't echoing back the transfer-encoding's value,
// so as to mitigate the risk of cross-site scripting by an attacker.
fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s%sUnsupported transfer encoding", code, StatusText(code), errorHeaders)
return
case isCommonNetReadError(err):
return // don't reply
default:
publicErr := "400 Bad Request"
if v, ok := err.(badRequestError); ok {
publicErr = publicErr + ": " + string(v)
}
fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
return
}
}
// Expect 100 Continue support
req := w.req
if req.expectsContinue() {
if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
// Wrap the Body reader with one that replies on the connection
req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
}
} else if req.Header.get("Expect") != "" {
w.sendExpectationFailed()
return
}
c.curReq.Store(w)
if requestBodyRemains(req.Body) {
registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead)
} else {
w.conn.r.startBackgroundRead()
}
// HTTP cannot have multiple simultaneous active requests.[*]
// Until the server replies to this request, it can't read another,
// so we might as well run the handler in this goroutine.
// [*] Not strictly true: HTTP pipelining. We could let them all process
// in parallel even if their responses need to be serialized.
// But we're not going to implement HTTP pipelining because it
// was never deployed in the wild and the answer is HTTP/2.
serverHandler{c.server}.ServeHTTP(w, w.req)
w.cancelCtx()
if c.hijacked() {
return
}
w.finishRequest()
if !w.shouldReuseConnection() {
if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
c.closeWriteAndWait()
}
return
}
c.setState(c.rwc, StateIdle)
c.curReq.Store((*response)(nil))
if !w.conn.server.doKeepAlives() {
// We're in shutdown mode. We might've replied
// to the user without "Connection: close" and
// they might think they can send another
// request, but such is life with HTTP/1.1.
return
}
if d := c.server.idleTimeout(); d != 0 {
c.rwc.SetReadDeadline(time.Now().Add(d))
if _, err := c.bufr.Peek(4); err != nil {
return
}
}
c.rwc.SetReadDeadline(time.Time{})
}
}
func (w *response) sendExpectationFailed() {
// TODO(bradfitz): let ServeHTTP handlers handle
// requests with non-standard expectation[s]? Seems
// theoretical at best, and doesn't fit into the
// current ServeHTTP model anyway. We'd need to
// make the ResponseWriter an optional
// "ExpectReplier" interface or something.
//
// For now we'll just obey RFC 7231 5.1.1 which says
// "A server that receives an Expect field-value other
// than 100-continue MAY respond with a 417 (Expectation
// Failed) status code to indicate that the unexpected
// expectation cannot be met."
w.Header().Set("Connection", "close")
w.WriteHeader(StatusExpectationFailed)
w.finishRequest()
}
// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter
// and a Hijacker.
func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
if w.handlerDone.isSet() {
panic("net/http: Hijack called after ServeHTTP finished")
}
if w.wroteHeader {
w.cw.flush()
}
c := w.conn
c.mu.Lock()
defer c.mu.Unlock()
// Release the bufioWriter that writes to the chunk writer, it is not
// used after a connection has been hijacked.
rwc, buf, err = c.hijackLocked()
if err == nil {
putBufioWriter(w.w)
w.w = nil
}
return rwc, buf, err
}
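// Illustrative usage sketch (not part of this package): taking over the raw
// connection from inside a handler via the Hijacker interface, e.g. before
// switching to a non-HTTP protocol. The prompt text is a placeholder and the
// snippet assumes the usual net/http import in a separate program.
//
//	func takeover(w http.ResponseWriter, r *http.Request) {
//		hj, ok := w.(http.Hijacker)
//		if !ok {
//			http.Error(w, "hijacking not supported", http.StatusInternalServerError)
//			return
//		}
//		conn, bufrw, err := hj.Hijack()
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusInternalServerError)
//			return
//		}
//		defer conn.Close() // the server no longer manages this connection
//		bufrw.WriteString("now speaking raw TCP\n")
//		bufrw.Flush()
//	}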
func (w *response) CloseNotify() <-chan bool {
if w.handlerDone.isSet() {
panic("net/http: CloseNotify called after ServeHTTP finished")
}
return w.closeNotifyCh
}
func registerOnHitEOF(rc io.ReadCloser, fn func()) {
switch v := rc.(type) {
case *expectContinueReader:
registerOnHitEOF(v.readCloser, fn)
case *body:
v.registerOnHitEOF(fn)
default:
panic("unexpected type " + fmt.Sprintf("%T", rc))
}
}
// requestBodyRemains reports whether future calls to Read
// on rc might yield more data.
func requestBodyRemains(rc io.ReadCloser) bool {
if rc == NoBody {
return false
}
switch v := rc.(type) {
case *expectContinueReader:
return requestBodyRemains(v.readCloser)
case *body:
return v.bodyRemains()
default:
panic("unexpected type " + fmt.Sprintf("%T", rc))
}
}
// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as HTTP handlers. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler that calls f.
type HandlerFunc func(ResponseWriter, *Request)
// ServeHTTP calls f(w, r).
func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) {
f(w, r)
}
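// Illustrative usage sketch (not part of this package): adapting an ordinary
// function with HandlerFunc so it satisfies the Handler interface, in a
// separate program. The route and port are placeholders.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//		"net/http"
//	)
//
//	func hello(w http.ResponseWriter, r *http.Request) {
//		fmt.Fprintf(w, "hello from %s\n", r.URL.Path)
//	}
//
//	func main() {
//		var h http.Handler = http.HandlerFunc(hello) // the adapter at work
//		http.Handle("/", h)
//		log.Fatal(http.ListenAndServe(":8080", nil))
//	}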
// Helper handlers
// Error replies to the request with the specified error message and HTTP code.
// It does not otherwise end the request; the caller should ensure no further
// writes are done to w.
// The error message should be plain text.
func Error(w ResponseWriter, error string, code int) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(code)
fmt.Fprintln(w, error)
}
// NotFound replies to the request with an HTTP 404 not found error.
func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) }
// NotFoundHandler returns a simple request handler
// that replies to each request with a ``404 page not found'' reply.
func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
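// Illustrative usage sketch (not part of this package): replying with the
// Error and NotFound helpers from a handler in a separate program. The
// loadItem helper and /items/ route are hypothetical placeholders.
//
//	func itemHandler(w http.ResponseWriter, r *http.Request) {
//		id := strings.TrimPrefix(r.URL.Path, "/items/")
//		item, err := loadItem(id) // hypothetical lookup
//		if err != nil {
//			http.Error(w, "internal error", http.StatusInternalServerError)
//			return
//		}
//		if item == nil {
//			http.NotFound(w, r)
//			return
//		}
//		fmt.Fprintln(w, item)
//	}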
// StripPrefix returns a handler that serves HTTP requests
// by removing the given prefix from the request URL's Path
// and invoking the handler h. StripPrefix handles a
// request for a path that doesn't begin with prefix by
// replying with an HTTP 404 not found error.
func StripPrefix(prefix string, h Handler) Handler {
if prefix == "" {
return h
}
return HandlerFunc(func(w ResponseWriter, r *Request) {
if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) {
r2 := new(Request)
*r2 = *r
r2.URL = new(url.URL)
*r2.URL = *r.URL
r2.URL.Path = p
h.ServeHTTP(w, r2)
} else {
NotFound(w, r)
}
})
}
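// Illustrative usage sketch (not part of this package): serving files under a
// URL prefix by combining StripPrefix with FileServer. The ./public directory
// and /static/ prefix are placeholders.
//
//	fs := http.FileServer(http.Dir("./public"))
//	// A request for /static/css/site.css reaches FileServer as /css/site.css;
//	// paths outside /static/ fall through to a 404.
//	http.Handle("/static/", http.StripPrefix("/static/", fs))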
// Redirect replies to the request with a redirect to url,
// which may be a path relative to the request path.
//
// The provided code should be in the 3xx range and is usually
// StatusMovedPermanently, StatusFound or StatusSeeOther.
//
// If the Content-Type header has not been set, Redirect sets it
// to "text/html; charset=utf-8" and writes a small HTML body.
// Setting the Content-Type header to any value, including nil,
// disables that behavior.
func Redirect(w ResponseWriter, r *Request, url string, code int) {
if u, err := urlpkg.Parse(url); err == nil {
// If url was relative, make its path absolute by
// combining with request path.
// The client would probably do this for us,
// but doing it ourselves is more reliable.
// See RFC 7231, section 7.1.2
if u.Scheme == "" && u.Host == "" {
oldpath := r.URL.Path
if oldpath == "" { // should not happen, but avoid a crash if it does
oldpath = "/"
}
// no leading http://server
if url == "" || url[0] != '/' {
// make relative path absolute
olddir, _ := path.Split(oldpath)
url = olddir + url
}
var query string
if i := strings.Index(url, "?"); i != -1 {
url, query = url[:i], url[i:]
}
// clean up but preserve trailing slash
trailing := strings.HasSuffix(url, "/")
url = path.Clean(url)
if trailing && !strings.HasSuffix(url, "/") {
url += "/"
}
url += query
}
}
h := w.Header()
// RFC 7231 notes that a short HTML body is usually included in
// the response because older user agents may not understand 301/307.
// Do it only if the request didn't already have a Content-Type header.
_, hadCT := h["Content-Type"]
h.Set("Location", hexEscapeNonASCII(url))
if !hadCT && (r.Method == "GET" || r.Method == "HEAD") {
h.Set("Content-Type", "text/html; charset=utf-8")
}
w.WriteHeader(code)
// Shouldn't send the body for POST or HEAD; that leaves GET.
if !hadCT && r.Method == "GET" {
body := "<a href=\"" + htmlEscape(url) + "\">" + statusText[code] + "</a>.\n"
fmt.Fprintln(w, body)
}
}
var htmlReplacer = strings.NewReplacer(
"&", "&",
"<", "<",
">", ">",
// """ is shorter than """.
`"`, """,
// "'" is shorter than "'" and apos was not in HTML until HTML5.
"'", "'",
)
func htmlEscape(s string) string {
return htmlReplacer.Replace(s)
}
// Redirect to a fixed URL
type redirectHandler struct {
url string
code int
}
func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
Redirect(w, r, rh.url, rh.code)
}
// RedirectHandler returns a request handler that redirects
// each request it receives to the given url using the given
// status code.
//
// The provided code should be in the 3xx range and is usually
// StatusMovedPermanently, StatusFound or StatusSeeOther.
func RedirectHandler(url string, code int) Handler {
return &redirectHandler{url, code}
}
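// Illustrative usage sketch (not part of this package): issuing redirects
// with RedirectHandler and Redirect, assuming mux is an *http.ServeMux
// created elsewhere. The paths are placeholders.
//
//	// Permanently move an old URL space.
//	mux.Handle("/old-docs/", http.RedirectHandler("/docs/", http.StatusMovedPermanently))
//
//	// Redirect after a successful form POST from inside a handler.
//	mux.HandleFunc("/login", func(w http.ResponseWriter, r *http.Request) {
//		http.Redirect(w, r, "/account", http.StatusSeeOther)
//	})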
// ServeMux is an HTTP request multiplexer.
// It matches the URL of each incoming request against a list of registered
// patterns and calls the handler for the pattern that
// most closely matches the URL.
//
// Patterns name fixed, rooted paths, like "/favicon.ico",
// or rooted subtrees, like "/images/" (note the trailing slash).
// Longer patterns take precedence over shorter ones, so that
// if there are handlers registered for both "/images/"
// and "/images/thumbnails/", the latter handler will be
// called for paths beginning "/images/thumbnails/" and the
// former will receive requests for any other paths in the
// "/images/" subtree.
//
// Note that since a pattern ending in a slash names a rooted subtree,
// the pattern "/" matches all paths not matched by other registered
// patterns, not just the URL with Path == "/".
//
// If a subtree has been registered and a request is received naming the
// subtree root without its trailing slash, ServeMux redirects that
// request to the subtree root (adding the trailing slash). This behavior can
// be overridden with a separate registration for the path without
// the trailing slash. For example, registering "/images/" causes ServeMux
// to redirect a request for "/images" to "/images/", unless "/images" has
// been registered separately.
//
// Patterns may optionally begin with a host name, restricting matches to
// URLs on that host only. Host-specific patterns take precedence over
// general patterns, so that a handler might register for the two patterns
// "/codesearch" and "codesearch.google.com/" without also taking over
// requests for "http://www.google.com/".
//
// ServeMux also takes care of sanitizing the URL request path and the Host
// header, stripping the port number and redirecting any request containing . or
// .. elements or repeated slashes to an equivalent, cleaner URL.
type ServeMux struct {
mu sync.RWMutex
m map[string]muxEntry
es []muxEntry // slice of entries sorted from longest to shortest.
hosts bool // whether any patterns contain hostnames
}
type muxEntry struct {
h Handler
pattern string
}
// NewServeMux allocates and returns a new ServeMux.
func NewServeMux() *ServeMux { return new(ServeMux) }
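// Illustrative usage sketch (not part of this package): the matching rules
// described above, shown on a mux in a separate program. The handler
// functions and the admin.example.com host are hypothetical placeholders.
//
//	mux := http.NewServeMux()
//	mux.HandleFunc("/", homeHandler)                    // catches everything not matched below
//	mux.HandleFunc("/favicon.ico", faviconHandler)      // exact path
//	mux.HandleFunc("/images/", imagesHandler)           // rooted subtree
//	mux.HandleFunc("/images/thumbnails/", thumbHandler) // longer pattern wins inside its subtree
//	mux.HandleFunc("admin.example.com/", adminHandler)  // host-specific pattern
//	log.Fatal(http.ListenAndServe(":8080", mux))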
// DefaultServeMux is the default ServeMux used by Serve.
var DefaultServeMux = &defaultServeMux
var defaultServeMux ServeMux
// cleanPath returns the canonical path for p, eliminating . and .. elements.
func cleanPath(p string) string {
if p == "" {
return "/"
}
if p[0] != '/' {
p = "/" + p
}
np := path.Clean(p)
// path.Clean removes trailing slash except for root;
// put the trailing slash back if necessary.
if p[len(p)-1] == '/' && np != "/" {
// Fast path for common case of p being the string we want:
if len(p) == len(np)+1 && strings.HasPrefix(p, np) {
np = p
} else {
np += "/"
}
}
return np
}
// stripHostPort returns h without any trailing ":<port>".
func stripHostPort(h string) string {
// If no port on host, return unchanged
if strings.IndexByte(h, ':') == -1 {
return h
}
host, _, err := net.SplitHostPort(h)
if err != nil {
return h // on error, return unchanged
}
return host
}
// Find a handler on a handler map given a path string.
// Most-specific (longest) pattern wins.
func (mux *ServeMux) match(path string) (h Handler, pattern string) {
// Check for exact match first.
v, ok := mux.m[path]
if ok {
return v.h, v.pattern
}
// Check for longest valid match. mux.es contains all patterns
// that end in / sorted from longest to shortest.
for _, e := range mux.es {
if strings.HasPrefix(path, e.pattern) {
return e.h, e.pattern
}
}
return nil, ""
}
// redirectToPathSlash determines if the given path needs appending "/" to it.
// This occurs when a handler for path + "/" was already registered, but
// not for path itself. If the path needs appending to, it creates a new
// URL, setting the path to u.Path + "/" and returning true to indicate so.
func (mux *ServeMux) redirectToPathSlash(host, path string, u *url.URL) (*url.URL, bool) {
mux.mu.RLock()
shouldRedirect := mux.shouldRedirectRLocked(host, path)
mux.mu.RUnlock()
if !shouldRedirect {
return u, false
}
path = path + "/"
u = &url.URL{Path: path, RawQuery: u.RawQuery}
return u, true
}
// shouldRedirectRLocked reports whether the given path and host should be redirected to
// path+"/". This should happen if a handler is registered for path+"/" but
// not path -- see comments at ServeMux.
func (mux *ServeMux) shouldRedirectRLocked(host, path string) bool {
p := []string{path, host + path}
for _, c := range p {
if _, exist := mux.m[c]; exist {
return false
}
}
n := len(path)
if n == 0 {
return false
}
for _, c := range p {
if _, exist := mux.m[c+"/"]; exist {
return path[n-1] != '/'
}
}
return false
}
// Handler returns the handler to use for the given request,
// consulting r.Method, r.Host, and r.URL.Path. It always returns
// a non-nil handler. If the path is not in its canonical form, the
// handler will be an internally-generated handler that redirects
// to the canonical path. If the host contains a port, it is ignored
// when matching handlers.
//
// The path and host are used unchanged for CONNECT requests.
//
// Handler also returns the registered pattern that matches the
// request or, in the case of internally-generated redirects,
// the pattern that will match after following the redirect.
//
// If there is no registered handler that applies to the request,
// Handler returns a ``page not found'' handler and an empty pattern.
func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {
// CONNECT requests are not canonicalized.
if r.Method == "CONNECT" {
// If r.URL.Path is /tree and its handler is not registered,
// the /tree -> /tree/ redirect applies to CONNECT requests
// but the path canonicalization does not.
if u, ok := mux.redirectToPathSlash(r.URL.Host, r.URL.Path, r.URL); ok {
return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
}
return mux.handler(r.Host, r.URL.Path)
}
// All other requests have any port stripped and path cleaned
// before passing to mux.handler.
host := stripHostPort(r.Host)
path := cleanPath(r.URL.Path)
// If the given path is /tree and its handler is not registered,
// redirect for /tree/.
if u, ok := mux.redirectToPathSlash(host, path, r.URL); ok {
return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
}
if path != r.URL.Path {
_, pattern = mux.handler(host, path)
url := *r.URL
url.Path = path
return RedirectHandler(url.String(), StatusMovedPermanently), pattern
}
return mux.handler(host, r.URL.Path)
}
// handler is the main implementation of Handler.
// The path is known to be in canonical form, except for CONNECT methods.
func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) {
mux.mu.RLock()
defer mux.mu.RUnlock()
// Host-specific pattern takes precedence over generic ones
if mux.hosts {
h, pattern = mux.match(host + path)
}
if h == nil {
h, pattern = mux.match(path)
}
if h == nil {
h, pattern = NotFoundHandler(), ""
}
return
}
// ServeHTTP dispatches the request to the handler whose
// pattern most closely matches the request URL.
func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
if r.RequestURI == "*" {
if r.ProtoAtLeast(1, 1) {
w.Header().Set("Connection", "close")
}
w.WriteHeader(StatusBadRequest)
return
}
h, _ := mux.Handler(r)
h.ServeHTTP(w, r)
}
// Handle registers the handler for the given pattern.
// If a handler already exists for pattern, Handle panics.
func (mux *ServeMux) Handle(pattern string, handler Handler) {
mux.mu.Lock()
defer mux.mu.Unlock()
if pattern == "" {
panic("http: invalid pattern")
}
if handler == nil {
panic("http: nil handler")
}
if _, exist := mux.m[pattern]; exist {
panic("http: multiple registrations for " + pattern)
}
if mux.m == nil {
mux.m = make(map[string]muxEntry)
}
e := muxEntry{h: handler, pattern: pattern}
mux.m[pattern] = e
if pattern[len(pattern)-1] == '/' {
mux.es = appendSorted(mux.es, e)
}
if pattern[0] != '/' {
mux.hosts = true
}
}
func appendSorted(es []muxEntry, e muxEntry) []muxEntry {
n := len(es)
i := sort.Search(n, func(i int) bool {
return len(es[i].pattern) < len(e.pattern)
})
if i == n {
return append(es, e)
}
// we now know that i points at where we want to insert
es = append(es, muxEntry{}) // try to grow the slice in place, any entry works.
copy(es[i+1:], es[i:]) // Move shorter entries down
es[i] = e
return es
}
// HandleFunc registers the handler function for the given pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
if handler == nil {
panic("http: nil handler")
}
mux.Handle(pattern, HandlerFunc(handler))
}
// Handle registers the handler for the given pattern
// in the DefaultServeMux.
// The documentation for ServeMux explains how patterns are matched.
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
// HandleFunc registers the handler function for the given pattern
// in the DefaultServeMux.
// The documentation for ServeMux explains how patterns are matched.
func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
DefaultServeMux.HandleFunc(pattern, handler)
}
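// Illustrative usage sketch (not part of this package): registering on the
// DefaultServeMux with the package-level helpers and serving with a nil
// handler. The route and port are placeholders.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//		"net/http"
//	)
//
//	func main() {
//		http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
//			fmt.Fprintln(w, "ok")
//		})
//		// A nil handler means DefaultServeMux.
//		log.Fatal(http.ListenAndServe(":8080", nil))
//	}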
// Serve accepts incoming HTTP connections on the listener l,
// creating a new service goroutine for each. The service goroutines
// read requests and then call handler to reply to them.
//
// The handler is typically nil, in which case the DefaultServeMux is used.
//
// HTTP/2 support is only enabled if the Listener returns *tls.Conn
// connections and they were configured with "h2" in the TLS
// Config.NextProtos.
//
// Serve always returns a non-nil error.
func Serve(l net.Listener, handler Handler) error {
srv := &Server{Handler: handler}
return srv.Serve(l)
}
// ServeTLS accepts incoming HTTPS connections on the listener l,
// creating a new service goroutine for each. The service goroutines
// read requests and then call handler to reply to them.
//
// The handler is typically nil, in which case the DefaultServeMux is used.
//
// Additionally, files containing a certificate and matching private key
// for the server must be provided. If the certificate is signed by a
// certificate authority, the certFile should be the concatenation
// of the server's certificate, any intermediates, and the CA's certificate.
//
// ServeTLS always returns a non-nil error.
func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error {
srv := &Server{Handler: handler}
return srv.ServeTLS(l, certFile, keyFile)
}
// A Server defines parameters for running an HTTP server.
// The zero value for Server is a valid configuration.
type Server struct {
// Addr optionally specifies the TCP address for the server to listen on,
// in the form "host:port". If empty, ":http" (port 80) is used.
// The service names are defined in RFC 6335 and assigned by IANA.
// See net.Dial for details of the address format.
Addr string
Handler Handler // handler to invoke, http.DefaultServeMux if nil
// TLSConfig optionally provides a TLS configuration for use
// by ServeTLS and ListenAndServeTLS. Note that this value is
// cloned by ServeTLS and ListenAndServeTLS, so it's not
// possible to modify the configuration with methods like
// tls.Config.SetSessionTicketKeys. To use
// SetSessionTicketKeys, use Server.Serve with a TLS Listener
// instead.
TLSConfig *tls.Config
// ReadTimeout is the maximum duration for reading the entire
// request, including the body.
//
// Because ReadTimeout does not let Handlers make per-request
// decisions on each request body's acceptable deadline or
// upload rate, most users will prefer to use
// ReadHeaderTimeout. It is valid to use them both.
ReadTimeout time.Duration
// ReadHeaderTimeout is the amount of time allowed to read
// request headers. The connection's read deadline is reset
// after reading the headers and the Handler can decide what
// is considered too slow for the body. If ReadHeaderTimeout
// is zero, the value of ReadTimeout is used. If both are
// zero, there is no timeout.
ReadHeaderTimeout time.Duration
// WriteTimeout is the maximum duration before timing out
// writes of the response. It is reset whenever a new
// request's header is read. Like ReadTimeout, it does not
// let Handlers make decisions on a per-request basis.
WriteTimeout time.Duration
// IdleTimeout is the maximum amount of time to wait for the
// next request when keep-alives are enabled. If IdleTimeout
// is zero, the value of ReadTimeout is used. If both are
// zero, there is no timeout.
IdleTimeout time.Duration
// MaxHeaderBytes controls the maximum number of bytes the
// server will read parsing the request header's keys and
// values, including the request line. It does not limit the
// size of the request body.
// If zero, DefaultMaxHeaderBytes is used.
MaxHeaderBytes int
// TLSNextProto optionally specifies a function to take over
// ownership of the provided TLS connection when an ALPN
// protocol upgrade has occurred. The map key is the protocol
// name negotiated. The Handler argument should be used to
// handle HTTP requests and will initialize the Request's TLS
// and RemoteAddr if not already set. The connection is
// automatically closed when the function returns.
// If TLSNextProto is not nil, HTTP/2 support is not enabled
// automatically.
TLSNextProto map[string]func(*Server, *tls.Conn, Handler)
// ConnState specifies an optional callback function that is
// called when a client connection changes state. See the
// ConnState type and associated constants for details.
ConnState func(net.Conn, ConnState)
// ErrorLog specifies an optional logger for errors accepting
// connections, unexpected behavior from handlers, and
// underlying FileSystem errors.
// If nil, logging is done via the log package's standard logger.
ErrorLog *log.Logger
// BaseContext optionally specifies a function that returns
// the base context for incoming requests on this server.
// The provided Listener is the specific Listener that's
// about to start accepting requests.
// If BaseContext is nil, the default is context.Background().
// If non-nil, it must return a non-nil context.
BaseContext func(net.Listener) context.Context
// ConnContext optionally specifies a function that modifies
// the context used for a new connection c. The provided ctx
// is derived from the base context and has a ServerContextKey
// value.
ConnContext func(ctx context.Context, c net.Conn) context.Context
disableKeepAlives int32 // accessed atomically.
inShutdown int32 // accessed atomically (non-zero means we're in Shutdown)
nextProtoOnce sync.Once // guards setupHTTP2_* init
nextProtoErr error // result of http2.ConfigureServer if used
mu sync.Mutex
listeners map[*net.Listener]struct{}
activeConn map[*conn]struct{}
doneChan chan struct{}
onShutdown []func()
}
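// Illustrative configuration sketch (not part of this package): constructing
// a Server explicitly so the timeouts above can be set, rather than using the
// package-level helpers. The values are arbitrary placeholders, not
// recommendations, and mux is an http.Handler created elsewhere.
//
//	srv := &http.Server{
//		Addr:              ":8080",
//		Handler:           mux,
//		ReadHeaderTimeout: 5 * time.Second,
//		ReadTimeout:       10 * time.Second,
//		WriteTimeout:      30 * time.Second,
//		IdleTimeout:       120 * time.Second,
//		MaxHeaderBytes:    1 << 20,
//	}
//	log.Fatal(srv.ListenAndServe())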
func (s *Server) getDoneChan() <-chan struct{} {
s.mu.Lock()
defer s.mu.Unlock()
return s.getDoneChanLocked()
}
func (s *Server) getDoneChanLocked() chan struct{} {
if s.doneChan == nil {
s.doneChan = make(chan struct{})
}
return s.doneChan
}
func (s *Server) closeDoneChanLocked() {
ch := s.getDoneChanLocked()
select {
case <-ch:
// Already closed. Don't close again.
default:
// Safe to close here. We're the only closer, guarded
// by s.mu.
close(ch)
}
}
// Close immediately closes all active net.Listeners and any
// connections in state StateNew, StateActive, or StateIdle. For a
// graceful shutdown, use Shutdown.
//
// Close does not attempt to close (and does not even know about)
// any hijacked connections, such as WebSockets.
//
// Close returns any error returned from closing the Server's
// underlying Listener(s).
func (srv *Server) Close() error {
atomic.StoreInt32(&srv.inShutdown, 1)
srv.mu.Lock()
defer srv.mu.Unlock()
srv.closeDoneChanLocked()
err := srv.closeListenersLocked()
for c := range srv.activeConn {
c.rwc.Close()
delete(srv.activeConn, c)
}
return err
}
// shutdownPollInterval is how often we poll for quiescence
// during Server.Shutdown. This is lower during tests, to
// speed up tests.
// Ideally we could find a solution that doesn't involve polling,
// but which also doesn't have a high runtime cost (and doesn't
// involve any contentious mutexes), but that is left as an
// exercise for the reader.
var shutdownPollInterval = 500 * time.Millisecond
// Shutdown gracefully shuts down the server without interrupting any
// active connections. Shutdown works by first closing all open
// listeners, then closing all idle connections, and then waiting
// indefinitely for connections to return to idle and then shut down.
// If the provided context expires before the shutdown is complete,
// Shutdown returns the context's error, otherwise it returns any
// error returned from closing the Server's underlying Listener(s).
//
// When Shutdown is called, Serve, ListenAndServe, and
// ListenAndServeTLS immediately return ErrServerClosed. Make sure the
// program doesn't exit and waits instead for Shutdown to return.
//
// Shutdown does not attempt to close nor wait for hijacked
// connections such as WebSockets. The caller of Shutdown should
// separately notify such long-lived connections of shutdown and wait
// for them to close, if desired. See RegisterOnShutdown for a way to
// register shutdown notification functions.
//
// Once Shutdown has been called on a server, it may not be reused;
// future calls to methods such as Serve will return ErrServerClosed.
func (srv *Server) Shutdown(ctx context.Context) error {
atomic.StoreInt32(&srv.inShutdown, 1)
srv.mu.Lock()
lnerr := srv.closeListenersLocked()
srv.closeDoneChanLocked()
for _, f := range srv.onShutdown {
go f()
}
srv.mu.Unlock()
ticker := time.NewTicker(shutdownPollInterval)
defer ticker.Stop()
for {
if srv.closeIdleConns() {
return lnerr
}
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
}
}
}
// RegisterOnShutdown registers a function to call on Shutdown.
// This can be used to gracefully shutdown connections that have
// undergone ALPN protocol upgrade or that have been hijacked.
// This function should start protocol-specific graceful shutdown,
// but should not wait for shutdown to complete.
func (srv *Server) RegisterOnShutdown(f func()) {
srv.mu.Lock()
srv.onShutdown = append(srv.onShutdown, f)
srv.mu.Unlock()
}
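// Illustrative graceful-shutdown sketch (not part of this package): stop
// accepting new connections on interrupt and give in-flight requests a
// deadline to finish, assuming srv is the *http.Server configured elsewhere.
// The 10-second timeout is a placeholder.
//
//	idleConnsClosed := make(chan struct{})
//	go func() {
//		sigint := make(chan os.Signal, 1)
//		signal.Notify(sigint, os.Interrupt)
//		<-sigint
//		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//		defer cancel()
//		if err := srv.Shutdown(ctx); err != nil {
//			log.Printf("HTTP server Shutdown: %v", err)
//		}
//		close(idleConnsClosed)
//	}()
//	if err := srv.ListenAndServe(); err != http.ErrServerClosed {
//		log.Fatalf("HTTP server ListenAndServe: %v", err)
//	}
//	<-idleConnsClosed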
// closeIdleConns closes all idle connections and reports whether the
// server is quiescent.
func (s *Server) closeIdleConns() bool {
s.mu.Lock()
defer s.mu.Unlock()
quiescent := true
for c := range s.activeConn {
st, unixSec := c.getState()
// Issue 22682: treat StateNew connections as if
// they're idle if we haven't read the first request's
// header in over 5 seconds.
if st == StateNew && unixSec < time.Now().Unix()-5 {
st = StateIdle
}
if st != StateIdle || unixSec == 0 {
// Assume unixSec == 0 means it's a very new
// connection, without state set yet.
quiescent = false
continue
}
c.rwc.Close()
delete(s.activeConn, c)
}
return quiescent
}
func (s *Server) closeListenersLocked() error {
var err error
for ln := range s.listeners {
if cerr := (*ln).Close(); cerr != nil && err == nil {
err = cerr
}
delete(s.listeners, ln)
}
return err
}
// A ConnState represents the state of a client connection to a server.
// It's used by the optional Server.ConnState hook.
type ConnState int
const (
// StateNew represents a new connection that is expected to
// send a request immediately. Connections begin at this
// state and then transition to either StateActive or
// StateClosed.
StateNew ConnState = iota
// StateActive represents a connection that has read 1 or more
// bytes of a request. The Server.ConnState hook for
// StateActive fires before the request has entered a handler
// and doesn't fire again until the request has been
// handled. After the request is handled, the state
// transitions to StateClosed, StateHijacked, or StateIdle.
// For HTTP/2, StateActive fires on the transition from zero
// to one active request, and only transitions away once all
// active requests are complete. That means that ConnState
// cannot be used to do per-request work; ConnState only notes
// the overall state of the connection.
StateActive
// StateIdle represents a connection that has finished
// handling a request and is in the keep-alive state, waiting
// for a new request. Connections transition from StateIdle
// to either StateActive or StateClosed.
StateIdle
// StateHijacked represents a hijacked connection.
// This is a terminal state. It does not transition to StateClosed.
StateHijacked
// StateClosed represents a closed connection.
// This is a terminal state. Hijacked connections do not
// transition to StateClosed.
StateClosed
)
var stateName = map[ConnState]string{
StateNew: "new",
StateActive: "active",
StateIdle: "idle",
StateHijacked: "hijacked",
StateClosed: "closed",
}
func (c ConnState) String() string {
return stateName[c]
}
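// Illustrative hook sketch (not part of this package): observing connection
// state transitions via Server.ConnState, e.g. to count active connections.
// Logging each transition is a placeholder for real instrumentation.
//
//	srv := &http.Server{
//		Addr: ":8080",
//		ConnState: func(c net.Conn, state http.ConnState) {
//			log.Printf("conn %s is now %s", c.RemoteAddr(), state)
//		},
//	}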
// serverHandler delegates to either the server's Handler or
// DefaultServeMux and also handles "OPTIONS *" requests.
type serverHandler struct {
srv *Server
}
func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) {
handler := sh.srv.Handler
if handler == nil {
handler = DefaultServeMux
}
if req.RequestURI == "*" && req.Method == "OPTIONS" {
handler = globalOptionsHandler{}
}
handler.ServeHTTP(rw, req)
}
// ListenAndServe listens on the TCP network address srv.Addr and then
// calls Serve to handle requests on incoming connections.
// Accepted connections are configured to enable TCP keep-alives.
//
// If srv.Addr is blank, ":http" is used.
//
// ListenAndServe always returns a non-nil error. After Shutdown or Close,
// the returned error is ErrServerClosed.
func (srv *Server) ListenAndServe() error {
if srv.shuttingDown() {
return ErrServerClosed
}
addr := srv.Addr
if addr == "" {
addr = ":http"
}
ln, err := net.Listen("tcp", addr)
if err != nil {
return err
}
return srv.Serve(ln)
}
var testHookServerServe func(*Server, net.Listener) // used if non-nil
// shouldConfigureHTTP2ForServe reports whether Server.Serve should configure
// automatic HTTP/2. (which sets up the srv.TLSNextProto map)
func (srv *Server) shouldConfigureHTTP2ForServe() bool {
if srv.TLSConfig == nil {
// Compatibility with Go 1.6:
// If there's no TLSConfig, it's possible that the user just
// didn't set it on the http.Server, but did pass it to
// tls.NewListener and passed that listener to Serve.
// So we should configure HTTP/2 (to set up srv.TLSNextProto)
// in case the listener returns an "h2" *tls.Conn.
return true
}
// The user specified a TLSConfig on their http.Server.
// In this case, only configure HTTP/2 if their tls.Config
// explicitly mentions "h2". Otherwise http2.ConfigureServer
// would modify the tls.Config to add it, but they probably already
// passed this tls.Config to tls.NewListener. And if they did,
// it's too late anyway to fix it. It would only be potentially racy.
// See Issue 15908.
return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS)
}
// ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe,
// and ListenAndServeTLS methods after a call to Shutdown or Close.
var ErrServerClosed = errors.New("http: Server closed")
// Serve accepts incoming connections on the Listener l, creating a
// new service goroutine for each. The service goroutines read requests and
// then call srv.Handler to reply to them.
//
// HTTP/2 support is only enabled if the Listener returns *tls.Conn
// connections and they were configured with "h2" in the TLS
// Config.NextProtos.
//
// Serve always returns a non-nil error and closes l.
// After Shutdown or Close, the returned error is ErrServerClosed.
func (srv *Server) Serve(l net.Listener) error {
if fn := testHookServerServe; fn != nil {
fn(srv, l) // call hook with unwrapped listener
}
origListener := l
l = &onceCloseListener{Listener: l}
defer l.Close()
if err := srv.setupHTTP2_Serve(); err != nil {
return err
}
if !srv.trackListener(&l, true) {
return ErrServerClosed
}
defer srv.trackListener(&l, false)
baseCtx := context.Background()
if srv.BaseContext != nil {
baseCtx = srv.BaseContext(origListener)
if baseCtx == nil {
panic("BaseContext returned a nil context")
}
}
var tempDelay time.Duration // how long to sleep on accept failure
ctx := context.WithValue(baseCtx, ServerContextKey, srv)
for {
rw, err := l.Accept()
if err != nil {
select {
case <-srv.getDoneChan():
return ErrServerClosed
default:
}
if ne, ok := err.(net.Error); ok && ne.Temporary() {
if tempDelay == 0 {
tempDelay = 5 * time.Millisecond
} else {
tempDelay *= 2
}
if max := 1 * time.Second; tempDelay > max {
tempDelay = max
}
srv.logf("http: Accept error: %v; retrying in %v", err, tempDelay)
time.Sleep(tempDelay)
continue
}
return err
}
connCtx := ctx
if cc := srv.ConnContext; cc != nil {
connCtx = cc(connCtx, rw)
if connCtx == nil {
panic("ConnContext returned nil")
}
}
tempDelay = 0
c := srv.newConn(rw)
c.setState(c.rwc, StateNew) // before Serve can return
go c.serve(connCtx)
}
}
// ServeTLS accepts incoming connections on the Listener l, creating a
// new service goroutine for each. The service goroutines perform TLS
// setup and then read requests, calling srv.Handler to reply to them.
//
// Files containing a certificate and matching private key for the
// server must be provided if neither the Server's
// TLSConfig.Certificates nor TLSConfig.GetCertificate are populated.
// If the certificate is signed by a certificate authority, the
// certFile should be the concatenation of the server's certificate,
// any intermediates, and the CA's certificate.
//
// ServeTLS always returns a non-nil error. After Shutdown or Close, the
// returned error is ErrServerClosed.
func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error {
// Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig
// before we clone it and create the TLS Listener.
if err := srv.setupHTTP2_ServeTLS(); err != nil {
return err
}
config := cloneTLSConfig(srv.TLSConfig)
if !strSliceContains(config.NextProtos, "http/1.1") {
config.NextProtos = append(config.NextProtos, "http/1.1")
}
configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil
if !configHasCert || certFile != "" || keyFile != "" {
var err error
config.Certificates = make([]tls.Certificate, 1)
config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return err
}
}
tlsListener := tls.NewListener(l, config)
return srv.Serve(tlsListener)
}
// trackListener adds or removes a net.Listener to the set of tracked
// listeners.
//
// We store a pointer to interface in the map set, in case the
// net.Listener is not comparable. This is safe because we only call
// trackListener via Serve and can track+defer untrack the same
// pointer to local variable there. We never need to compare a
// Listener from another caller.
//
// It reports whether the server is still up (not Shutdown or Closed).
func (s *Server) trackListener(ln *net.Listener, add bool) bool {
s.mu.Lock()
defer s.mu.Unlock()
if s.listeners == nil {
s.listeners = make(map[*net.Listener]struct{})
}
if add {
if s.shuttingDown() {
return false
}
s.listeners[ln] = struct{}{}
} else {
delete(s.listeners, ln)
}
return true
}
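// Editorial aside (hedged, not part of the original source): the reason for
// keying the set by *net.Listener is that map insertion panics at runtime when
// an interface key holds a non-comparable dynamic type, while pointers are
// always comparable. A standalone illustration:
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		type funcListener struct{ f func() } // a func field makes the struct non-comparable
//		m := map[interface{}]struct{}{}
//		defer func() { fmt.Println("recovered:", recover()) }()
//		m[funcListener{func() {}}] = struct{}{} // panics: hash of unhashable type
//		// keying by a pointer (as trackListener does) sidesteps this panic
//	}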
func (s *Server) trackConn(c *conn, add bool) {
s.mu.Lock()
defer s.mu.Unlock()
if s.activeConn == nil {
s.activeConn = make(map[*conn]struct{})
}
if add {
s.activeConn[c] = struct{}{}
} else {
delete(s.activeConn, c)
}
}
func (s *Server) idleTimeout() time.Duration {
if s.IdleTimeout != 0 {
return s.IdleTimeout
}
return s.ReadTimeout
}
func (s *Server) readHeaderTimeout() time.Duration {
if s.ReadHeaderTimeout != 0 {
return s.ReadHeaderTimeout
}
return s.ReadTimeout
}
func (s *Server) doKeepAlives() bool {
return atomic.LoadInt32(&s.disableKeepAlives) == 0 && !s.shuttingDown()
}
func (s *Server) shuttingDown() bool {
// TODO: replace inShutdown with the existing atomicBool type;
// see https://github.com/golang/go/issues/20239#issuecomment-381434582
return atomic.LoadInt32(&s.inShutdown) != 0
}
// SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled.
// By default, keep-alives are always enabled. Only very
// resource-constrained environments or servers in the process of
// shutting down should disable them.
func (srv *Server) SetKeepAlivesEnabled(v bool) {
if v {
atomic.StoreInt32(&srv.disableKeepAlives, 0)
return
}
atomic.StoreInt32(&srv.disableKeepAlives, 1)
// Close idle HTTP/1 conns:
srv.closeIdleConns()
// TODO: Issue 26303: close HTTP/2 conns as soon as they become idle.
}
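// Hedged example (editorial addition, not part of the original source):
// disabling keep-alives is typically paired with a graceful shutdown so idle
// connections stop being reused while in-flight requests drain. The address
// and timeout are illustrative.
//
//	package main
//
//	import (
//		"context"
//		"log"
//		"net/http"
//		"os"
//		"os/signal"
//		"time"
//	)
//
//	func main() {
//		srv := &http.Server{Addr: ":8080"}
//		go func() {
//			sig := make(chan os.Signal, 1)
//			signal.Notify(sig, os.Interrupt)
//			<-sig
//			srv.SetKeepAlivesEnabled(false) // stop reusing idle connections
//			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//			defer cancel()
//			_ = srv.Shutdown(ctx)
//		}()
//		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
//			log.Fatal(err)
//		}
//	}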
func (s *Server) logf(format string, args ...interface{}) {
if s.ErrorLog != nil {
s.ErrorLog.Printf(format, args...)
} else {
log.Printf(format, args...)
}
}
// logf prints to the ErrorLog of the *Server associated with request r
// via ServerContextKey. If there's no associated server, or if ErrorLog
// is nil, logging is done via the log package's standard logger.
func logf(r *Request, format string, args ...interface{}) {
s, _ := r.Context().Value(ServerContextKey).(*Server)
if s != nil && s.ErrorLog != nil {
s.ErrorLog.Printf(format, args...)
} else {
log.Printf(format, args...)
}
}
// ListenAndServe listens on the TCP network address addr and then calls
// Serve with handler to handle requests on incoming connections.
// Accepted connections are configured to enable TCP keep-alives.
//
// The handler is typically nil, in which case the DefaultServeMux is used.
//
// ListenAndServe always returns a non-nil error.
func ListenAndServe(addr string, handler Handler) error {
server := &Server{Addr: addr, Handler: handler}
return server.ListenAndServe()
}
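// Hedged example (editorial addition, not part of the original source): the
// common pattern of registering handlers on DefaultServeMux and passing a nil
// handler, as the comment above describes.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//		"net/http"
//	)
//
//	func main() {
//		http.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
//			fmt.Fprintln(w, "hello")
//		})
//		// nil handler means DefaultServeMux is used.
//		log.Fatal(http.ListenAndServe(":8080", nil))
//	}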
// ListenAndServeTLS acts identically to ListenAndServe, except that it
// expects HTTPS connections. Additionally, files containing a certificate and
// matching private key for the server must be provided. If the certificate
// is signed by a certificate authority, the certFile should be the concatenation
// of the server's certificate, any intermediates, and the CA's certificate.
func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
server := &Server{Addr: addr, Handler: handler}
return server.ListenAndServeTLS(certFile, keyFile)
}
// ListenAndServeTLS listens on the TCP network address srv.Addr and
// then calls ServeTLS to handle requests on incoming TLS connections.
// Accepted connections are configured to enable TCP keep-alives.
//
// Filenames containing a certificate and matching private key for the
// server must be provided if neither the Server's TLSConfig.Certificates
// nor TLSConfig.GetCertificate are populated. If the certificate is
// signed by a certificate authority, the certFile should be the
// concatenation of the server's certificate, any intermediates, and
// the CA's certificate.
//
// If srv.Addr is blank, ":https" is used.
//
// ListenAndServeTLS always returns a non-nil error. After Shutdown or
// Close, the returned error is ErrServerClosed.
func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
if srv.shuttingDown() {
return ErrServerClosed
}
addr := srv.Addr
if addr == "" {
addr = ":https"
}
ln, err := net.Listen("tcp", addr)
if err != nil {
return err
}
defer ln.Close()
return srv.ServeTLS(ln, certFile, keyFile)
}
// setupHTTP2_ServeTLS conditionally configures HTTP/2 on
// srv and reports whether there was an error setting it up. If it is
// not configured for policy reasons, nil is returned.
func (srv *Server) setupHTTP2_ServeTLS() error {
srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults)
return srv.nextProtoErr
}
// setupHTTP2_Serve is called from (*Server).Serve and conditionally
// configures HTTP/2 on srv using a more conservative policy than
// setupHTTP2_ServeTLS because Serve is called after tls.Listen,
// and may be called concurrently. See shouldConfigureHTTP2ForServe.
//
// The tests named TestTransportAutomaticHTTP2* and
// TestConcurrentServerServe in server_test.go demonstrate some
// of the supported use cases and motivations.
func (srv *Server) setupHTTP2_Serve() error {
srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve)
return srv.nextProtoErr
}
func (srv *Server) onceSetNextProtoDefaults_Serve() {
if srv.shouldConfigureHTTP2ForServe() {
srv.onceSetNextProtoDefaults()
}
}
// onceSetNextProtoDefaults configures HTTP/2 if the user hasn't
// configured otherwise (by setting srv.TLSNextProto non-nil).
// It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*).
func (srv *Server) onceSetNextProtoDefaults() {
if omitBundledHTTP2 || strings.Contains(os.Getenv("GODEBUG"), "http2server=0") {
return
}
// Enable HTTP/2 by default if the user hasn't otherwise
// configured their TLSNextProto map.
if srv.TLSNextProto == nil {
conf := &http2Server{
NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) },
}
srv.nextProtoErr = http2ConfigureServer(srv, conf)
}
}
// TimeoutHandler returns a Handler that runs h with the given time limit.
//
// The new Handler calls h.ServeHTTP to handle each request, but if a
// call runs for longer than its time limit, the handler responds with
// a 503 Service Unavailable error and the given message in its body.
// (If msg is empty, a suitable default message will be sent.)
// After such a timeout, writes by h to its ResponseWriter will return
// ErrHandlerTimeout.
//
// TimeoutHandler supports the Pusher interface but does not support
// the Hijacker or Flusher interfaces.
func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
return &timeoutHandler{
handler: h,
body: msg,
dt: dt,
}
}
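// Hedged example (editorial addition, not part of the original source):
// wrapping a deliberately slow handler so requests that exceed one second get
// a 503 with a custom message. The durations and message are illustrative.
//
//	package main
//
//	import (
//		"log"
//		"net/http"
//		"time"
//	)
//
//	func main() {
//		slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//			time.Sleep(2 * time.Second) // simulated slow work
//			w.Write([]byte("done"))     // after the deadline this returns ErrHandlerTimeout
//		})
//		h := http.TimeoutHandler(slow, 1*time.Second, "request timed out")
//		log.Fatal(http.ListenAndServe(":8080", h))
//	}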
// ErrHandlerTimeout is returned on ResponseWriter Write calls
// in handlers which have timed out.
var ErrHandlerTimeout = errors.New("http: Handler timeout")
type timeoutHandler struct {
handler Handler
body string
dt time.Duration
// When set, no context will be created and this context will
// be used instead.
testContext context.Context
}
func (h *timeoutHandler) errorBody() string {
if h.body != "" {
return h.body
}
return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
}
func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
ctx := h.testContext
if ctx == nil {
var cancelCtx context.CancelFunc
ctx, cancelCtx = context.WithTimeout(r.Context(), h.dt)
defer cancelCtx()
}
r = r.WithContext(ctx)
done := make(chan struct{})
tw := &timeoutWriter{
w: w,
h: make(Header),
req: r,
}
panicChan := make(chan interface{}, 1)
go func() {
defer func() {
if p := recover(); p != nil {
panicChan <- p
}
}()
h.handler.ServeHTTP(tw, r)
close(done)
}()
select {
case p := <-panicChan:
panic(p)
case <-done:
tw.mu.Lock()
defer tw.mu.Unlock()
dst := w.Header()
for k, vv := range tw.h {
dst[k] = vv
}
if !tw.wroteHeader {
tw.code = StatusOK
}
w.WriteHeader(tw.code)
w.Write(tw.wbuf.Bytes())
case <-ctx.Done():
tw.mu.Lock()
defer tw.mu.Unlock()
w.WriteHeader(StatusServiceUnavailable)
io.WriteString(w, h.errorBody())
tw.timedOut = true
}
}
type timeoutWriter struct {
w ResponseWriter
h Header
wbuf bytes.Buffer
req *Request
mu sync.Mutex
timedOut bool
wroteHeader bool
code int
}
var _ Pusher = (*timeoutWriter)(nil)
// Push implements the Pusher interface.
func (tw *timeoutWriter) Push(target string, opts *PushOptions) error {
if pusher, ok := tw.w.(Pusher); ok {
return pusher.Push(target, opts)
}
return ErrNotSupported
}
func (tw *timeoutWriter) Header() Header { return tw.h }
func (tw *timeoutWriter) Write(p []byte) (int, error) {
tw.mu.Lock()
defer tw.mu.Unlock()
if tw.timedOut {
return 0, ErrHandlerTimeout
}
if !tw.wroteHeader {
tw.writeHeaderLocked(StatusOK)
}
return tw.wbuf.Write(p)
}
func (tw *timeoutWriter) writeHeaderLocked(code int) {
checkWriteHeaderCode(code)
switch {
case tw.timedOut:
return
case tw.wroteHeader:
if tw.req != nil {
caller := relevantCaller()
logf(tw.req, "http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
}
default:
tw.wroteHeader = true
tw.code = code
}
}
func (tw *timeoutWriter) WriteHeader(code int) {
tw.mu.Lock()
defer tw.mu.Unlock()
tw.writeHeaderLocked(code)
}
// onceCloseListener wraps a net.Listener, protecting it from
// multiple Close calls.
type onceCloseListener struct {
net.Listener
once sync.Once
closeErr error
}
func (oc *onceCloseListener) Close() error {
oc.once.Do(oc.close)
return oc.closeErr
}
func (oc *onceCloseListener) close() { oc.closeErr = oc.Listener.Close() }
// globalOptionsHandler responds to "OPTIONS *" requests.
type globalOptionsHandler struct{}
func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) {
w.Header().Set("Content-Length", "0")
if r.ContentLength != 0 {
// Read up to 4KB of OPTIONS body (as mentioned in the
// spec as being reserved for future use), but anything
// over that is considered a waste of server resources
// (or an attack) and we abort and close the connection,
// courtesy of MaxBytesReader's EOF behavior.
mb := MaxBytesReader(w, r.Body, 4<<10)
io.Copy(ioutil.Discard, mb)
}
}
// initALPNRequest is an HTTP handler that initializes certain
// uninitialized fields in its *Request. Such partially-initialized
// Requests come from ALPN protocol handlers.
type initALPNRequest struct {
ctx context.Context
c *tls.Conn
h serverHandler
}
// BaseContext is an exported but unadvertised http.Handler method
// recognized by x/net/http2 to pass down a context; the TLSNextProto
// API predates context support so we shoehorn through the only
// interface we have available.
func (h initALPNRequest) BaseContext() context.Context { return h.ctx }
func (h initALPNRequest) ServeHTTP(rw ResponseWriter, req *Request) {
if req.TLS == nil {
req.TLS = &tls.ConnectionState{}
*req.TLS = h.c.ConnectionState()
}
if req.Body == nil {
req.Body = NoBody
}
if req.RemoteAddr == "" {
req.RemoteAddr = h.c.RemoteAddr().String()
}
h.h.ServeHTTP(rw, req)
}
// loggingConn is used for debugging.
type loggingConn struct {
name string
net.Conn
}
var (
uniqNameMu sync.Mutex
uniqNameNext = make(map[string]int)
)
func newLoggingConn(baseName string, c net.Conn) net.Conn {
uniqNameMu.Lock()
defer uniqNameMu.Unlock()
uniqNameNext[baseName]++
return &loggingConn{
name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]),
Conn: c,
}
}
func (c *loggingConn) Write(p []byte) (n int, err error) {
log.Printf("%s.Write(%d) = ....", c.name, len(p))
n, err = c.Conn.Write(p)
log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err)
return
}
func (c *loggingConn) Read(p []byte) (n int, err error) {
log.Printf("%s.Read(%d) = ....", c.name, len(p))
n, err = c.Conn.Read(p)
log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err)
return
}
func (c *loggingConn) Close() (err error) {
log.Printf("%s.Close() = ...", c.name)
err = c.Conn.Close()
log.Printf("%s.Close() = %v", c.name, err)
return
}
// checkConnErrorWriter writes to c.rwc and records any write errors to c.werr.
// It only contains one field (and a pointer field at that), so it
// fits in an interface value without an extra allocation.
type checkConnErrorWriter struct {
c *conn
}
func (w checkConnErrorWriter) Write(p []byte) (n int, err error) {
n, err = w.c.rwc.Write(p)
if err != nil && w.c.werr == nil {
w.c.werr = err
w.c.cancelCtx()
}
return
}
func numLeadingCRorLF(v []byte) (n int) {
for _, b := range v {
if b == '\r' || b == '\n' {
n++
continue
}
break
}
return
}
func strSliceContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
// tlsRecordHeaderLooksLikeHTTP reports whether a TLS record header
// looks like it might've been a misdirected plaintext HTTP request.
func tlsRecordHeaderLooksLikeHTTP(hdr [5]byte) bool {
switch string(hdr[:]) {
case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO":
return true
}
return false
}
| ["\"GODEBUG\""] | [] | ["GODEBUG"] | [] | ["GODEBUG"] | go | 1 | 0 | |
devs/simple-consumer/src/main/java/com/kafka/workshop/consumer/KafkaSimpleConsumer.java
package com.kafka.workshop.consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
public class KafkaSimpleConsumer {
private final Logger logger = LoggerFactory.getLogger(KafkaSimpleConsumer.class);
private final String topic;
private final Properties props;
public KafkaSimpleConsumer(String bootstrapServers, String topic) {
this.topic = topic;
String deserializer = StringDeserializer.class.getName();
props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
props.put(ConsumerConfig.GROUP_ID_CONFIG, "ConsumerGroup-1");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, deserializer);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, deserializer);
}
public void consume() {
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
consumer.subscribe(Arrays.asList(topic));
while (true) {
ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
records.forEach(record ->
logger.info("{} [{}] offset={}, key={}, value=\"{}\"",
record.topic(), record.partition(),
record.offset(), record.key(), record.value())
);
consumer.commitSync();
}
// consumer.close();
}
public static void main(String[] args) {
String bootstrapServers = System.getenv("KAFKA_BOOTSTRAP_SERVERS");
String topic = System.getenv("KAFKA_TOPIC");
KafkaSimpleConsumer c = new KafkaSimpleConsumer(bootstrapServers, topic);
c.consume();
}
}
| ["\"KAFKA_BOOTSTRAP_SERVERS\"", "\"KAFKA_TOPIC\""] | [] | ["KAFKA_TOPIC", "KAFKA_BOOTSTRAP_SERVERS"] | [] | ["KAFKA_TOPIC", "KAFKA_BOOTSTRAP_SERVERS"] | java | 2 | 0 | |
extern/sector-storage/ffiwrapper/sealer_test.go
package ffiwrapper
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"testing"
"time"
commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
paramfetch "github.com/filecoin-project/go-paramfetch"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage"
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/filecoin-ffi/generated"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/lotus/extern/storage-sealing/lib/nullreader"
)
func init() {
logging.SetLogLevel("*", "DEBUG") //nolint: errcheck
}
var sealProofType = abi.RegisteredSealProof_StackedDrg2KiBV1
var sectorSize, _ = sealProofType.SectorSize()
var sealRand = abi.SealRandomness{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2}
type seal struct {
ref storage.SectorRef
cids storage.SectorCids
pi abi.PieceInfo
ticket abi.SealRandomness
}
func data(sn abi.SectorNumber, dlen abi.UnpaddedPieceSize) io.Reader {
return io.MultiReader(
io.LimitReader(rand.New(rand.NewSource(42+int64(sn))), int64(123)),
io.LimitReader(rand.New(rand.NewSource(42+int64(sn))), int64(dlen-123)),
)
}
func (s *seal) precommit(t *testing.T, sb *Sealer, id storage.SectorRef, done func()) {
defer done()
dlen := abi.PaddedPieceSize(sectorSize).Unpadded()
var err error
r := data(id.ID.Number, dlen)
s.pi, err = sb.AddPiece(context.TODO(), id, []abi.UnpaddedPieceSize{}, dlen, r)
if err != nil {
t.Fatalf("%+v", err)
}
s.ticket = sealRand
p1, err := sb.SealPreCommit1(context.TODO(), id, s.ticket, []abi.PieceInfo{s.pi})
if err != nil {
t.Fatalf("%+v", err)
}
cids, err := sb.SealPreCommit2(context.TODO(), id, p1)
if err != nil {
t.Fatalf("%+v", err)
}
s.cids = cids
}
var seed = abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9}
func (s *seal) commit(t *testing.T, sb *Sealer, done func()) storage.Proof {
defer done()
pc1, err := sb.SealCommit1(context.TODO(), s.ref, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids)
if err != nil {
t.Fatalf("%+v", err)
}
proof, err := sb.SealCommit2(context.TODO(), s.ref, pc1)
if err != nil {
t.Fatalf("%+v", err)
}
ok, err := ProofVerifier.VerifySeal(proof2.SealVerifyInfo{
SectorID: s.ref.ID,
SealedCID: s.cids.Sealed,
SealProof: s.ref.ProofType,
Proof: proof,
Randomness: s.ticket,
InteractiveRandomness: seed,
UnsealedCID: s.cids.Unsealed,
})
if err != nil {
t.Fatalf("%+v", err)
}
if !ok {
t.Fatal("proof failed to validate")
}
return proof
}
func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si storage.SectorRef, done func()) {
defer done()
var b bytes.Buffer
_, err := sb.ReadPiece(context.TODO(), &b, si, 0, 1016)
if err != nil {
t.Fatal(err)
}
expect, _ := ioutil.ReadAll(data(si.ID.Number, 1016))
if !bytes.Equal(b.Bytes(), expect) {
t.Fatal("read wrong bytes")
}
p, sd, err := sp.AcquireSector(context.TODO(), si, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage)
if err != nil {
t.Fatal(err)
}
if err := os.Remove(p.Unsealed); err != nil {
t.Fatal(err)
}
sd()
_, err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016)
if err == nil {
t.Fatal("HOW?!")
}
log.Info("this is what we expect: ", err)
if err := sb.UnsealPiece(context.TODO(), si, 0, 1016, sealRand, s.cids.Unsealed); err != nil {
t.Fatal(err)
}
b.Reset()
_, err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016)
if err != nil {
t.Fatal(err)
}
expect, _ = ioutil.ReadAll(data(si.ID.Number, 1016))
require.Equal(t, expect, b.Bytes())
b.Reset()
have, err := sb.ReadPiece(context.TODO(), &b, si, 0, 2032)
if err != nil {
t.Fatal(err)
}
if have {
t.Errorf("didn't expect to read things")
}
if b.Len() != 0 {
t.Fatal("read bytes")
}
}
func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) {
randomness := abi.PoStRandomness{0, 9, 2, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 7}
xsis := make([]proof7.ExtendedSectorInfo, len(seals))
for i, s := range seals {
xsis[i] = proof7.ExtendedSectorInfo{
SealProof: s.ref.ProofType,
SectorNumber: s.ref.ID.Number,
SealedCID: s.cids.Sealed,
}
}
proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].ref.ID.Miner, xsis, randomness)
if len(skipped) > 0 {
require.Error(t, err)
require.EqualValues(t, skipped, skp)
return
}
if err != nil {
t.Fatalf("%+v", err)
}
sis := make([]proof7.SectorInfo, len(seals))
for i, xsi := range xsis {
sis[i] = proof7.SectorInfo{
SealProof: xsi.SealProof,
SectorNumber: xsi.SectorNumber,
SealedCID: xsi.SealedCID,
}
}
ok, err := ProofVerifier.VerifyWindowPoSt(context.TODO(), proof7.WindowPoStVerifyInfo{
Randomness: randomness,
Proofs: proofs,
ChallengedSectors: sis,
Prover: seals[0].ref.ID.Miner,
})
if err != nil {
t.Fatalf("%+v", err)
}
if !ok {
t.Fatal("bad post")
}
}
func corrupt(t *testing.T, sealer *Sealer, id storage.SectorRef) {
paths, done, err := sealer.sectors.AcquireSector(context.Background(), id, storiface.FTSealed, 0, storiface.PathStorage)
require.NoError(t, err)
defer done()
log.Infof("corrupt %s", paths.Sealed)
f, err := os.OpenFile(paths.Sealed, os.O_RDWR, 0664)
require.NoError(t, err)
_, err = f.WriteAt(bytes.Repeat([]byte{'d'}, 2048), 0)
require.NoError(t, err)
require.NoError(t, f.Close())
}
func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) {
dat, err := ioutil.ReadFile("../../../build/proof-params/parameters.json")
if err != nil {
panic(err)
}
datSrs, err := ioutil.ReadFile("../../../build/proof-params/srs-inner-product.json")
if err != nil {
panic(err)
}
err = paramfetch.GetParams(context.TODO(), dat, datSrs, uint64(s))
if err != nil {
panic(xerrors.Errorf("failed to acquire Groth parameters for 2KiB sectors: %w", err))
}
}
// TestDownloadParams exists only so that developers and CI can pre-download
// Groth parameters and verifying keys before running the tests which rely on
// those parameters and keys. To do this, run the following command:
//
// go test -run=^TestDownloadParams
//
func TestDownloadParams(t *testing.T) {
// defer requireFDsClosed(t, openFDs(t)) // flaky, likely because of how go-embed works with param files
getGrothParamFileAndVerifyingKeys(sectorSize)
}
func TestSealAndVerify(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
defer requireFDsClosed(t, openFDs(t))
if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
t.Skip("this is slow")
}
_ = os.Setenv("RUST_LOG", "info")
getGrothParamFileAndVerifyingKeys(sectorSize)
cdir, err := ioutil.TempDir("", "sbtest-c-")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: cdir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
t.Cleanup(func() {
if t.Failed() {
fmt.Printf("not removing %s\n", cdir)
return
}
if err := os.RemoveAll(cdir); err != nil {
t.Error(err)
}
})
si := storage.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 1},
ProofType: sealProofType,
}
s := seal{ref: si}
start := time.Now()
s.precommit(t, sb, si, func() {})
precommit := time.Now()
s.commit(t, sb, func() {})
commit := time.Now()
post(t, sb, nil, s)
epost := time.Now()
post(t, sb, nil, s)
if err := sb.FinalizeSector(context.TODO(), si, nil); err != nil {
t.Fatalf("%+v", err)
}
s.unseal(t, sb, sp, si, func() {})
fmt.Printf("PreCommit: %s\n", precommit.Sub(start).String())
fmt.Printf("Commit: %s\n", commit.Sub(precommit).String())
fmt.Printf("EPoSt: %s\n", epost.Sub(commit).String())
}
func TestSealPoStNoCommit(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
defer requireFDsClosed(t, openFDs(t))
if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
t.Skip("this is slow")
}
_ = os.Setenv("RUST_LOG", "info")
getGrothParamFileAndVerifyingKeys(sectorSize)
dir, err := ioutil.TempDir("", "sbtest")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: dir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
t.Cleanup(func() {
if t.Failed() {
fmt.Printf("not removing %s\n", dir)
return
}
if err := os.RemoveAll(dir); err != nil {
t.Error(err)
}
})
si := storage.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 1},
ProofType: sealProofType,
}
s := seal{ref: si}
start := time.Now()
s.precommit(t, sb, si, func() {})
precommit := time.Now()
if err := sb.FinalizeSector(context.TODO(), si, nil); err != nil {
t.Fatal(err)
}
post(t, sb, nil, s)
epost := time.Now()
fmt.Printf("PreCommit: %s\n", precommit.Sub(start).String())
fmt.Printf("EPoSt: %s\n", epost.Sub(precommit).String())
}
func TestSealAndVerify3(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
defer requireFDsClosed(t, openFDs(t))
if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
t.Skip("this is slow")
}
_ = os.Setenv("RUST_LOG", "trace")
getGrothParamFileAndVerifyingKeys(sectorSize)
dir, err := ioutil.TempDir("", "sbtest")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: dir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
t.Cleanup(func() {
if err := os.RemoveAll(dir); err != nil {
t.Error(err)
}
})
var wg sync.WaitGroup
si1 := storage.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 1},
ProofType: sealProofType,
}
si2 := storage.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 2},
ProofType: sealProofType,
}
si3 := storage.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 3},
ProofType: sealProofType,
}
s1 := seal{ref: si1}
s2 := seal{ref: si2}
s3 := seal{ref: si3}
wg.Add(3)
go s1.precommit(t, sb, si1, wg.Done) //nolint: staticcheck
time.Sleep(100 * time.Millisecond)
go s2.precommit(t, sb, si2, wg.Done) //nolint: staticcheck
time.Sleep(100 * time.Millisecond)
go s3.precommit(t, sb, si3, wg.Done) //nolint: staticcheck
wg.Wait()
wg.Add(3)
go s1.commit(t, sb, wg.Done) //nolint: staticcheck
go s2.commit(t, sb, wg.Done) //nolint: staticcheck
go s3.commit(t, sb, wg.Done) //nolint: staticcheck
wg.Wait()
post(t, sb, nil, s1, s2, s3)
corrupt(t, sb, si1)
corrupt(t, sb, si2)
post(t, sb, []abi.SectorID{si1.ID, si2.ID}, s1, s2, s3)
}
func TestSealAndVerifyAggregate(t *testing.T) {
numAgg := 5
if testing.Short() {
t.Skip("skipping test in short mode")
}
defer requireFDsClosed(t, openFDs(t))
if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
t.Skip("this is slow")
}
_ = os.Setenv("RUST_LOG", "info")
getGrothParamFileAndVerifyingKeys(sectorSize)
cdir, err := ioutil.TempDir("", "sbtest-c-")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: cdir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
t.Cleanup(func() {
if t.Failed() {
fmt.Printf("not removing %s\n", cdir)
return
}
if err := os.RemoveAll(cdir); err != nil {
t.Error(err)
}
})
avi := proof5.AggregateSealVerifyProofAndInfos{
Miner: miner,
SealProof: sealProofType,
AggregateProof: policy.GetDefaultAggregationProof(),
Proof: nil,
Infos: make([]proof5.AggregateSealVerifyInfo, numAgg),
}
toAggregate := make([][]byte, numAgg)
for i := 0; i < numAgg; i++ {
si := storage.SectorRef{
ID: abi.SectorID{Miner: miner, Number: abi.SectorNumber(i + 1)},
ProofType: sealProofType,
}
s := seal{ref: si}
s.precommit(t, sb, si, func() {})
toAggregate[i] = s.commit(t, sb, func() {})
avi.Infos[i] = proof5.AggregateSealVerifyInfo{
Number: abi.SectorNumber(i + 1),
Randomness: s.ticket,
InteractiveRandomness: seed,
SealedCID: s.cids.Sealed,
UnsealedCID: s.cids.Unsealed,
}
}
aggStart := time.Now()
avi.Proof, err = ProofProver.AggregateSealProofs(avi, toAggregate)
require.NoError(t, err)
require.Len(t, avi.Proof, 11188)
aggDone := time.Now()
_, err = ProofProver.AggregateSealProofs(avi, toAggregate)
require.NoError(t, err)
aggHot := time.Now()
ok, err := ProofVerifier.VerifyAggregateSeals(avi)
require.NoError(t, err)
require.True(t, ok)
verifDone := time.Now()
fmt.Printf("Aggregate: %s\n", aggDone.Sub(aggStart).String())
fmt.Printf("Hot: %s\n", aggHot.Sub(aggDone).String())
fmt.Printf("Verify: %s\n", verifDone.Sub(aggHot).String())
}
func BenchmarkWriteWithAlignment(b *testing.B) {
bt := abi.UnpaddedPieceSize(2 * 127 * 1024 * 1024)
b.SetBytes(int64(bt))
for i := 0; i < b.N; i++ {
b.StopTimer()
rf, w, _ := commpffi.ToReadableFile(bytes.NewReader(bytes.Repeat([]byte{0xff, 0}, int(bt/2))), int64(bt))
tf, _ := ioutil.TempFile("/tmp/", "scrb-")
b.StartTimer()
ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg2KiBV1, rf, bt, tf, nil) // nolint:errcheck
_ = w()
}
}
func openFDs(t *testing.T) int {
dent, err := ioutil.ReadDir("/proc/self/fd")
require.NoError(t, err)
var skip int
for _, info := range dent {
l, err := os.Readlink(filepath.Join("/proc/self/fd", info.Name()))
if err != nil {
continue
}
if strings.HasPrefix(l, "/dev/nvidia") {
skip++
}
if strings.HasPrefix(l, "/var/tmp/filecoin-proof-parameters/") {
skip++
}
}
return len(dent) - skip
}
func requireFDsClosed(t *testing.T, start int) {
openNow := openFDs(t)
if start != openNow {
dent, err := ioutil.ReadDir("/proc/self/fd")
require.NoError(t, err)
for _, info := range dent {
l, err := os.Readlink(filepath.Join("/proc/self/fd", info.Name()))
if err != nil {
fmt.Printf("FD err %s\n", err)
continue
}
fmt.Printf("FD %s -> %s\n", info.Name(), l)
}
}
log.Infow("open FDs", "start", start, "now", openNow)
// todo make work with cuda somehow
// require.Equal(t, start, openNow, "FDs shouldn't leak")
}
func TestGenerateUnsealedCID(t *testing.T) {
pt := abi.RegisteredSealProof_StackedDrg2KiBV1
ups := int(abi.PaddedPieceSize(2048).Unpadded())
commP := func(b []byte) cid.Cid {
pf, werr, err := commpffi.ToReadableFile(bytes.NewReader(b), int64(len(b)))
require.NoError(t, err)
c, err := ffi.GeneratePieceCIDFromFile(pt, pf, abi.UnpaddedPieceSize(len(b)))
require.NoError(t, err)
require.NoError(t, werr())
return c
}
testCommEq := func(name string, in [][]byte, expect [][]byte) {
t.Run(name, func(t *testing.T) {
upi := make([]abi.PieceInfo, len(in))
for i, b := range in {
upi[i] = abi.PieceInfo{
Size: abi.UnpaddedPieceSize(len(b)).Padded(),
PieceCID: commP(b),
}
}
sectorPi := []abi.PieceInfo{
{
Size: 2048,
PieceCID: commP(bytes.Join(expect, nil)),
},
}
expectCid, err := GenerateUnsealedCID(pt, sectorPi)
require.NoError(t, err)
actualCid, err := GenerateUnsealedCID(pt, upi)
require.NoError(t, err)
require.Equal(t, expectCid, actualCid)
})
}
barr := func(b byte, den int) []byte {
return bytes.Repeat([]byte{b}, ups/den)
}
// 0000
testCommEq("zero",
nil,
[][]byte{barr(0, 1)},
)
// 1111
testCommEq("one",
[][]byte{barr(1, 1)},
[][]byte{barr(1, 1)},
)
// 11 00
testCommEq("one|2",
[][]byte{barr(1, 2)},
[][]byte{barr(1, 2), barr(0, 2)},
)
// 1 0 00
testCommEq("one|4",
[][]byte{barr(1, 4)},
[][]byte{barr(1, 4), barr(0, 4), barr(0, 2)},
)
// 11 2 0
testCommEq("one|2-two|4",
[][]byte{barr(1, 2), barr(2, 4)},
[][]byte{barr(1, 2), barr(2, 4), barr(0, 4)},
)
// 1 0 22
testCommEq("one|4-two|2",
[][]byte{barr(1, 4), barr(2, 2)},
[][]byte{barr(1, 4), barr(0, 4), barr(2, 2)},
)
// 1 0 22 0000
testCommEq("one|8-two|4",
[][]byte{barr(1, 8), barr(2, 4)},
[][]byte{barr(1, 8), barr(0, 8), barr(2, 4), barr(0, 2)},
)
// 11 2 0 0000
testCommEq("one|4-two|8",
[][]byte{barr(1, 4), barr(2, 8)},
[][]byte{barr(1, 4), barr(2, 8), barr(0, 8), barr(0, 2)},
)
// 1 0 22 3 0 00 4444 5 0 00
testCommEq("one|16-two|8-three|16-four|4-five|16",
[][]byte{barr(1, 16), barr(2, 8), barr(3, 16), barr(4, 4), barr(5, 16)},
[][]byte{barr(1, 16), barr(0, 16), barr(2, 8), barr(3, 16), barr(0, 16), barr(0, 8), barr(4, 4), barr(5, 16), barr(0, 16), barr(0, 8)},
)
}
func TestAddPiece512M(t *testing.T) {
sz := abi.PaddedPieceSize(512 << 20).Unpadded()
cdir, err := ioutil.TempDir("", "sbtest-c-")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: cdir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
cleanup := func() {
if t.Failed() {
fmt.Printf("not removing %s\n", cdir)
return
}
if err := os.RemoveAll(cdir); err != nil {
t.Error(err)
}
}
t.Cleanup(cleanup)
r := rand.New(rand.NewSource(0x7e5))
c, err := sb.AddPiece(context.TODO(), storage.SectorRef{
ID: abi.SectorID{
Miner: miner,
Number: 0,
},
ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1,
}, nil, sz, io.LimitReader(r, int64(sz)))
if err != nil {
t.Fatal(err)
}
require.Equal(t, "baga6ea4seaqhyticusemlcrjhvulpfng4nint6bu3wpe5s3x4bnuj2rs47hfacy", c.PieceCID.String())
}
func BenchmarkAddPiece512M(b *testing.B) {
sz := abi.PaddedPieceSize(512 << 20).Unpadded()
b.SetBytes(int64(sz))
cdir, err := ioutil.TempDir("", "sbtest-c-")
if err != nil {
b.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: cdir,
}
sb, err := New(sp)
if err != nil {
b.Fatalf("%+v", err)
}
cleanup := func() {
if b.Failed() {
fmt.Printf("not removing %s\n", cdir)
return
}
if err := os.RemoveAll(cdir); err != nil {
b.Error(err)
}
}
b.Cleanup(cleanup)
for i := 0; i < b.N; i++ {
c, err := sb.AddPiece(context.TODO(), storage.SectorRef{
ID: abi.SectorID{
Miner: miner,
Number: abi.SectorNumber(i),
},
ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1,
}, nil, sz, io.LimitReader(&nullreader.Reader{}, int64(sz)))
if err != nil {
b.Fatal(err)
}
fmt.Println(c)
}
}
func TestAddPiece512MPadded(t *testing.T) {
sz := abi.PaddedPieceSize(512 << 20).Unpadded()
cdir, err := ioutil.TempDir("", "sbtest-c-")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: cdir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
cleanup := func() {
if t.Failed() {
fmt.Printf("not removing %s\n", cdir)
return
}
if err := os.RemoveAll(cdir); err != nil {
t.Error(err)
}
}
t.Cleanup(cleanup)
r := rand.New(rand.NewSource(0x7e5))
c, err := sb.AddPiece(context.TODO(), storage.SectorRef{
ID: abi.SectorID{
Miner: miner,
Number: 0,
},
ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1,
}, nil, sz, io.LimitReader(r, int64(sz/4)))
if err != nil {
t.Fatalf("add piece failed: %s", err)
}
require.Equal(t, "baga6ea4seaqonenxyku4o7hr5xkzbqsceipf6xgli3on54beqbk6k246sbooobq", c.PieceCID.String())
}
func setupLogger(t *testing.T) *bytes.Buffer {
_ = os.Setenv("RUST_LOG", "info")
var bb bytes.Buffer
r, w, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
go func() {
_, _ = io.Copy(&bb, r)
runtime.KeepAlive(w)
}()
resp := generated.FilInitLogFd(int32(w.Fd()))
resp.Deref()
defer generated.FilDestroyInitLogFdResponse(resp)
if resp.StatusCode != generated.FCPResponseStatusFCPNoError {
t.Fatal(generated.RawString(resp.ErrorMsg).Copy())
}
return &bb
}
func TestMulticoreSDR(t *testing.T) {
if os.Getenv("TEST_RUSTPROOFS_LOGS") != "1" {
t.Skip("skipping test without TEST_RUSTPROOFS_LOGS=1")
}
rustLogger := setupLogger(t)
getGrothParamFileAndVerifyingKeys(sectorSize)
dir, err := ioutil.TempDir("", "sbtest")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: dir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
t.Cleanup(func() {
if t.Failed() {
fmt.Printf("not removing %s\n", dir)
return
}
if err := os.RemoveAll(dir); err != nil {
t.Error(err)
}
})
si := storage.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 1},
ProofType: sealProofType,
}
s := seal{ref: si}
// check multicore
_ = os.Setenv("FIL_PROOFS_USE_MULTICORE_SDR", "1")
rustLogger.Reset()
s.precommit(t, sb, si, func() {})
ok := false
for _, s := range strings.Split(rustLogger.String(), "\n") {
if strings.Contains(s, "create_label::multi") {
ok = true
break
}
}
require.True(t, ok)
}
func TestPoStChallengeAssumptions(t *testing.T) {
var r [32]byte
rand.Read(r[:])
r[31] &= 0x3f
// behaves like a pure function
{
c1, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1, 2, 3, 4})
require.NoError(t, err)
c2, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1, 2, 3, 4})
require.NoError(t, err)
require.Equal(t, c1, c2)
}
// doesn't sort; challenges are position-dependent
{
c1, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1, 2, 3, 4})
require.NoError(t, err)
c2, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{4, 2, 3, 1})
require.NoError(t, err)
require.NotEqual(t, c1, c2)
require.Equal(t, c1.Challenges[2], c2.Challenges[2])
require.Equal(t, c1.Challenges[3], c2.Challenges[3])
require.NotEqual(t, c1.Challenges[1], c2.Challenges[1])
require.NotEqual(t, c1.Challenges[4], c2.Challenges[4])
}
// length doesn't matter
{
c1, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1})
require.NoError(t, err)
c2, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1, 2})
require.NoError(t, err)
require.NotEqual(t, c1, c2)
require.Equal(t, c1.Challenges[1], c2.Challenges[1])
}
// generate dedupes
{
c1, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1, 2, 1, 4})
require.NoError(t, err)
require.Len(t, c1.Sectors, 3)
require.Len(t, c1.Challenges, 3)
}
}
| ["\"CI\"", "\"CI\"", "\"CI\"", "\"CI\"", "\"TEST_RUSTPROOFS_LOGS\""] | [] | ["CI", "TEST_RUSTPROOFS_LOGS"] | [] | ["CI", "TEST_RUSTPROOFS_LOGS"] | go | 2 | 0 | |
apero/tools/recipes/spirou/cal_pphotpix_spirou.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2020-04-05 11:44:00
@author: cook
"""
import numpy as np
import os
from scipy.signal import medfilt, convolve2d
from astropy.table import Table
from astropy.io import fits
import argparse
from apero import core
from apero import lang
from apero.core import constants
from apero.core import math as mp
from apero.io import drs_data
from apero.tools.module.testing import drs_dev
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'cal_pphotpix_spirou.py'
__INSTRUMENT__ = 'SPIROU'
# Get constants
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# get param dict
ParamDict = constants.ParamDict
# Get Logging function
WLOG = core.wlog
# Get the text types
TextEntry = lang.drs_text.TextEntry
TextDict = lang.drs_text.TextDict
Help = lang.drs_text.HelpDict(__INSTRUMENT__, Constants['LANGUAGE'])
# whether this is a debug run (produces mask image)
DEBUG = False
# define relative output path
DEBUGFILE = 'mask_hotpix_pp.fits'
# -----------------------------------------------------------------------------
# get file definitions for this instrument
FMOD = drs_dev.FileDefinition(instrument=__INSTRUMENT__)
# set up recipe definitions (overwrites default one)
RMOD = drs_dev.RecipeDefinition(instrument=__INSTRUMENT__)
# define a recipe for this tool
c_hotpix = drs_dev.TmpRecipe()
c_hotpix.name = __NAME__
c_hotpix.shortname = 'CRT_HTPX'
c_hotpix.instrument = __INSTRUMENT__
c_hotpix.outputdir = 'reduced'
c_hotpix.inputdir = 'raw'
c_hotpix.inputtype = 'raw'
c_hotpix.extension = 'fits'
c_hotpix.description = ('Create the hotpix table for an instrument (required '
'for preprocessing)')
c_hotpix.kind = 'misc'
c_hotpix.set_arg(pos=0, name='directory', dtype='directory',
helpstr=Help['DIRECTORY_HELP'])
c_hotpix.set_arg(pos=1, name='darkfile', dtype='file',
helpstr='[STRING] The raw dark file to process',
files=[FMOD.files.raw_dark_dark_int,
FMOD.files.raw_dark_dark_tel])
c_hotpix.set_kwarg(name='--debugfile', dtype='switch', default=False,
helpstr='If set activates debug mode (saves mask)')
# add recipe to recipe definition
RMOD.add(c_hotpix)
# =============================================================================
# Define functions
# =============================================================================
# All recipe code goes in _main
# Only change the following from here:
# 1) function calls (i.e. main(arg1, arg2, **kwargs)
# 2) fkwargs (i.e. fkwargs=dict(arg1=arg1, arg2=arg2, **kwargs)
# 3) config_main outputs value (i.e. None, pp, reduced)
# Everything else is controlled from recipe_definition
def main(instrument=None, directory=None, darkfile=None, **kwargs):
"""
Main function for cal_pphotpix_spirou.py
:param instrument: str, the instrument name
:param directory: str, the night name (directory name)
:param darkfile: str, the raw dark file to process
:param kwargs: additional keyword arguments
:keyword debug: int, debug level (0 for None)
:returns: dictionary of the local space
:rtype: dict
"""
# assign function calls (must add positional)
fkwargs = dict(instrument=instrument, directory=directory,
darkfile=darkfile, **kwargs)
# ----------------------------------------------------------------------
# deal with command line inputs / function call inputs
recipe, params = core.setup(__NAME__, __INSTRUMENT__, fkwargs,
rmod=RMOD)
# solid debug mode option
if kwargs.get('DEBUG0000', False):
return recipe, params
# ----------------------------------------------------------------------
# run main bulk of code (catching all errors)
llmain, success = core.run(__main__, recipe, params)
# ----------------------------------------------------------------------
# End Message
# ----------------------------------------------------------------------
return core.end_main(params, llmain, recipe, success)
def __main__(recipe, params):
# get input dark file drs fits file instance
darkfile = params['INPUTS']['darkfile'][1][0]
debug = params['INPUTS']['debugfile']
# ----------------------------------------------------------------------
# Prepare dark file
# ----------------------------------------------------------------------
WLOG(params, '', 'Loading dark and preparing image')
# load file
image = darkfile.data
# set NaNS and infs to zero. NaN pixels will not be flagged as hot pixels
image[~np.isfinite(image)] = 0
# subtract a DC offset of the image level
image = image - mp.nanmedian(image)
# express image normalized in terms of sigma
image = image / np.nanpercentile(np.abs(image), 100 * mp.normal_fraction())
# ----------------------------------------------------------------------
# Find hot pixels
# ----------------------------------------------------------------------
WLOG(params, '', 'Finding hot pixels')
# get box size from parameters
boxsize = params['PP_HOTPIX_BOXSIZE']
threshold = params['PP_CORRUPT_HOT_THRES']
# a hot pixel is a point that is > 10 sigma (positive) and that has a
# 5x5 median around it that is within +/- 1 sigma; it is well-behaved and
# not surrounded by bad pixels
WLOG(params, '', '\t- median filter')
medimage = medfilt(image, [boxsize, boxsize])
# find the hot pixels
mask = (np.abs(medimage) < 1.0) & (image > threshold)
hotpix = np.array(mask).astype(float)
# find if hot pixels are alone in a 5x5 box
WLOG(params, '', '\t- convolve')
box = np.ones([boxsize, boxsize]).astype(float)
neighbours = convolve2d(hotpix, box, mode='same')
# after the convolution, isolated (within 5x5)
# hotpixels have neighbours = 1
WLOG(params, '', '\t- find neighbours')
has_neighbours = neighbours == 1
# set non-isolated hot pixels to zero
hotpix[~has_neighbours] = 0.0
# find positions in x and y of good hot pixels
WLOG(params, '', '\t- locate')
y, x = np.where(hotpix)
# ----------------------------------------------------------------------
# write table to file
# ----------------------------------------------------------------------
# print progress
WLOG(params, '', 'Writing to file')
# create table
table = Table()
table['nsig'] = image[y, x]
table['xpix'] = x
table['ypix'] = y
# get outpath
filename = params['PP_HOTPIX_FILE']
relpath = params['DATA_ENGINEERING']
absoutpath = drs_data.construct_path(params, filename, relpath)
# write output as a csv file
WLOG(params, '', '\t Saved to: {0}'.format(absoutpath))
table.write(absoutpath, format='csv', overwrite=True)
# if debug is True save the mask (to compare to image)
if debug:
# get debug file
debugabspath = drs_data.construct_path(params, DEBUGFILE, relpath)
# print progress
WLOG(params, '', '\t Saved debug to: {0}'.format(debugabspath))
# write to file
fits.writeto(debugabspath, hotpix, overwrite=True)
# ----------------------------------------------------------------------
# End of main code
# ----------------------------------------------------------------------
return core.return_locals(params, locals())
# =============================================================================
# Start of code
# =============================================================================
if __name__ == "__main__":
# run main with no arguments (get from command line - sys.argv)
ll = main()
# =============================================================================
# End of code
# =============================================================================
| [] | [] | [] | [] | [] | python | null | null | null |
main.go
package main
import (
"fmt"
"os"
cowsay "github.com/Code-Hex/Neo-cowsay/v2"
)
func main() {
os.Setenv("COWPATH", os.Getenv("KO_DATA_PATH"))
say, err := cowsay.Say(
"Hello Cloud Native @ Scale",
cowsay.Type("default"),
cowsay.BallonWidth(40),
cowsay.Type("octo"),
)
if err != nil {
panic(err)
}
fmt.Println(say)
}
| ["\"KO_DATA_PATH\""] | [] | ["KO_DATA_PATH"] | [] | ["KO_DATA_PATH"] | go | 1 | 0 | |
pkg/networking/util/util.go
package util
import (
"fmt"
"net"
"os"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"github.com/go-chassis/go-chassis/core/common"
)
func SplitServiceKey(key string) (name, namespace string) {
sets := strings.Split(key, ".")
if len(sets) >= 2 {
return sets[0], sets[1]
}
ns := os.Getenv("POD_NAMESPACE")
if ns == "" {
ns = common.DefaultValue
}
if len(sets) == 1 {
return sets[0], ns
}
return key, ns
}
// GetInterfaceIP gets the IPv4 address of the named network interface
func GetInterfaceIP(name string) (net.IP, error) {
ifi, err := net.InterfaceByName(name)
if err != nil {
return nil, err
}
addrs, _ := ifi.Addrs()
for _, addr := range addrs {
if ip, ipn, _ := net.ParseCIDR(addr.String()); len(ipn.Mask) == 4 {
return ip, nil
}
}
return nil, fmt.Errorf("no ip of version 4 found for interface %s", name)
}
// GetPodsSelector builds a label selector from the service's selector, used to obtain the backend pods bound to the service
func GetPodsSelector(svc *v1.Service) labels.Selector {
selector := labels.NewSelector()
for k, v := range svc.Spec.Selector {
r, _ := labels.NewRequirement(k, selection.Equals, []string{v})
selector = selector.Add(*r)
}
return selector
}
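// Hedged usage sketch (editorial addition, not part of the original file;
// written as if it lived in this package so no module import path has to be
// assumed). The Service literal and the "httpbin.default" key are illustrative.
//
//	func exampleUtilUsage() {
//		name, ns := SplitServiceKey("httpbin.default")
//		fmt.Println(name, ns) // httpbin default (the POD_NAMESPACE fallback only applies when no namespace part is given)
//
//		svc := &v1.Service{Spec: v1.ServiceSpec{Selector: map[string]string{"app": "httpbin"}}}
//		fmt.Println(GetPodsSelector(svc).String()) // app=httpbin
//	}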
| ["\"POD_NAMESPACE\""] | [] | ["POD_NAMESPACE"] | [] | ["POD_NAMESPACE"] | go | 1 | 0 | |
manage.py
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'oc_search.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
firetail/launcher.py
import sys
import os
import argparse
import subprocess
class ArgumentParser(argparse.ArgumentParser):
def __is_valid_directory(self, parser, arg):
if not os.path.isdir(arg):
parser.error('Directory {} not found.'.format(arg))
else:
return arg
def add_argument_with_dir_check(self, *args, **kwargs):
kwargs['type'] = lambda x: self.__is_valid_directory(self, x)
self.add_argument(*args, **kwargs)
def parse_cli_args():
parser = ArgumentParser(
description="Firetail - An EVE Online Discord Bot")
parser.add_argument(
"--no-restart", "-r",
help="Disables auto-restart.", action="store_true")
parser.add_argument(
"--debug", "-d", help="Enabled debug mode.", action="store_true")
return parser.parse_known_args()
def main():
print('''
______ _ _ _ _ \n
| ____(_) | | (_) | \n
| |__ _ _ __ ___| |_ __ _ _| | \n
| __| | | '__/ _ \ __/ _` | | | \n
| | | | | | __/ || (_| | | | \n
|_| |_|_| \___|\__\__,_|_|_| \n
''')
if sys.version_info < (3, 5, 0):
print("ERROR: Minimum Python version not met.\n"
"Firetail requires Python 3.5 or higher.\n")
return
print("Launching Firetail...", end=' ', flush=True)
launch_args, ft_args = parse_cli_args()
if launch_args.debug:
ft_args.append('-d')
# Get environment
env = os.environ
ft_args.append('-l')
while True:
code = subprocess.call(["firetail-bot", *ft_args], env=env)
if code == 0:
print("Goodbye!")
break
elif code == 26:
print("Rebooting! I'll be back in a bit!\n")
continue
else:
if launch_args.no_restart:
break
print("I crashed! Trying to restart...\n")
print("Exit code: {exit_code}".format(exit_code=code))
sys.exit(code)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
api/client/out.go
package client
import (
"io"
"os"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/term"
)
// OutStream is an output stream used by the DockerCli to write normal program
// output.
type OutStream struct {
out io.Writer
fd uintptr
isTerminal bool
state *term.State
}
func (o *OutStream) Write(p []byte) (int, error) {
return o.out.Write(p)
}
// FD returns the file descriptor number for this stream
func (o *OutStream) FD() uintptr {
return o.fd
}
// IsTerminal returns true if this stream is connected to a terminal
func (o *OutStream) IsTerminal() bool {
return o.isTerminal
}
// SetRawTerminal sets raw mode on the output terminal
func (o *OutStream) SetRawTerminal() (err error) {
if os.Getenv("NORAW") != "" || !o.isTerminal {
return nil
}
o.state, err = term.SetRawTerminalOutput(o.fd)
return err
}
// RestoreTerminal restores normal mode to the terminal
func (o *OutStream) RestoreTerminal() {
if o.state != nil {
term.RestoreTerminal(o.fd, o.state)
}
}
// GetTtySize returns the height and width in characters of the tty
func (o *OutStream) GetTtySize() (int, int) {
if !o.isTerminal {
return 0, 0
}
ws, err := term.GetWinsize(o.fd)
if err != nil {
logrus.Debugf("Error getting size: %s", err)
if ws == nil {
return 0, 0
}
}
return int(ws.Height), int(ws.Width)
}
// NewOutStream returns a new OutStream object from a Writer
func NewOutStream(out io.Writer) *OutStream {
fd, isTerminal := term.GetFdInfo(out)
return &OutStream{out: out, fd: fd, isTerminal: isTerminal}
}
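// Hedged usage sketch (editorial addition, not part of the original file;
// written against this package's API only). It wraps os.Stdout, switches to
// raw mode when attached to a terminal, and restores it afterwards.
//
//	func exampleRawOutput() {
//		out := NewOutStream(os.Stdout)
//		if out.IsTerminal() {
//			if err := out.SetRawTerminal(); err == nil {
//				defer out.RestoreTerminal()
//			}
//			h, w := out.GetTtySize()
//			logrus.Debugf("tty is %dx%d", w, h)
//		}
//		out.Write([]byte("hello\r\n")) // raw mode needs explicit CR LF
//	}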
| ["\"NORAW\""] | [] | ["NORAW"] | [] | ["NORAW"] | go | 1 | 0 | |
lib/backend/k8s/conversion/workload_endpoint_default.go
// Copyright (c) 2016-2021 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package conversion
import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"os"
"strings"
log "github.com/sirupsen/logrus"
kapiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiv3 "github.com/bw-bmbarga/libcalico-go/lib/apis/v3"
"github.com/bw-bmbarga/libcalico-go/lib/backend/model"
"github.com/bw-bmbarga/libcalico-go/lib/names"
cnet "github.com/bw-bmbarga/libcalico-go/lib/net"
"github.com/bw-bmbarga/libcalico-go/lib/numorstring"
)
type defaultWorkloadEndpointConverter struct{}
// VethNameForWorkload returns a deterministic veth name
// for the given Kubernetes workload (WEP) name and namespace.
func (wc defaultWorkloadEndpointConverter) VethNameForWorkload(namespace, podname string) string {
// A SHA1 is always 20 bytes long, and so is sufficient for generating the
// veth name and mac addr.
h := sha1.New()
h.Write([]byte(fmt.Sprintf("%s.%s", namespace, podname)))
prefix := os.Getenv("FELIX_INTERFACEPREFIX")
if prefix == "" {
// Prefix is not set. Default to "cali"
prefix = "cali"
} else {
// Prefix is set - use the first value in the list.
splits := strings.Split(prefix, ",")
prefix = splits[0]
}
log.WithField("prefix", prefix).Debugf("Using prefix to create a WorkloadEndpoint veth name")
return fmt.Sprintf("%s%s", prefix, hex.EncodeToString(h.Sum(nil))[:11])
}
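// Hedged aside (editorial addition, not part of the original file): the host
// interface name is just a prefix plus the first 11 hex characters of
// sha1("<namespace>.<podname>"), so it can be reproduced independently, e.g.
// when debugging. The namespace/pod values below are illustrative.
//
//	wc := defaultWorkloadEndpointConverter{}
//	fmt.Println(wc.VethNameForWorkload("default", "nginx-1"))
//	// equivalent manual computation (assuming the default "cali" prefix):
//	h := sha1.New()
//	h.Write([]byte("default.nginx-1"))
//	fmt.Println("cali" + hex.EncodeToString(h.Sum(nil))[:11])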
func (wc defaultWorkloadEndpointConverter) PodToWorkloadEndpoints(pod *kapiv1.Pod) ([]*model.KVPair, error) {
wep, err := wc.podToDefaultWorkloadEndpoint(pod)
if err != nil {
return nil, err
}
return []*model.KVPair{wep}, nil
}
// podToDefaultWorkloadEndpoint converts a Pod to a WorkloadEndpoint. It assumes the calling code
// has verified that the provided Pod is valid to convert to a WorkloadEndpoint.
// podToDefaultWorkloadEndpoint requires a Pod's Name and Node Name to be populated. It will
// fail to convert from a Pod to a WorkloadEndpoint otherwise.
func (wc defaultWorkloadEndpointConverter) podToDefaultWorkloadEndpoint(pod *kapiv1.Pod) (*model.KVPair, error) {
log.WithField("pod", pod).Debug("Converting pod to WorkloadEndpoint")
// Get all the profiles that apply
var profiles []string
// Pull out the Namespace based profile off the pod name and Namespace.
profiles = append(profiles, NamespaceProfileNamePrefix+pod.Namespace)
// Pull out the Serviceaccount based profile off the pod SA and namespace
if pod.Spec.ServiceAccountName != "" {
profiles = append(profiles, serviceAccountNameToProfileName(pod.Spec.ServiceAccountName, pod.Namespace))
}
wepids := names.WorkloadEndpointIdentifiers{
Node: pod.Spec.NodeName,
Orchestrator: apiv3.OrchestratorKubernetes,
Endpoint: "eth0",
Pod: pod.Name,
}
wepName, err := wepids.CalculateWorkloadEndpointName(false)
if err != nil {
return nil, err
}
podIPNets, err := getPodIPs(pod)
if err != nil {
// IP address was present but malformed in some way, handle as an explicit failure.
return nil, err
}
if IsFinished(pod) {
// Pod is finished but not yet deleted. In this state the IP will have been freed and returned to the pool
// so we need to make sure we don't let the caller believe it still belongs to this endpoint.
// Pods with no IPs will get filtered out before they get to Felix in the watcher syncer cache layer.
// We can't pretend the workload endpoint is deleted _here_ because that would confuse users of the
// native v3 Watch() API.
log.Debug("Pod is in a 'finished' state so no longer owns its IP(s).")
podIPNets = nil
}
ipNets := []string{}
for _, ipNet := range podIPNets {
ipNets = append(ipNets, ipNet.String())
}
// Generate the interface name based on workload. This must match
// the host-side veth configured by the CNI plugin.
interfaceName := wc.VethNameForWorkload(pod.Namespace, pod.Name)
// Build the labels map. Start with the pod labels, and append two additional labels for
// namespace and orchestrator matches.
labels := pod.Labels
if labels == nil {
labels = make(map[string]string, 2)
}
labels[apiv3.LabelNamespace] = pod.Namespace
labels[apiv3.LabelOrchestrator] = apiv3.OrchestratorKubernetes
if pod.Spec.ServiceAccountName != "" && len(pod.Spec.ServiceAccountName) < 63 {
// For backwards compatibility, include the label if less than 63 characters.
labels[apiv3.LabelServiceAccount] = pod.Spec.ServiceAccountName
}
// Pull out floating IP annotation
var floatingIPs []apiv3.IPNAT
if annotation, ok := pod.Annotations["cni.projectcalico.org/floatingIPs"]; ok && len(podIPNets) > 0 {
// Parse Annotation data
var ips []string
err := json.Unmarshal([]byte(annotation), &ips)
if err != nil {
return nil, fmt.Errorf("failed to parse '%s' as JSON: %s", annotation, err)
}
// Get IPv4 and IPv6 targets for NAT
var podnetV4, podnetV6 *cnet.IPNet
for _, ipNet := range podIPNets {
if ipNet.IP.To4() != nil {
podnetV4 = ipNet
netmask, _ := podnetV4.Mask.Size()
if netmask != 32 {
return nil, fmt.Errorf("PodIP %v is not a valid IPv4: Mask size is %d, not 32", ipNet, netmask)
}
} else {
podnetV6 = ipNet
netmask, _ := podnetV6.Mask.Size()
if netmask != 128 {
return nil, fmt.Errorf("PodIP %v is not a valid IPv6: Mask size is %d, not 128", ipNet, netmask)
}
}
}
for _, ip := range ips {
if strings.Contains(ip, ":") {
if podnetV6 != nil {
floatingIPs = append(floatingIPs, apiv3.IPNAT{
InternalIP: podnetV6.IP.String(),
ExternalIP: ip,
})
}
} else {
if podnetV4 != nil {
floatingIPs = append(floatingIPs, apiv3.IPNAT{
InternalIP: podnetV4.IP.String(),
ExternalIP: ip,
})
}
}
}
}
// Map any named ports through.
var endpointPorts []apiv3.EndpointPort
for _, container := range pod.Spec.Containers {
for _, containerPort := range container.Ports {
if containerPort.Name != "" && containerPort.ContainerPort != 0 {
var modelProto numorstring.Protocol
switch containerPort.Protocol {
case kapiv1.ProtocolUDP:
modelProto = numorstring.ProtocolFromString("udp")
case kapiv1.ProtocolSCTP:
modelProto = numorstring.ProtocolFromString("sctp")
case kapiv1.ProtocolTCP, kapiv1.Protocol("") /* K8s default is TCP. */ :
modelProto = numorstring.ProtocolFromString("tcp")
default:
log.WithFields(log.Fields{
"protocol": containerPort.Protocol,
"pod": pod,
"port": containerPort,
}).Debug("Ignoring named port with unknown protocol")
continue
}
endpointPorts = append(endpointPorts, apiv3.EndpointPort{
Name: containerPort.Name,
Protocol: modelProto,
Port: uint16(containerPort.ContainerPort),
})
}
}
}
// Create the workload endpoint.
wep := apiv3.NewWorkloadEndpoint()
wep.ObjectMeta = metav1.ObjectMeta{
Name: wepName,
Namespace: pod.Namespace,
CreationTimestamp: pod.CreationTimestamp,
UID: pod.UID,
Labels: labels,
GenerateName: pod.GenerateName,
}
wep.Spec = apiv3.WorkloadEndpointSpec{
Orchestrator: "k8s",
Node: pod.Spec.NodeName,
Pod: pod.Name,
Endpoint: "eth0",
InterfaceName: interfaceName,
Profiles: profiles,
IPNetworks: ipNets,
Ports: endpointPorts,
IPNATs: floatingIPs,
ServiceAccountName: pod.Spec.ServiceAccountName,
}
// Embed the workload endpoint into a KVPair.
kvp := model.KVPair{
Key: model.ResourceKey{
Name: wepName,
Namespace: pod.Namespace,
Kind: apiv3.KindWorkloadEndpoint,
},
Value: wep,
Revision: pod.ResourceVersion,
}
return &kvp, nil
}
| [
"\"FELIX_INTERFACEPREFIX\""
]
| []
| [
"FELIX_INTERFACEPREFIX"
]
| [] | ["FELIX_INTERFACEPREFIX"] | go | 1 | 0 | |
staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
)
var (
pluginLong = templates.LongDesc(`
Provides utilities for interacting with plugins.
Plugins provide extended functionality that is not part of the major command-line distribution.
Please refer to the documentation and examples for more information about how to write your own plugins.`)
pluginListLong = templates.LongDesc(`
List all available plugin files on a user's PATH.
Available plugin files are those that are:
- executable
- anywhere on the user's PATH
- begin with "kubectl-"
`)
ValidPluginFilenamePrefixes = []string{"kubectl"}
)
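// For illustration (hypothetical name): an executable file called "kubectl-hello" anywhere
// on the user's PATH is reported by "kubectl plugin list"; a non-executable file with the
// same name is listed with a warning, and files without the "kubectl-" prefix are ignored.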
func NewCmdPlugin(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
cmd := &cobra.Command{
Use: "plugin [flags]",
DisableFlagsInUseLine: true,
Short: i18n.T("Provides utilities for interacting with plugins."),
Long: pluginLong,
Run: func(cmd *cobra.Command, args []string) {
cmdutil.DefaultSubCommandRun(streams.ErrOut)(cmd, args)
},
}
cmd.AddCommand(NewCmdPluginList(f, streams))
return cmd
}
type PluginListOptions struct {
Verifier PathVerifier
NameOnly bool
PluginPaths []string
genericclioptions.IOStreams
}
// NewCmdPluginList provides a way to list all plugin executables visible to kubectl
func NewCmdPluginList(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
o := &PluginListOptions{
IOStreams: streams,
}
cmd := &cobra.Command{
Use: "list",
Short: "list all visible plugin executables on a user's PATH",
Long: pluginListLong,
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(o.Complete(cmd))
cmdutil.CheckErr(o.Run())
},
}
cmd.Flags().BoolVar(&o.NameOnly, "name-only", o.NameOnly, "If true, display only the binary name of each plugin, rather than its full path")
return cmd
}
func (o *PluginListOptions) Complete(cmd *cobra.Command) error {
o.Verifier = &CommandOverrideVerifier{
root: cmd.Root(),
seenPlugins: make(map[string]string),
}
o.PluginPaths = filepath.SplitList(os.Getenv("PATH"))
return nil
}
func (o *PluginListOptions) Run() error {
pluginsFound := false
isFirstFile := true
pluginErrors := []error{}
pluginWarnings := 0
for _, dir := range uniquePathsList(o.PluginPaths) {
files, err := ioutil.ReadDir(dir)
if err != nil {
if _, ok := err.(*os.PathError); ok {
fmt.Fprintf(o.ErrOut, "Unable read directory %q from your PATH: %v. Skipping...", dir, err)
continue
}
pluginErrors = append(pluginErrors, fmt.Errorf("error: unable to read directory %q in your PATH: %v", dir, err))
continue
}
for _, f := range files {
if f.IsDir() {
continue
}
if !hasValidPrefix(f.Name(), ValidPluginFilenamePrefixes) {
continue
}
if isFirstFile {
fmt.Fprintf(o.ErrOut, "The following compatible plugins are available:\n\n")
pluginsFound = true
isFirstFile = false
}
pluginPath := f.Name()
if !o.NameOnly {
pluginPath = filepath.Join(dir, pluginPath)
}
fmt.Fprintf(o.Out, "%s\n", pluginPath)
if errs := o.Verifier.Verify(filepath.Join(dir, f.Name())); len(errs) != 0 {
for _, err := range errs {
fmt.Fprintf(o.ErrOut, " - %s\n", err)
pluginWarnings++
}
}
}
}
if !pluginsFound {
pluginErrors = append(pluginErrors, fmt.Errorf("error: unable to find any kubectl plugins in your PATH"))
}
if pluginWarnings > 0 {
if pluginWarnings == 1 {
pluginErrors = append(pluginErrors, fmt.Errorf("error: one plugin warning was found"))
} else {
pluginErrors = append(pluginErrors, fmt.Errorf("error: %v plugin warnings were found", pluginWarnings))
}
}
if len(pluginErrors) > 0 {
fmt.Fprintln(o.ErrOut)
errs := bytes.NewBuffer(nil)
for _, e := range pluginErrors {
fmt.Fprintln(errs, e)
}
return fmt.Errorf("%s", errs.String())
}
return nil
}
// PathVerifier receives a path and determines if it is valid or not
type PathVerifier interface {
// Verify determines if a given path is valid
Verify(path string) []error
}
type CommandOverrideVerifier struct {
root *cobra.Command
seenPlugins map[string]string
}
// Verify implements PathVerifier and determines if a given path
// is valid depending on whether or not it overwrites an existing
// kubectl command path, or a previously seen plugin.
func (v *CommandOverrideVerifier) Verify(path string) []error {
if v.root == nil {
return []error{fmt.Errorf("unable to verify path with nil root")}
}
// extract the plugin binary name
segs := strings.Split(path, "/")
binName := segs[len(segs)-1]
cmdPath := strings.Split(binName, "-")
if len(cmdPath) > 1 {
// the first argument is always "kubectl" for a plugin binary
cmdPath = cmdPath[1:]
}
errors := []error{}
if isExec, err := isExecutable(path); err == nil && !isExec {
errors = append(errors, fmt.Errorf("warning: %s identified as a kubectl plugin, but it is not executable", path))
} else if err != nil {
errors = append(errors, fmt.Errorf("error: unable to identify %s as an executable file: %v", path, err))
}
if existingPath, ok := v.seenPlugins[binName]; ok {
errors = append(errors, fmt.Errorf("warning: %s is overshadowed by a similarly named plugin: %s", path, existingPath))
} else {
v.seenPlugins[binName] = path
}
if cmd, _, err := v.root.Find(cmdPath); err == nil {
errors = append(errors, fmt.Errorf("warning: %s overwrites existing command: %q", binName, cmd.CommandPath()))
}
return errors
}
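// For illustration (hypothetical binary name): a plugin installed as "kubectl-version"
// yields cmdPath ["version"], which resolves to the built-in "kubectl version" command
// and therefore triggers the "overwrites existing command" warning above.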
func isExecutable(fullPath string) (bool, error) {
info, err := os.Stat(fullPath)
if err != nil {
return false, err
}
if runtime.GOOS == "windows" {
fileExt := strings.ToLower(filepath.Ext(fullPath))
switch fileExt {
case ".bat", ".cmd", ".com", ".exe", ".ps1":
return true, nil
}
return false, nil
}
if m := info.Mode(); !m.IsDir() && m&0111 != 0 {
return true, nil
}
return false, nil
}
// uniquePathsList deduplicates a given slice of strings without
// sorting or otherwise altering its order in any way.
func uniquePathsList(paths []string) []string {
seen := map[string]bool{}
newPaths := []string{}
for _, p := range paths {
if seen[p] {
continue
}
seen[p] = true
newPaths = append(newPaths, p)
}
return newPaths
}
func hasValidPrefix(filepath string, validPrefixes []string) bool {
for _, prefix := range validPrefixes {
if !strings.HasPrefix(filepath, prefix+"-") {
continue
}
return true
}
return false
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
mesh_tensorflow/simd_mesh_impl.py | # coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SIMD Mesh implementation (for TPU/XLA)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import gin
from mesh_tensorflow import ops_with_redefined_builtins as mtf
from mesh_tensorflow import tpu_variables
from mesh_tensorflow import utils
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
from tensorflow.python.tpu.ops import tpu_ops # pylint: disable=g-direct-tensorflow-import
tf.flags.DEFINE_integer(
"logical_cores_per_chip",
default=2,
help="Number of logical accelerator cores per chip.")
FLAGS = tf.flags.FLAGS
@gin.configurable
class SimdMeshImpl(mtf.MeshImpl):
"""Mesh implementation for TPU using SIMD and MPI operations."""
def __init__(self,
shape,
layout,
devices=None,
device_assignment=None,
logical_to_physical=None,
allreduce_in_bfloat16_max_group_size=8):
"""Create a SimdMeshImpl.
Args:
shape: an input to mtf.convert_to_shape()
layout: an input to mtf.convert_to_layout_rules()
devices: deprecated
device_assignment: a tf.tpu.experimental.DeviceAssignment -
devices must be assigned in lexicographic order
logical_to_physical: an optional permutation representing the mapping
from logical cores to "physical" cores, where the physical cores are
listed in lexicographic order in the physical mesh, and the logical
cores are listed in lexicographic order in the logical mesh.
Default is lexicographic order.
allreduce_in_bfloat16_max_group_size: an integer. Allreduces of bfloat16
tensors are done in float32 if the group size exceeds this value.
"""
super(SimdMeshImpl, self).__init__(shape, layout)
if devices is not None:
tf.logging.warning("SimdMeshImpl ignoring devices %s" % devices)
self._device_assignment = device_assignment
tf.logging.info("SimdMeshImpl init: {0} {1}".format(shape, layout))
tf.logging.info("Device Assignment: {0}".format(device_assignment))
if logical_to_physical is None:
# TODO(noam): maybe use auto_logical_to_physical_tpu() here
logical_to_physical = list(range(self.size))
if sorted(logical_to_physical) != list(range(self.size)):
raise ValueError(
"logical_to_physical must be a permutation on range(shape.size)"
" shape=%s logical_to_physical=%s" % (shape, logical_to_physical))
self._logical_to_physical = logical_to_physical
self._physical_to_logical = [None] * self.size
for logical, physical in enumerate(self._logical_to_physical):
self._physical_to_logical[physical] = logical
self._pnum_tensor = None
self.graph_device_function_stacks = []
self.copy_master_to_slice_ops = []
self._allreduce_in_bfloat16_max_group_size = (
allreduce_in_bfloat16_max_group_size)
@property
def pnum_tensor(self):
if self._pnum_tensor is not None:
return self._pnum_tensor
with utils.outside_all_rewrites():
tf.logging.info("Create pnum_tensor")
self._pnum_tensor = tpu_ops.tpu_replicated_input(
self._physical_to_logical, name="pnum_constants")
return self._pnum_tensor
def l2p(self, logical_pnum):
return self._logical_to_physical[logical_pnum]
def p2l(self, physical_pnum):
return self._physical_to_logical[physical_pnum]
class LaidOutTensor(object):
"""One Slice."""
def __init__(self, tensor_list):
assert isinstance(tensor_list, list)
self._tensor_list = tensor_list
def __repr__(self):
return "[" + ",".join([str(t) for t in self._tensor_list]) + "]"
@property
def tensor_list(self):
return self._tensor_list
@property
def one_slice(self):
return self._tensor_list[0]
@classmethod
def from_tensor_list(cls, tensor_list):
return cls(tensor_list)
@property
def all_slices(self):
return self._tensor_list
@property
def slice_shape(self):
return self.one_slice.shape.as_list()
def to_laid_out_tensor(self):
return self
class LaidOutVariable(object):
"""Maintains slice-variables and copy operations."""
def __init__(self, variable, mesh_impl):
"""Create a LaidOutVariable.
Args:
variable: a Variable (Operation)
mesh_impl: a MeshImpl
"""
self._variable = variable
self._mesh_impl = mesh_impl
shape = variable.outputs[0].shape
slice_shape = mesh_impl.slice_shape(shape)
base_name = variable.name
slices = []
slices_with_master_dtype = []
with tf.device(variable.master_device), utils.outside_all_rewrites():
zero_tensor = tf.zeros(slice_shape, dtype=variable.slice_dtype)
# pylint: disable=protected-access
init_device_stack = tf.get_default_graph()._device_function_stack
if not mesh_impl.graph_device_function_stacks:
for pnum in xrange(mesh_impl.size):
tpu_device = mesh_impl.device_assignment.tpu_device(replica=pnum)
with tf.device(tpu_device):
mesh_impl.graph_device_function_stacks.append(
tf.get_default_graph()._device_function_stack.copy())
for physical_pnum in xrange(mesh_impl.size):
slice_var_name = base_name + "_slice_%d" % physical_pnum
# Use tf.Variable instead of tf.get_variable since latter adds lots of
# useless operations to the TF graph. Use tf.get_variable only if
# in a AUTO_REUSE scope.
# Note: Repeatedly entering 'with tf.device():' blocks slows down graph
# construction. Therefore we directly use the cached device_stack here.
tf.get_default_graph()._device_function_stack = (
mesh_impl.graph_device_function_stacks[physical_pnum])
if tf.get_variable_scope().reuse == tf.AUTO_REUSE:
slice_var = tf.get_variable(
initializer=zero_tensor,
trainable=self._variable.trainable,
collections=["TPU_VAR"],
dtype=variable.slice_dtype,
name=slice_var_name)
else:
slice_var = tf.Variable(
initial_value=zero_tensor,
trainable=self._variable.trainable,
collections=["TPU_VAR"],
dtype=variable.slice_dtype,
name=slice_var_name,
expected_shape=slice_shape)
slices.append(slice_var)
# Restore the initial stack
tf.get_default_graph()._device_function_stack = init_device_stack
# pylint: enable=protected-access
self._laid_out_tensor = mesh_impl.LaidOutTensor(
[tpu_variables.ReplicatedVariable(base_name, slices)])
with tf.device(variable.master_device), utils.outside_all_rewrites():
if os.environ.get("MTF_SEQUENCE_MODE", "") == "1":
if mesh_impl.copy_master_to_slice_ops:
with tf.control_dependencies(
[mesh_impl.copy_master_to_slice_ops[-1]]):
self._copy_master_to_slices = self._gen_copy_master_to_slices_op(
variable.get_master(), shape, slices, slice_shape)
else:
self._copy_master_to_slices = self._gen_copy_master_to_slices_op(
variable.get_master(), shape, slices, slice_shape)
mesh_impl.copy_master_to_slice_ops.append(self._copy_master_to_slices)
else:
self._copy_master_to_slices = self._gen_copy_master_to_slices_op(
variable.get_master(), shape, slices, slice_shape)
slices_with_master_dtype = [
tf.cast(s, variable.master_dtype) for s in slices]
slices_with_master_dtype = [
slices_with_master_dtype[mesh_impl.l2p(logical_pnum)]
for logical_pnum in range(mesh_impl.size)]
self._copy_slices_to_master = variable.assign_to_master(
mesh_impl.combine_slices(slices_with_master_dtype, shape,
device=variable.master_device))
def _gen_copy_master_to_slices_op(self, master_variable, master_shape,
slices, slice_shape):
"""Generate ops which slices master and assign to slices.
Args:
master_variable: The master variable.
master_shape: The shape of master variable.
slices: The list of slice-variables in physical order.
slice_shape: The shape of the slice variable.
Returns:
A tf.group of the assign ops.
"""
mesh_impl = self._mesh_impl
master_layout = mesh_impl.tensor_layout(master_shape)
# For handling case: master is float32 and slices are bfloat16.
if master_variable.dtype != slices[0].dtype:
master_variable = tf.cast(master_variable, slices[0].dtype)
assign_ops = []
if master_layout.is_fully_replicated:
assign_ops = [tf.assign(t, master_variable) for t in slices]
else:
slice_dict = {}
for logical_pnum in xrange(len(slices)):
slice_begin = mesh_impl.slice_begin(master_shape, logical_pnum)
slice_begin_tuple = tuple(slice_begin)
# Reuse the same slice if slice_begin doesn't change.
if slice_begin_tuple not in slice_dict:
slice_dict[slice_begin_tuple] = tf.slice(master_variable,
slice_begin, slice_shape)
physical_pnum = mesh_impl.l2p(logical_pnum)
assign_ops.append(
tf.assign(slices[physical_pnum], slice_dict[slice_begin_tuple]))
return tf.group(assign_ops)
def assign_to_slices(self, assign_fn, values, assign_to_tensor_list=None):
"""Assign to the slice variables.
Args:
assign_fn: a function from
(mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation
values: a list of tf.Tensor
assign_to_tensor_list: an optional list of tf.Variable
Returns:
a tf.operation
"""
if assign_to_tensor_list is None:
assign_to_tensor_list = self._laid_out_tensor.all_slices
# Handle both N -> 1 and N -> N cases.
num_slices = min(len(assign_to_tensor_list), len(values))
devices = [""] * num_slices
return tf.group(
mtf.parallel(devices, assign_fn,
[self._variable] * len(devices),
assign_to_tensor_list[:num_slices],
values[:num_slices]))
@property
def laid_out_tensor(self):
return self._laid_out_tensor
@property
def copy_master_to_slices(self):
return self._copy_master_to_slices
@property
def copy_slices_to_master(self):
return self._copy_slices_to_master
def laid_out_pnum(self):
"""Returns a LaidOutTensor containing the logical processor number.
Returns:
a LaidOutTensor where each slice is an integer scalar
"""
return self.LaidOutTensor([self.pnum_tensor])
def _create_group_assignment(self, mesh_axes):
"""Create group assignment for XLA cross replica ops (physical pnums)."""
partitioning = {}
for logical_pnum in xrange(self.size):
group = mtf.pnum_to_group(self.shape, mesh_axes, logical_pnum)
if group not in partitioning:
partitioning[group] = []
partitioning[group].append(self.l2p(logical_pnum))
group_assignment = []
for group, physical_pnums in partitioning.items():
group_assignment.append(physical_pnums)
return group_assignment
def allreduce(self, x, mesh_axes, reduction_fn_string):
"""Grouped allreduce, (summed across the given dimensions).
Args:
x: a LaidOutTensor
mesh_axes: a list of integers
reduction_fn_string: "SUM"
Returns:
a LaidOutTensor
Raises:
ValueError: if the reduction is not yet implemented.
"""
if not mesh_axes:
return x
x = x.to_laid_out_tensor()
if reduction_fn_string == "SUM":
group_assignment = self._create_group_assignment(mesh_axes)
group_size = len(group_assignment[0])
tf_in = x.one_slice
dtype = tf_in.dtype
if dtype == tf.float32:
cast_to_float32 = False
elif dtype == tf.bfloat16:
cast_to_float32 = (
group_size > self._allreduce_in_bfloat16_max_group_size)
else:
tf.logging.info("Casting %s to float32 for allreduce" % tf_in.dtype)
cast_to_float32 = True
if cast_to_float32:
tf_in = tf.cast(tf_in, tf.float32)
tf_out = tpu_ops.cross_replica_sum(tf_in, group_assignment)
if cast_to_float32:
tf_out = tf.cast(tf_out, dtype)
return self.LaidOutTensor([tf_out])
else:
for axis in mesh_axes:
x = self.allconcat(x, axis, 0, stack=True)
x = self.LaidOutTensor(
[mtf.reduction_fn(reduction_fn_string)(x.one_slice, 0)])
return x
def allconcat(self, x, mesh_axis, concat_axis, stack=False):
"""Grouped allconcat (like MPI allgather followed by concat).
TODO(noam): inefficient - replace with a XLA allconcat when available
Args:
x: a LaidOutTensor
mesh_axis: an integer - the mesh axis along which to group
concat_axis: an integer (the Tensor axis along which to concatenate)
stack: a boolean - whether to stack instead of concat
Returns:
a LaidOutTensor
"""
x = x.to_laid_out_tensor()
coord = self.laid_out_pcoord(mesh_axis)
t = x.one_slice
old_shape = t.shape.as_list()
num_parts = self.shape[mesh_axis].size
t = tf.expand_dims(t, concat_axis)
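# Each core keeps its own slice at the position given by its coordinate along
# mesh_axis (all other positions are zeroed by the one-hot below), so the grouped
# SUM allreduce at the end reassembles the full concatenation on every core.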
t *= tf.reshape(
tf.one_hot(coord.one_slice, num_parts, dtype=t.dtype),
[num_parts if i == concat_axis else 1
for i in xrange(len(old_shape) + 1)])
if not stack:
new_shape = old_shape[:]
new_shape[concat_axis] *= num_parts
t = tf.reshape(t, new_shape)
return self.allreduce(self.LaidOutTensor([t]), [mesh_axis], "SUM")
def alltoall(self, x, mesh_axis, split_axis, concat_axis):
"""Grouped alltoall (like MPI alltoall with splitting and concatenation).
Args:
x: a LaidOutTensor
mesh_axis: an integer the mesh axis along which to group
split_axis: an integer (the Tensor axis along which to split)
concat_axis: an integer (the Tensor axis along which to concatenate)
Returns:
a LaidOutTensor
"""
x = x.to_laid_out_tensor()
t = x.one_slice
group_assignment = self._create_group_assignment([mesh_axis])
dtype = t.dtype
if dtype == tf.float32:
# There seems to be a bug with float32 alltoall.
# Do it in bfloat16 until the bug is fixed.
# TODO(noam): file a bug
t = tf.to_bfloat16(t)
t = tpu_ops.all_to_all(
t,
concat_dimension=concat_axis,
split_dimension=split_axis,
split_count=len(group_assignment[0]),
group_assignment=group_assignment)
t = tf.cast(t, dtype)
x = self.LaidOutTensor([t])
return x
def receive(self, x, mesh_axis, source_pcoord):
"""Collective receive in groups.
Each group contains the processors that differ only in mesh_axis.
```python
group_size = self.shape[mesh_axis].size
```
Args:
x: a LaidOutTensor
mesh_axis: an integer
source_pcoord: a list of optional integers. Each element is either None
or an integer in [0, group_size). If source_pcoord[k] is None, then the
output for the k-th processor in each group is a zero tensor. If
source_pcoord[k] is not None, then the output for the k-th processor in
each group is equal to the input for the source_pcoord[k]-th processor
in that group.
Returns:
a LaidOutTensor
"""
x = x.to_laid_out_tensor()
t = x.one_slice
source_target_pairs = []
for pnum in xrange(self.size):
coord = mtf.pnum_to_processor_coordinates(self.shape, pnum)
k = coord[mesh_axis]
if source_pcoord[k] is not None:
coord[mesh_axis] = source_pcoord[k]
source_pnum = mtf.processor_coordinates_to_pnum(self.shape, coord)
source_target_pairs.append(
[self.l2p(source_pnum),
self.l2p(pnum)])
if not source_target_pairs:
ret = tf.zeros_like(t, t.dtype)
elif t.dtype in [tf.float32, tf.bfloat16, tf.int32]:
ret = tpu_ops.collective_permute(t, source_target_pairs)
else:
# If t is not one of the allowed types, cast and cast back.
ret = tf.cast(tpu_ops.collective_permute(
tf.cast(t, tf.float32), source_target_pairs), t.dtype)
return self.LaidOutTensor([ret])
def slice(self, tf_tensor, tensor_shape):
""""Slice out the corresponding part of tensor given the pnum variable."""
tensor_layout = self.tensor_layout(tensor_shape)
if tensor_layout.is_fully_replicated:
return self.LaidOutTensor([tf_tensor])
else:
slice_shape = self.slice_shape(tensor_shape)
slice_begins = [
self.slice_begin(tensor_shape, pnum) for pnum in xrange(self.size)
]
slice_begins_tensor = tf.stack(slice_begins)
# slice on source device
selected_slice_begin = tf.gather(slice_begins_tensor, self.pnum_tensor)
return self.LaidOutTensor(
[tf.slice(tf_tensor, selected_slice_begin, slice_shape)])
def slicewise(self, fn, *inputs):
"""Execute a function in parallel on all slices.
Args:
fn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors.
*inputs: a list of inputs. Each input is either a LaidOutTensor or
is convertible to a tf.Tensor.
Returns:
a LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple.
"""
# convert all inputs to LaidOutTensor where possible
inputs = mtf.convert_args_to_laid_out_tensors(inputs)
ret = fn(*[
x.one_slice if isinstance(x, self.LaidOutTensor) else x
for x in inputs])
if isinstance(ret, tuple):
return tuple([self.LaidOutTensor([t]) for t in ret])
else:
return self.LaidOutTensor([ret])
@property
def device_assignment(self):
return self._device_assignment
@property
def devices(self):
return self._devices
def random(self, shape, tf_fn, kwargs):
"""Call a random tf operation (e.g. random_uniform).
Args:
shape: a Shape
tf_fn: a function such as tf.random.uniform
kwargs: kwargs to pass to tf_fn, except for seed
Returns:
a LaidOutTensor
"""
# TODO(noam): can we make things better with stateless_random?
slice_shape = self.slice_shape(shape)
x = tf_fn(slice_shape, **kwargs)
# TPU does not have seeds enabled. Sync up the
# random choices by zeroing out all but the first core per group of
# identical slices, then allreducing by group.
layout = self.tensor_layout(shape)
# we need to sync across these axes.
mesh_axes = [i for i in xrange(self.ndims)
if i not in layout.tensor_axis_to_mesh_axis]
multiplier = 1.0
for axis in mesh_axes:
multiplier *= tf.cast(
tf.equal(self.laid_out_pcoord(axis).one_slice, 0), x.dtype)
x *= multiplier
x = self.LaidOutTensor([x])
x = self.allreduce(x, mesh_axes, "SUM")
return x
def export_to_tf_tensor(self, x, laid_out_x):
"""Turn a Tensor into a tf.Tensor.
Args:
x: a Tensor
laid_out_x: a LaidOutTensor
Returns:
a tf.Tensor
"""
tensor_layout = self.tensor_layout(x.shape)
if not tensor_layout.is_fully_replicated:
raise NotImplementedError(
"SimdMeshImpl only supports export_to_tf_tensor of fully-replicated "
"Tensors. Try reshaping to new dimension names. "
" x.shape = %s tensor_layout=%s"
% (x.shape, tensor_layout))
return laid_out_x.one_slice
def import_tf_tensor(self, x, tf_x):
"""Import a tf.Tensor, producing a LaidOutTensor.
Args:
x: a Tensor
tf_x: a tf.Tensor
Returns:
a LaidOutTensor
"""
return self.slice(tf_x, x.shape)
@property
def supports_control_dependencies(self):
return False
def einsum(self, equation, *slices):
"""Override this for custom einsum implementation.
Args:
equation: a string
*slices: a list of tf.Tensor
Returns:
a tf.Tensor
"""
return tf.einsum(equation, *slices)
def _ring_2d(m, n):
"""Ring-order of a mxn mesh.
If m and n are both even, then we generate a ring like this:
0 -- 1 -- 2 -- 3
|    |    |    |
15-- 6 -- 5 -- 4
|    |    |    |
14-- 7 -- 8 -- 9
|    |    |    |
13-- 12-- 11-- 10
Args:
m: an integer
n: an integer
Returns:
a list of mxn pairs
"""
if m == 1:
return [(0, i) for i in range(n)]
if n == 1:
return [(i, 0) for i in range(m)]
if m % 2 != 0:
tf.logging.warning("Odd dimension")
return [(i % m, i // m) for i in range(n * m)]
ret = [(0, 0)]
for i in range(m // 2):
for j in range(1, n):
ret.append((2 * i, j))
for j in range(n-1, 0, -1):
ret.append((2 * i + 1, j))
for i in range(m-1, 0, -1):
ret.append((i, 0))
return ret
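# A small hand-traced example (illustrative, not a doctest): _ring_2d(2, 3) returns
# [(0, 0), (0, 1), (0, 2), (1, 2), (1, 1), (1, 0)], i.e. the six positions of a 2x3
# mesh visited in one cycle so that consecutive entries are physically adjacent.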
def _logical_1d_to_physical_subspace_auto(sizes_and_strides, physical_shape):
"""Maps logical 1d mesh to subspace of physical nd mesh.
We are mapping a 1d logical mesh to a subspace (a strided slice containing the
origin) of an n-dimensional physical mesh.
output[i] contains the coordinate-tuple in the physical mesh for the i-th
logical processor.
sizes_and_strides is a list of (size, stride) pairs specifying the dimensions
of the strided slice. For example,
sizes_and_strides=[(2, 16), (4, 1)] would represent the slice containing
[(0, 0), (0, 1), (0, 2), (0, 3),
(16, 0), (16, 1), (16, 2), (16, 3)]
This function heuristically picks an order, with the goal of optimizing
allreduce performance.
Args:
sizes_and_strides: a list of n (size, stride) pairs
physical_shape: ignored
Returns:
a list of coordinate-lists
"""
del physical_shape
ndims = len(sizes_and_strides)
sizes = [p[0] for p in sizes_and_strides]
strides = [p[1] for p in sizes_and_strides]
n = mtf.list_product(sizes)
if ndims >= 2 and sizes[0] > 1 and sizes[1] > 1:
ring = _ring_2d(sizes[0], sizes[1])
ret = []
sizes_combined = [sizes[0] * sizes[1]] + sizes[2:]
for logical_pnum in range(n):
logical_coord = mtf.pnum_to_processor_coordinates(
sizes_combined, logical_pnum)
ret.append(list(ring[logical_coord[0]]) + logical_coord[1:])
else:
ret = [mtf.pnum_to_processor_coordinates(sizes, logical_pnum)
for logical_pnum in range(n)]
# multiply by strides
ret = [[x * stride for x, stride in zip(pcoord, strides)] for pcoord in ret]
return ret
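# Hand-traced example matching the docstring above (illustrative values, derived by
# tracing the code): for sizes_and_strides=[(2, 16), (4, 1)] the 8 logical processors
# are laid out along a 2x4 ring and then scaled by the strides, giving
# [[0, 0], [0, 1], [0, 2], [0, 3], [16, 3], [16, 2], [16, 1], [16, 0]].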
def _logical_to_physical_v1(
sizes_and_strides, physical_shape,
fn_1d=_logical_1d_to_physical_subspace_auto):
"""Maps logical m-dimensional mesh to physical n-dimensional mesh.
Also see comments to _logical_1d_to_physical_subspace_auto.
We are mapping an m-dimensional logical mesh to an n-dimensional physical mesh.
output[i] contains the coordinate-tuple in the physical mesh for the i-th
logical processor (if the logical processors are ordered lexicographically).
sizes_and_strides is a list of m lists of n (size, stride) pairs.
sizes_and_strides[i] specifies the subspace (strided slice containing the
origin) of the physical mesh covered by axis i of the logical mesh. See
comments to _logical_1d_to_physical_subspace_auto for more detail.
For example, say we have a physical mesh with shape [4, 4, 2] and a logical
mesh with shape [4, 8]. We want to divide the physical mesh into 4 tiles,
each with shape [2, 2, 2]. The first logical dimension corresponds to which
tile, and the second logical dimension corresponds to position within a tile.
This would correspond to:
physical_shape=[4, 4, 2]
sizes_and_strides=[[(2, 2), (2, 2), (1, 2)], [(2, 1), (2, 1), (2, 1)]]
physical_shape can be inferred from sizes_and_strides, but is passed in for
error checking.
Args:
sizes_and_strides: a list of m list of n (size, stride) pairs
physical_shape: a list of integers
fn_1d: a function like _logical_1d_to_physical_subspace_auto
Returns:
a list of coordinate-lists
"""
pndims = len(physical_shape)
logical_shape = [
mtf.list_product([p[0] for p in l]) for l in sizes_and_strides]
n = mtf.list_product(physical_shape)
if n != mtf.list_product(logical_shape):
raise ValueError(
"logical size and physical size must match "
"- got sizes_and_strides=%s physical_shape=%s"
% (sizes_and_strides, physical_shape))
dimension_layouts = [fn_1d(l, physical_shape) for l in sizes_and_strides]
tf.logging.info("physical_shape: %s" % physical_shape)
tf.logging.info("sizes_and_strides: %s" % sizes_and_strides)
for i, l in enumerate(dimension_layouts):
tf.logging.info("dimension_layout %s: %s" % (i, l))
ret = []
for logical_pnum in range(n):
logical_coordinates = mtf.pnum_to_processor_coordinates(
logical_shape, logical_pnum)
physical_coordinates = [0] * pndims
for logical_axis, logical_coord in enumerate(logical_coordinates):
for physical_axis in range(pndims):
physical_coordinates[physical_axis] += (
dimension_layouts[logical_axis][logical_coord][physical_axis])
ret.append(physical_coordinates)
# verify that we have indeed covered all the processors
l2p = [mtf.processor_coordinates_to_pnum(physical_shape, c) for c in ret]
if sorted(l2p) != list(range(n)):
raise ValueError(
"logical_to_physical produced something that was not a permutation."
" sizes_and_strides=%s physical_shape=%s ret=%s"
% (sizes_and_strides, physical_shape, ret))
return ret
class HierarchicalTiling(object):
"""One kind of mapping of a logical mesh to a physical mesh."""
def __init__(self, spec, physical_shape):
"""Constructs a HierarchicalTiling.
spec is a list corresponding to the logical dimensions.
spec[i] corresponds to the i-th logical dimension and consists of a name
and a list of integers, the list being the shape of logical axis i when
it is physically projected to the physical mesh and then compacted.
Striding information is omitted. By convention, the earlier dimensions
get more strided. so the axis corresponding to the last dimension always
gets projected to the tile specified by its shape.
Args:
spec: a list of (string, list-of-integers) pairs
physical_shape: a list of integers
"""
self._names = [p[0] for p in spec]
logical_ndims = len(spec)
physical_ndims = len(physical_shape)
projected_shapes = [p[1] for p in spec]
if logical_ndims > 0 and projected_shapes[0] is None:
# fill in missing value
projected_shapes[0] = list(physical_shape)
for s in projected_shapes[1:]:
for i, x in enumerate(s):
projected_shapes[0][i] //= x
# compute strides, and verify that the spec is valid.
products = [1] * physical_ndims
sizes_and_strides = []
for s in reversed(projected_shapes):
sizes_and_strides.append(
[(size, stride) for size, stride in zip(s, products)])
for i, x in enumerate(s):
products[i] *= x
if products != physical_shape:
raise ValueError("mesh spec multiplies to the wrong size"
"spec=%s physical_shape=%s products=%s" %
(spec, physical_shape, products))
sizes_and_strides.reverse()
self._physical_coordinates = _logical_to_physical_v1(
sizes_and_strides, physical_shape)
self._logical_to_physical = [
mtf.processor_coordinates_to_pnum(physical_shape, c)
for c in self._physical_coordinates]
self._mesh_shape = mtf.Shape(
[mtf.Dimension(name, mtf.list_product(s))
for name, s in zip(self._names, projected_shapes)])
@property
def logical_to_physical(self):
"""List of physical processor numbers."""
return list(self._logical_to_physical)
@property
def mesh_shape(self):
return self._mesh_shape
@classmethod
def spec_to_mesh_shape(cls, spec, num_processors):
"""Compute mesh shape even without knowing the physical shape.
This is useful in cases where the mesh shape must be computed before
you know the physical_shape.
Args:
spec: a list of (string, list-of-integers) pairs
num_processors: an integer
Returns:
a mtf.Shape
"""
logical_ndims = len(spec)
names = [p[0] for p in spec]
sizes = [p[1] for p in spec]
sizes = [None if s is None else mtf.list_product(s) for s in sizes]
if logical_ndims > 0 and sizes[0] is None:
sizes[0] = num_processors // mtf.list_product(sizes[1:])
if mtf.list_product(sizes) != num_processors:
raise ValueError("product of spec must be num_processors"
" spec=%s num_processors=%s"
% (spec, num_processors))
return mtf.Shape(
[mtf.Dimension(name, s) for name, s in zip(names, sizes)])
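# A minimal usage sketch (dimension names are made up for illustration): tiling a
# [4, 4, 2]-core physical mesh into four [2, 2, 2] tiles, with "batch" selecting the
# tile and "model" the position within a tile:
#   tiling = HierarchicalTiling([("batch", None), ("model", [2, 2, 2])], [4, 4, 2])
#   tiling.mesh_shape            # a 4 x 8 logical mesh (batch=4, model=8)
#   tiling.logical_to_physical   # a permutation of range(32)
# spec_to_mesh_shape computes the same mesh shape when only the processor count is known:
#   HierarchicalTiling.spec_to_mesh_shape(
#       [("batch", None), ("model", [2, 2, 2])], num_processors=32)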
def physical_shape_3d_from_topology_proto_4d(mesh_shape):
"""Convert a 4d shape that we get from TPU estimator to a 3d shape.
Args:
mesh_shape: a list of length 4
Returns:
a list of length 3
"""
if len(mesh_shape) != 4:
raise ValueError("Expected a 4d shape [x, y, z, core]")
return [mesh_shape[1]*mesh_shape[2], mesh_shape[0], mesh_shape[3]]
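# For example, a 4d topology shape of [4, 4, 1, 2] (x, y, z, core) becomes
# [4 * 1, 4, 2] == [4, 4, 2] (rows, cols, cores per chip).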
def auto_logical_to_physical_tpu(logical_shape,
physical_shape,
return_coordinates=False):
"""Set up a mapping from logical to physical cores for TPU.
We will try to set up a mapping so that allreduce operations are relatively
fast, prioritizing the later dimensions in the mesh_shape.
Example:
auto_logical_to_physical_tpu(
logical_shape=[16, 8], physical_shape=[8, 8, 1, 2])
Heuristics in this function are subject to change.
Args:
logical_shape: a list of integers
physical_shape: a list of integers - typically [X, Y, 1, cores]
return_coordinates: a boolean - return a list of integer lists (coordinates)
instead of a list of processor indices
Returns:
logical_to_physical: a permutation of range(product(physical_shape))
"""
tf.logging.info("auto_logical_to_physical_tpu "
"logical_shape=%s physical_shape=%s" %
(logical_shape, physical_shape))
if mtf.list_product(logical_shape) != mtf.list_product(physical_shape):
raise ValueError(
"physical and logical shapes must have the same product "
"physical_shape=%s logical_shape=%s" % (physical_shape, logical_shape))
# drop logical dimensions of size 1
logical_shape = [i for i in logical_shape if i != 1]
num_cores = mtf.list_product(logical_shape)
# For physical shapes different from what we are used to [2^a, 2^b, 2],
# return a simple default value (a lexicographic ordering)
def _default_value():
default = list(range(num_cores))
if return_coordinates:
default = [mtf.pnum_to_processor_coordinates(physical_shape, i) for i in default]
return default
if len(physical_shape) == 4 and physical_shape[2] == 1:
physical_shape = physical_shape_3d_from_topology_proto_4d(physical_shape)
elif len(physical_shape) != 3:
tf.logging.warning("Unrecognized format for tpu physical shape")
return _default_value()
# physical_shape is a triple of rows, cols, cores
p0, p1, p2 = physical_shape
if p2 != FLAGS.logical_cores_per_chip:
return _default_value()
for dimsize in [p0, p1]:
# if dimsize not a power of 2, give up
if dimsize & (dimsize - 1):
return _default_value()
# At this point, the physical shape has at least 1x1x2=2 cores, so there
# must be at least one logical dimension.
assert logical_shape
if len(logical_shape) == 1:
# ring of p0 x p1 chips
ring = _ring_2d(p0, p1)
logical_to_physical = []
for logical_pnum in range(num_cores):
core_on_chip = logical_pnum % FLAGS.logical_cores_per_chip
chip_num = logical_pnum // FLAGS.logical_cores_per_chip
i, j = ring[chip_num]
logical_to_physical.append((i, j, core_on_chip))
else:
# We have a p0 x p1 rectangle of chips, which we will tile with rectangular
# tiles. The first logical dimension correspond to the number of tiles,
# and the other logical dimensions will correspond to position within a
# tile.
num_tiles = logical_shape[0]
tile_chips = num_cores // num_tiles // p2
# If we can, we make each tile occupy exactly one row or column of chips.
# Otherwise, we make each tile approximately square.
if len(logical_shape) == 2 and tile_chips == p0:
t0, t1 = [tile_chips, 1]
elif len(logical_shape) == 2 and tile_chips == p1:
t0, t1 = [1, tile_chips]
else:
# try to make the tile approximately square
lg_tile_chips = int(math.log(tile_chips, 2))
t0 = 2 ** (lg_tile_chips // 2)
# make sure that the tile fits in the mesh - i.e.
# t0 <= p0
# t1 == tile_chips // t0 <= p1
t0 = min(t0, p0)
t0 = max(t0, tile_chips // p1)
t1 = tile_chips // t0
# recursive call to find mapping for one tile
tile_logical_to_physical = auto_logical_to_physical_tpu(
logical_shape[1:], [t0, t1, p2], return_coordinates=True)
tiles_ring = _ring_2d(p0 // t0, p1 // t1)
logical_to_physical = []
for logical_pnum in range(num_cores):
logical_tile_num = logical_pnum // (t0 * t1 * p2)
logical_pos_in_tile = logical_pnum % (t0 * t1 * p2)
logical_to_physical.append((
tiles_ring[logical_tile_num][0] * t0 +
tile_logical_to_physical[logical_pos_in_tile][0],
tiles_ring[logical_tile_num][1] * t1 +
tile_logical_to_physical[logical_pos_in_tile][1],
tile_logical_to_physical[logical_pos_in_tile][2]))
tf.logging.info("auto_logical_to_physical_tpu logical_to_physical = %s"
% logical_to_physical)
if return_coordinates:
return logical_to_physical
else:
return [mtf.processor_coordinates_to_pnum(physical_shape, coord)
for coord in logical_to_physical]
| []
| []
| [
"MTF_SEQUENCE_MODE"
]
| [] | ["MTF_SEQUENCE_MODE"] | python | 1 | 0 | |
jaraco/translate/__main__.py | import sys
import os
from . import google
def main():
lang = sys.argv[1]
text = ' '.join(sys.argv[2:])
google.translate.API_key = os.environ['GOOGLE_TRANSLATE_API_KEY']
print(google.translate(text, target_lang=lang))
__name__ == '__main__' and main()
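# Example invocation (hypothetical key and text):
#   GOOGLE_TRANSLATE_API_KEY=... python -m jaraco.translate es "Hello, world"
# translates everything after the language code into the requested target language.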
| []
| []
| [
"GOOGLE_TRANSLATE_API_KEY"
]
| [] | ["GOOGLE_TRANSLATE_API_KEY"] | python | 1 | 0 | |
test/extended/util/framework.go | package util
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"strings"
"sync"
"time"
g "github.com/onsi/ginkgo"
o "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
kapi "k8s.io/kubernetes/pkg/api"
kapiv1 "k8s.io/kubernetes/pkg/api/v1"
batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
kbatchclient "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1"
kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
kinternalcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
"k8s.io/kubernetes/pkg/quota"
"k8s.io/kubernetes/test/e2e/framework"
authapi "github.com/openshift/origin/pkg/authorization/apis/authorization"
buildapi "github.com/openshift/origin/pkg/build/apis/build"
"github.com/openshift/origin/pkg/client"
deployapi "github.com/openshift/origin/pkg/deploy/apis/apps"
deployutil "github.com/openshift/origin/pkg/deploy/util"
imageapi "github.com/openshift/origin/pkg/image/apis/image"
"github.com/openshift/origin/pkg/util/namer"
"github.com/openshift/origin/test/extended/testdata"
)
const pvPrefix = "pv-"
// WaitForOpenShiftNamespaceImageStreams waits for the standard set of imagestreams to be imported
func WaitForOpenShiftNamespaceImageStreams(oc *CLI) error {
langs := []string{"ruby", "nodejs", "perl", "php", "python", "wildfly", "mysql", "postgresql", "mongodb", "jenkins"}
scan := func() bool {
for _, lang := range langs {
fmt.Fprintf(g.GinkgoWriter, "Checking language %v \n", lang)
is, err := oc.Client().ImageStreams("openshift").Get(lang, metav1.GetOptions{})
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "ImageStream Error: %#v \n", err)
return false
}
for tag := range is.Spec.Tags {
fmt.Fprintf(g.GinkgoWriter, "Checking tag %v \n", tag)
if _, ok := is.Status.Tags[tag]; !ok {
fmt.Fprintf(g.GinkgoWriter, "Tag Error: %#v \n", ok)
return false
}
}
}
return true
}
success := false
for i := 0; i < 10; i++ {
fmt.Fprintf(g.GinkgoWriter, "Running scan #%v \n", i)
success = scan()
if success {
break
}
fmt.Fprintf(g.GinkgoWriter, "Sleeping for 3 seconds \n")
time.Sleep(3 * time.Second)
}
if success {
fmt.Fprintf(g.GinkgoWriter, "Success! \n")
return nil
}
DumpImageStreams(oc)
return fmt.Errorf("Failed to import expected imagestreams")
}
// CheckOpenShiftNamespaceImageStreams is a temporary workaround for the intermittent
// issue seen in extended tests where *something* is deleting the pre-loaded, language
// imagestreams from the OpenShift namespace
func CheckOpenShiftNamespaceImageStreams(oc *CLI) {
missing := false
langs := []string{"ruby", "nodejs", "perl", "php", "python", "wildfly", "mysql", "postgresql", "mongodb", "jenkins"}
for _, lang := range langs {
_, err := oc.Client().ImageStreams("openshift").Get(lang, metav1.GetOptions{})
if err != nil {
missing = true
break
}
}
if missing {
fmt.Fprint(g.GinkgoWriter, "\n\n openshift namespace image streams corrupted \n\n")
DumpImageStreams(oc)
out, err := oc.Run("get").Args("is", "-n", "openshift", "--config", KubeConfigPath()).Output()
err = fmt.Errorf("something has tampered with the image streams in the openshift namespace; look at audits in master log; \n%s\n", out)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
fmt.Fprint(g.GinkgoWriter, "\n\n openshift namespace image streams OK \n\n")
}
}
// DumpImageStreams will dump both the openshift namespace and local namespace imagestreams
// as part of debugging when the language imagestreams in the openshift namespace seem to disappear
func DumpImageStreams(oc *CLI) {
out, err := oc.Run("get").Args("is", "-n", "openshift", "-o", "yaml", "--config", KubeConfigPath()).Output()
if err == nil {
fmt.Fprintf(g.GinkgoWriter, "\n imagestreams in openshift namespace: \n%s\n", out)
} else {
fmt.Fprintf(g.GinkgoWriter, "\n error on getting imagestreams in openshift namespace: %+v\n%#v\n", err, out)
}
out, err = oc.Run("get").Args("is", "-o", "yaml").Output()
if err == nil {
fmt.Fprintf(g.GinkgoWriter, "\n imagestreams in dynamic test namespace: \n%s\n", out)
} else {
fmt.Fprintf(g.GinkgoWriter, "\n error on getting imagestreams in dynamic test namespace: %+v\n%#v\n", err, out)
}
ids, err := ListImages()
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "\n got error on docker images %+v\n", err)
} else {
for _, id := range ids {
fmt.Fprintf(g.GinkgoWriter, " found local image %s\n", id)
}
}
}
// DumpBuildLogs will dump the latest build logs for a BuildConfig for debug purposes
func DumpBuildLogs(bc string, oc *CLI) {
buildOutput, err := oc.Run("logs").Args("-f", "bc/"+bc, "--timestamps").Output()
if err == nil {
fmt.Fprintf(g.GinkgoWriter, "\n\n build logs : %s\n\n", buildOutput)
} else {
fmt.Fprintf(g.GinkgoWriter, "\n\n got error on build logs %+v\n\n", err)
}
// if we suspect that we are filling up the registry file system, call ExamineDiskUsage / ExaminePodDiskUsage
// also see if manipulations of the quota around /mnt/openshift-xfs-vol-dir exist in the extended test set up scripts
ExamineDiskUsage()
ExaminePodDiskUsage(oc)
}
func GetDeploymentConfigPods(oc *CLI, dcName string, version int64) (*kapiv1.PodList, error) {
return oc.KubeClient().CoreV1().Pods(oc.Namespace()).List(metav1.ListOptions{LabelSelector: ParseLabelsOrDie(fmt.Sprintf("%s=%s-%d", deployapi.DeployerPodForDeploymentLabel, dcName, version)).String()})
}
func GetApplicationPods(oc *CLI, dcName string) (*kapiv1.PodList, error) {
return oc.KubeClient().CoreV1().Pods(oc.Namespace()).List(metav1.ListOptions{LabelSelector: ParseLabelsOrDie(fmt.Sprintf("deploymentconfig=%s", dcName)).String()})
}
// DumpDeploymentLogs will dump the latest deployment logs for a DeploymentConfig for debug purposes
func DumpDeploymentLogs(dcName string, version int64, oc *CLI) {
fmt.Fprintf(g.GinkgoWriter, "Dumping deployment logs for deploymentconfig %q\n", dcName)
pods, err := GetDeploymentConfigPods(oc, dcName, version)
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "Unable to retrieve pods for deploymentconfig %q: %v\n", dcName, err)
return
}
DumpPodLogs(pods.Items, oc)
}
// DumpApplicationPodLogs will dump the latest application logs for a DeploymentConfig for debug purposes
func DumpApplicationPodLogs(dcName string, oc *CLI) {
fmt.Fprintf(g.GinkgoWriter, "Dumping application logs for deploymentconfig %q\n", dcName)
pods, err := GetApplicationPods(oc, dcName)
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "Unable to retrieve pods for deploymentconfig %q: %v\n", dcName, err)
return
}
DumpPodLogs(pods.Items, oc)
}
func DumpPodLogs(pods []kapiv1.Pod, oc *CLI) {
for _, pod := range pods {
descOutput, err := oc.Run("describe").Args("pod/" + pod.Name).Output()
if err == nil {
fmt.Fprintf(g.GinkgoWriter, "Describing pod %q\n%s\n\n", pod.Name, descOutput)
} else {
fmt.Fprintf(g.GinkgoWriter, "Error retrieving description for pod %q: %v\n\n", pod.Name, err)
}
depOutput, err := oc.Run("logs").Args("pod/" + pod.Name).Output()
if err == nil {
fmt.Fprintf(g.GinkgoWriter, "Log for pod %q\n---->\n%s\n<----end of log for %[1]q\n", pod.Name, depOutput)
} else {
fmt.Fprintf(g.GinkgoWriter, "Error retrieving logs for pod %q: %v\n\n", pod.Name, err)
}
}
}
// GetMasterThreadDump will get a golang thread stack dump
func GetMasterThreadDump(oc *CLI) {
out, err := oc.AsAdmin().Run("get").Args("--raw", "/debug/pprof/goroutine?debug=2").Output()
if err == nil {
fmt.Fprintf(g.GinkgoWriter, "\n\n Master thread stack dump:\n\n%s\n\n", string(out))
return
}
fmt.Fprintf(g.GinkgoWriter, "\n\n got error on oc get --raw /debug/pprof/goroutine?godebug=2: %v\n\n", err)
}
// ExamineDiskUsage will dump df output on the testing system; leveraging this as part of diagnosing
// the registry's disk filling up during external tests on jenkins
func ExamineDiskUsage() {
out, err := exec.Command("/bin/df", "-m").Output()
if err == nil {
fmt.Fprintf(g.GinkgoWriter, "\n\n df -m output: %s\n\n", string(out))
} else {
fmt.Fprintf(g.GinkgoWriter, "\n\n got error on df %v\n\n", err)
}
out, err = exec.Command("/bin/docker", "info").Output()
if err == nil {
fmt.Fprintf(g.GinkgoWriter, "\n\n docker info output: \n%s\n\n", string(out))
} else {
fmt.Fprintf(g.GinkgoWriter, "\n\n got error on docker inspect %v\n\n", err)
}
}
// ExaminePodDiskUsage will dump df/du output on registry pod; leveraging this as part of diagnosing
// the registry's disk filling up during external tests on jenkins
func ExaminePodDiskUsage(oc *CLI) {
out, err := oc.Run("get").Args("pods", "-o", "json", "-n", "default", "--config", KubeConfigPath()).Output()
var podName string
if err == nil {
b := []byte(out)
var list kapiv1.PodList
err = json.Unmarshal(b, &list)
if err == nil {
for _, pod := range list.Items {
fmt.Fprintf(g.GinkgoWriter, "\n\n looking at pod %s \n\n", pod.ObjectMeta.Name)
if strings.Contains(pod.ObjectMeta.Name, "docker-registry-") && !strings.Contains(pod.ObjectMeta.Name, "deploy") {
podName = pod.ObjectMeta.Name
break
}
}
} else {
fmt.Fprintf(g.GinkgoWriter, "\n\n got json unmarshal err: %v\n\n", err)
}
} else {
fmt.Fprintf(g.GinkgoWriter, "\n\n got error on get pods: %v\n\n", err)
}
if len(podName) == 0 {
fmt.Fprintf(g.GinkgoWriter, "Unable to determine registry pod name, so we can't examine its disk usage.")
return
}
out, err = oc.Run("exec").Args("-n", "default", podName, "df", "--config", KubeConfigPath()).Output()
if err == nil {
fmt.Fprintf(g.GinkgoWriter, "\n\n df from registry pod: \n%s\n\n", out)
} else {
fmt.Fprintf(g.GinkgoWriter, "\n\n got error on reg pod df: %v\n", err)
}
out, err = oc.Run("exec").Args("-n", "default", podName, "du", "/registry", "--config", KubeConfigPath()).Output()
if err == nil {
fmt.Fprintf(g.GinkgoWriter, "\n\n du from registry pod: \n%s\n\n", out)
} else {
fmt.Fprintf(g.GinkgoWriter, "\n\n got error on reg pod du: %v\n", err)
}
}
// VarSubOnFile reads in srcFile, finds instances of ${key} from the map
// and replaces them with their associated values.
func VarSubOnFile(srcFile string, destFile string, vars map[string]string) error {
srcData, err := ioutil.ReadFile(srcFile)
if err == nil {
srcString := string(srcData)
for k, v := range vars {
k = "${" + k + "}"
srcString = strings.Replace(srcString, k, v, -1) // -1 means unlimited replacements
}
err = ioutil.WriteFile(destFile, []byte(srcString), 0644)
}
return err
}
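// For example (hypothetical values): with vars = map[string]string{"NAMESPACE": "demo"},
// every literal occurrence of "${NAMESPACE}" in srcFile is written to destFile as "demo".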
// StartBuild executes OC start-build with the specified arguments. StdOut and StdErr from the process
// are returned as separate strings.
func StartBuild(oc *CLI, args ...string) (stdout, stderr string, err error) {
stdout, stderr, err = oc.Run("start-build").Args(args...).Outputs()
fmt.Fprintf(g.GinkgoWriter, "\n\nstart-build output with args %v:\nError>%v\nStdOut>\n%s\nStdErr>\n%s\n\n", args, err, stdout, stderr)
return stdout, stderr, err
}
var buildPathPattern = regexp.MustCompile(`^build/([\w\-\._]+)$`)
type LogDumperFunc func(oc *CLI, br *BuildResult) (string, error)
func NewBuildResult(oc *CLI, build *buildapi.Build) *BuildResult {
return &BuildResult{
oc: oc,
BuildName: build.Name,
BuildPath: "builds/" + build.Name,
}
}
type BuildResult struct {
// BuildPath is a resource qualified name (e.g. "build/test-1").
BuildPath string
// BuildName is the non-resource qualified name.
BuildName string
// StartBuildStdErr is the StdErr output generated by oc start-build.
StartBuildStdErr string
// StartBuildStdOut is the StdOut output generated by oc start-build.
StartBuildStdOut string
// StartBuildErr is the error, if any, returned by the direct invocation of the start-build command.
StartBuildErr error
// The buildconfig which generated this build.
BuildConfigName string
// Build is the resource created. May be nil if there was a timeout.
Build *buildapi.Build
// BuildAttempt represents that a Build resource was created.
// false indicates a severe error unrelated to Build success or failure.
BuildAttempt bool
// BuildSuccess is true if the build was finished successfully.
BuildSuccess bool
// BuildFailure is true if the build was finished with an error.
BuildFailure bool
// BuildCancelled is true if the build was canceled.
BuildCancelled bool
// BuildTimeout is true if there was a timeout waiting for the build to finish.
BuildTimeout bool
// Alternate log dumper function. If set, this is called instead of 'oc logs'
LogDumper LogDumperFunc
// The openshift client which created this build.
oc *CLI
}
// DumpLogs sends logs associated with this BuildResult to the GinkgoWriter.
func (t *BuildResult) DumpLogs() {
fmt.Fprintf(g.GinkgoWriter, "\n\n*****************************************\n")
fmt.Fprintf(g.GinkgoWriter, "Dumping Build Result: %#v\n", *t)
if t == nil {
fmt.Fprintf(g.GinkgoWriter, "No build result available!\n\n")
return
}
desc, err := t.oc.Run("describe").Args(t.BuildPath).Output()
fmt.Fprintf(g.GinkgoWriter, "\n** Build Description:\n")
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "Error during description retrieval: %+v\n", err)
} else {
fmt.Fprintf(g.GinkgoWriter, "%s\n", desc)
}
fmt.Fprintf(g.GinkgoWriter, "\n** Build Logs:\n")
buildOutput, err := t.Logs()
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "Error during log retrieval: %+v\n", err)
} else {
fmt.Fprintf(g.GinkgoWriter, "%s\n", buildOutput)
}
fmt.Fprintf(g.GinkgoWriter, "\n\n")
t.dumpRegistryLogs()
// if we suspect that we are filling up the registry file system, call ExamineDiskUsage / ExaminePodDiskUsage
// also see if manipulations of the quota around /mnt/openshift-xfs-vol-dir exist in the extended test set up scripts
/*
ExamineDiskUsage()
ExaminePodDiskUsage(t.oc)
fmt.Fprintf(g.GinkgoWriter, "\n\n")
*/
}
func (t *BuildResult) dumpRegistryLogs() {
var buildStarted *time.Time
oc := t.oc
fmt.Fprintf(g.GinkgoWriter, "\n** Registry Logs:\n")
if t.Build != nil && !t.Build.CreationTimestamp.IsZero() {
buildStarted = &t.Build.CreationTimestamp.Time
} else {
proj, err := oc.Client().Projects().Get(oc.Namespace(), metav1.GetOptions{})
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "Failed to get project %s: %v\n", oc.Namespace(), err)
} else {
buildStarted = &proj.CreationTimestamp.Time
}
}
if buildStarted == nil {
fmt.Fprintf(g.GinkgoWriter, "Could not determine test' start time\n\n\n")
return
}
since := time.Now().Sub(*buildStarted)
// Changing the namespace on the derived client still changes it on the original client
// because the kubeFramework field is only copied by reference. Saving the original namespace
// here so we can restore it when done with registry logs
savedNamespace := t.oc.Namespace()
oadm := t.oc.AsAdmin().SetNamespace("default")
out, err := oadm.Run("logs").Args("dc/docker-registry", "--since="+since.String()).Output()
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "Error during log retrieval: %+v\n", err)
} else {
fmt.Fprintf(g.GinkgoWriter, "%s\n", out)
}
t.oc.SetNamespace(savedNamespace)
fmt.Fprintf(g.GinkgoWriter, "\n\n")
}
// Logs returns the logs associated with this build.
func (t *BuildResult) Logs() (string, error) {
if t == nil || t.BuildPath == "" {
return "", fmt.Errorf("Not enough information to retrieve logs for %#v", *t)
}
if t.LogDumper != nil {
return t.LogDumper(t.oc, t)
}
buildOutput, err := t.oc.Run("logs").Args("-f", t.BuildPath, "--timestamps").Output()
if err != nil {
return "", fmt.Errorf("Error retrieving logs for %#v: %v", *t, err)
}
return buildOutput, nil
}
// Dumps logs and triggers a Ginkgo assertion if the build did NOT succeed.
func (t *BuildResult) AssertSuccess() *BuildResult {
if !t.BuildSuccess {
t.DumpLogs()
}
o.ExpectWithOffset(1, t.BuildSuccess).To(o.BeTrue())
return t
}
// Dumps logs and triggers a Ginkgo assertion if the build did NOT have an error (this will not assert on timeouts)
func (t *BuildResult) AssertFailure() *BuildResult {
if !t.BuildFailure {
t.DumpLogs()
}
o.ExpectWithOffset(1, t.BuildFailure).To(o.BeTrue())
return t
}
func StartBuildResult(oc *CLI, args ...string) (result *BuildResult, err error) {
args = append(args, "-o=name") // ensure that the build name is the only thing send to stdout
stdout, stderr, err := StartBuild(oc, args...)
// Usually, with -o=name, we only expect the build path.
// However, the caller may have added --follow which can add
// content to stdout. So just grab the first line.
buildPath := strings.TrimSpace(strings.Split(stdout, "\n")[0])
result = &BuildResult{
Build: nil,
BuildPath: buildPath,
StartBuildStdOut: stdout,
StartBuildStdErr: stderr,
StartBuildErr: nil,
BuildAttempt: false,
BuildSuccess: false,
BuildFailure: false,
BuildCancelled: false,
BuildTimeout: false,
oc: oc,
}
// An error here does not necessarily mean we could not run start-build. For example
// when --wait is specified, start-build returns an error if the build fails. Therefore,
// we continue to collect build information even if we see an error.
result.StartBuildErr = err
matches := buildPathPattern.FindStringSubmatch(buildPath)
if len(matches) != 2 {
return result, fmt.Errorf("Build path output did not match expected format 'build/name' : %q", buildPath)
}
result.BuildName = matches[1]
return result, nil
}
// StartBuildAndWait executes OC start-build with the specified arguments on an existing buildconfig.
// Note that start-build will be run with "-o=name" as a parameter when using this method.
// If no error is returned from this method, it means that the build attempted successfully, NOT that
// the build completed. For completion information, check the BuildResult object.
func StartBuildAndWait(oc *CLI, args ...string) (result *BuildResult, err error) {
result, err = StartBuildResult(oc, args...)
if err != nil {
return result, err
}
return result, WaitForBuildResult(oc.Client().Builds(oc.Namespace()), result)
}
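// A typical call from a test looks roughly like this (build config name is illustrative):
//   br, err := StartBuildAndWait(oc, "sample-build")
//   o.Expect(err).NotTo(o.HaveOccurred())
//   br.AssertSuccess()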
// WaitForBuildResult updates result with the state of the build
func WaitForBuildResult(c client.BuildInterface, result *BuildResult) error {
fmt.Fprintf(g.GinkgoWriter, "Waiting for %s to complete\n", result.BuildName)
err := WaitForABuild(c, result.BuildName,
func(b *buildapi.Build) bool {
result.Build = b
result.BuildSuccess = CheckBuildSuccessFn(b)
return result.BuildSuccess
},
func(b *buildapi.Build) bool {
result.Build = b
result.BuildFailure = CheckBuildFailedFn(b)
return result.BuildFailure
},
func(b *buildapi.Build) bool {
result.Build = b
result.BuildCancelled = CheckBuildCancelledFn(b)
return result.BuildCancelled
},
)
if result.Build == nil {
// We only abort here if the build progress was unobservable. Only known cause would be severe, non-build related error in WaitForABuild.
return fmt.Errorf("Severe error waiting for build: %v", err)
}
result.BuildAttempt = true
result.BuildTimeout = !(result.BuildFailure || result.BuildSuccess || result.BuildCancelled)
fmt.Fprintf(g.GinkgoWriter, "Done waiting for %s: %#v\n with error: %v\n", result.BuildName, *result, err)
return nil
}
// WaitForABuild waits for a Build object to match either isOK or isFailed conditions.
func WaitForABuild(c client.BuildInterface, name string, isOK, isFailed, isCanceled func(*buildapi.Build) bool) error {
if isOK == nil {
isOK = CheckBuildSuccessFn
}
if isFailed == nil {
isFailed = CheckBuildFailedFn
}
if isCanceled == nil {
isCanceled = CheckBuildCancelledFn
}
// wait 2 minutes for build to exist
err := wait.Poll(1*time.Second, 2*time.Minute, func() (bool, error) {
if _, err := c.Get(name, metav1.GetOptions{}); err != nil {
return false, nil
}
return true, nil
})
if err == wait.ErrWaitTimeout {
return fmt.Errorf("Timed out waiting for build %q to be created", name)
}
if err != nil {
return err
}
// wait longer for the build to run to completion
err = wait.Poll(5*time.Second, 60*time.Minute, func() (bool, error) {
list, err := c.List(metav1.ListOptions{FieldSelector: fields.Set{"name": name}.AsSelector().String()})
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "error listing builds: %v", err)
return false, err
}
for i := range list.Items {
if name == list.Items[i].Name && (isOK(&list.Items[i]) || isCanceled(&list.Items[i])) {
return true, nil
}
if name != list.Items[i].Name {
return false, fmt.Errorf("While listing builds named %s, found unexpected build %#v", name, list.Items[i])
}
if isFailed(&list.Items[i]) {
return false, fmt.Errorf("The build %q status is %q", name, list.Items[i].Status.Phase)
}
}
return false, nil
})
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "WaitForABuild returning with error: %v", err)
}
if err == wait.ErrWaitTimeout {
return fmt.Errorf("Timed out waiting for build %q to complete", name)
}
return err
}
// CheckBuildSuccessFn returns true if the build succeeded
var CheckBuildSuccessFn = func(b *buildapi.Build) bool {
return b.Status.Phase == buildapi.BuildPhaseComplete
}
// CheckBuildFailedFn returns true if the build failed
var CheckBuildFailedFn = func(b *buildapi.Build) bool {
return b.Status.Phase == buildapi.BuildPhaseFailed || b.Status.Phase == buildapi.BuildPhaseError
}
// CheckBuildCancelledFn returns true if the build was canceled
var CheckBuildCancelledFn = func(b *buildapi.Build) bool {
return b.Status.Phase == buildapi.BuildPhaseCancelled
}
// WaitForBuilderAccount waits until the builder service account gets fully
// provisioned
func WaitForBuilderAccount(c kcoreclient.ServiceAccountInterface) error {
waitFn := func() (bool, error) {
sc, err := c.Get("builder", metav1.GetOptions{})
if err != nil {
// If we can't access the service accounts, let's wait till the controller
// create it.
if errors.IsForbidden(err) {
return false, nil
}
return false, err
}
for _, s := range sc.Secrets {
if strings.Contains(s.Name, "dockercfg") {
return true, nil
}
}
return false, nil
}
return wait.Poll(time.Duration(100*time.Millisecond), 1*time.Minute, waitFn)
}
// WaitForAnImageStream waits for an ImageStream to fulfill the isOK function
func WaitForAnImageStream(client client.ImageStreamInterface,
name string,
isOK, isFailed func(*imageapi.ImageStream) bool) error {
for {
list, err := client.List(metav1.ListOptions{FieldSelector: fields.Set{"name": name}.AsSelector().String()})
if err != nil {
return err
}
for i := range list.Items {
if isOK(&list.Items[i]) {
return nil
}
if isFailed(&list.Items[i]) {
return fmt.Errorf("The image stream %q status is %q",
name, list.Items[i].Annotations[imageapi.DockerImageRepositoryCheckAnnotation])
}
}
rv := list.ResourceVersion
w, err := client.Watch(metav1.ListOptions{FieldSelector: fields.Set{"name": name}.AsSelector().String(), ResourceVersion: rv})
if err != nil {
return err
}
defer w.Stop()
for {
val, ok := <-w.ResultChan()
if !ok {
// reget and re-watch
break
}
if e, ok := val.Object.(*imageapi.ImageStream); ok {
if isOK(e) {
return nil
}
if isFailed(e) {
return fmt.Errorf("The image stream %q status is %q",
name, e.Annotations[imageapi.DockerImageRepositoryCheckAnnotation])
}
}
}
}
}
// WaitForAnImageStreamTag waits until an image stream with given name has non-empty history for given tag.
// Defaults to waiting for 300 seconds
func WaitForAnImageStreamTag(oc *CLI, namespace, name, tag string) error {
return TimedWaitForAnImageStreamTag(oc, namespace, name, tag, time.Second*300)
}
// TimedWaitForAnImageStreamTag waits until an image stream with given name has non-empty history for given tag.
// Gives up waiting after the specified waitTimeout
func TimedWaitForAnImageStreamTag(oc *CLI, namespace, name, tag string, waitTimeout time.Duration) error {
g.By(fmt.Sprintf("waiting for an is importer to import a tag %s into a stream %s", tag, name))
start := time.Now()
c := make(chan error)
go func() {
err := WaitForAnImageStream(
oc.Client().ImageStreams(namespace),
name,
func(is *imageapi.ImageStream) bool {
if history, exists := is.Status.Tags[tag]; !exists || len(history.Items) == 0 {
return false
}
return true
},
func(is *imageapi.ImageStream) bool {
return time.Now().After(start.Add(waitTimeout))
})
c <- err
}()
select {
case e := <-c:
return e
case <-time.After(waitTimeout):
return fmt.Errorf("timed out while waiting of an image stream tag %s/%s:%s", namespace, name, tag)
}
}
// CheckImageStreamLatestTagPopulatedFn returns true if the imagestream has a ':latest' tag populated
var CheckImageStreamLatestTagPopulatedFn = func(i *imageapi.ImageStream) bool {
_, ok := i.Status.Tags["latest"]
return ok
}
// CheckImageStreamTagNotFoundFn returns true if the imagestream update was not successful
var CheckImageStreamTagNotFoundFn = func(i *imageapi.ImageStream) bool {
return strings.Contains(i.Annotations[imageapi.DockerImageRepositoryCheckAnnotation], "not") ||
strings.Contains(i.Annotations[imageapi.DockerImageRepositoryCheckAnnotation], "error")
}
// WaitForDeploymentConfig waits for a DeploymentConfig to complete transition
// to a given version and report minimum availability.
func WaitForDeploymentConfig(kc kclientset.Interface, oc client.Interface, namespace, name string, version int64, cli *CLI) error {
fmt.Fprintf(g.GinkgoWriter, "waiting for deploymentconfig %s/%s to be available with version %d\n", namespace, name, version)
var dc *deployapi.DeploymentConfig
start := time.Now()
err := wait.Poll(time.Second, 15*time.Minute, func() (done bool, err error) {
dc, err = oc.DeploymentConfigs(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
// TODO re-enable this check once @mfojtik introduces a test that ensures we'll only ever get
// exactly one deployment triggered.
/*
if dc.Status.LatestVersion > version {
return false, fmt.Errorf("latestVersion %d passed %d", dc.Status.LatestVersion, version)
}
*/
if dc.Status.LatestVersion < version {
return false, nil
}
var progressing, available *deployapi.DeploymentCondition
for i, condition := range dc.Status.Conditions {
switch condition.Type {
case deployapi.DeploymentProgressing:
progressing = &dc.Status.Conditions[i]
case deployapi.DeploymentAvailable:
available = &dc.Status.Conditions[i]
}
}
if progressing != nil && progressing.Status == kapi.ConditionFalse {
return false, fmt.Errorf("not progressing")
}
if progressing != nil &&
progressing.Status == kapi.ConditionTrue &&
progressing.Reason == deployapi.NewRcAvailableReason &&
available != nil &&
available.Status == kapi.ConditionTrue {
return true, nil
}
return false, nil
})
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "got error %q when waiting for deploymentconfig %s/%s to be available with version %d\n", err, namespace, name, version)
cli.Run("get").Args("dc", dc.Name, "-o", "yaml").Execute()
DumpDeploymentLogs(name, version, cli)
DumpApplicationPodLogs(name, cli)
return err
}
requirement, err := labels.NewRequirement(deployapi.DeploymentLabel, selection.Equals, []string{deployutil.LatestDeploymentNameForConfig(dc)})
if err != nil {
return err
}
podnames, err := GetPodNamesByFilter(kc.CoreV1().Pods(namespace), labels.NewSelector().Add(*requirement), func(kapiv1.Pod) bool { return true })
if err != nil {
return err
}
fmt.Fprintf(g.GinkgoWriter, "deploymentconfig %s/%s available after %s\npods: %s\n", namespace, name, time.Now().Sub(start), strings.Join(podnames, ", "))
return nil
}
func isUsageSynced(received, expected kapi.ResourceList, expectedIsUpperLimit bool) bool {
resourceNames := quota.ResourceNames(expected)
masked := quota.Mask(received, resourceNames)
if len(masked) != len(expected) {
return false
}
if expectedIsUpperLimit {
if le, _ := quota.LessThanOrEqual(masked, expected); !le {
return false
}
} else {
if le, _ := quota.LessThanOrEqual(expected, masked); !le {
return false
}
}
return true
}
// WaitForResourceQuotaSync watches given resource quota until its usage is updated to desired level or a
// timeout occurs. If successful, used quota values will be returned for expected resources. Otherwise an
// ErrWaitTimeout will be returned. If expectedIsUpperLimit is true, given expected usage must compare greater
// or equal to quota's usage, which is useful for expected usage increment. Otherwise expected usage must
// compare lower or equal to quota's usage, which is useful for expected usage decrement.
func WaitForResourceQuotaSync(
client kinternalcoreclient.ResourceQuotaInterface,
name string,
expectedUsage kapi.ResourceList,
expectedIsUpperLimit bool,
timeout time.Duration,
) (kapi.ResourceList, error) {
startTime := time.Now()
endTime := startTime.Add(timeout)
expectedResourceNames := quota.ResourceNames(expectedUsage)
list, err := client.List(metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String()})
if err != nil {
return nil, err
}
for i := range list.Items {
used := quota.Mask(list.Items[i].Status.Used, expectedResourceNames)
if isUsageSynced(used, expectedUsage, expectedIsUpperLimit) {
return used, nil
}
}
rv := list.ResourceVersion
w, err := client.Watch(metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String(), ResourceVersion: rv})
if err != nil {
return nil, err
}
defer w.Stop()
for time.Now().Before(endTime) {
select {
case val, ok := <-w.ResultChan():
if !ok {
// reget and re-watch
continue
}
if rq, ok := val.Object.(*kapi.ResourceQuota); ok {
used := quota.Mask(rq.Status.Used, expectedResourceNames)
if isUsageSynced(used, expectedUsage, expectedIsUpperLimit) {
return used, nil
}
}
case <-time.After(endTime.Sub(time.Now())):
return nil, wait.ErrWaitTimeout
}
}
return nil, wait.ErrWaitTimeout
}
// GetPodNamesByFilter looks up pods that satisfy the predicate and returns their names.
func GetPodNamesByFilter(c kcoreclient.PodInterface, label labels.Selector, predicate func(kapiv1.Pod) bool) (podNames []string, err error) {
podList, err := c.List(metav1.ListOptions{LabelSelector: label.String()})
if err != nil {
return nil, err
}
for _, pod := range podList.Items {
if predicate(pod) {
podNames = append(podNames, pod.Name)
}
}
return podNames, nil
}
func WaitForAJob(c kbatchclient.JobInterface, name string, timeout time.Duration) error {
return wait.Poll(1*time.Second, timeout, func() (bool, error) {
j, e := c.Get(name, metav1.GetOptions{})
if e != nil {
return true, e
}
// TODO soltysh: replace this with a function once such exist, currently
// it's private in the controller
for _, c := range j.Status.Conditions {
if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == kapiv1.ConditionTrue {
return true, nil
}
}
return false, nil
})
}
// WaitForPods waits until given number of pods that match the label selector and
// satisfy the predicate are found
func WaitForPods(c kcoreclient.PodInterface, label labels.Selector, predicate func(kapiv1.Pod) bool, count int, timeout time.Duration) ([]string, error) {
var podNames []string
err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
p, e := GetPodNamesByFilter(c, label, predicate)
if e != nil {
return true, e
}
if len(p) != count {
return false, nil
}
podNames = p
return true, nil
})
return podNames, err
}
// CheckPodIsRunningFn returns true if the pod is running
var CheckPodIsRunningFn = func(pod kapiv1.Pod) bool {
return pod.Status.Phase == kapiv1.PodRunning
}
// CheckPodIsSucceededFn returns true if the pod status is "Succeeded"
var CheckPodIsSucceededFn = func(pod kapiv1.Pod) bool {
return pod.Status.Phase == kapiv1.PodSucceeded
}
// CheckPodIsReadyFn returns true if the pod's ready probe determined that the pod is ready.
var CheckPodIsReadyFn = func(pod kapiv1.Pod) bool {
if pod.Status.Phase != kapiv1.PodRunning {
return false
}
for _, cond := range pod.Status.Conditions {
if cond.Type != kapiv1.PodReady {
continue
}
return cond.Status == kapiv1.ConditionTrue
}
return false
}
// WaitUntilPodIsGone waits until the named Pod disappears
func WaitUntilPodIsGone(c kcoreclient.PodInterface, podName string, timeout time.Duration) error {
return wait.Poll(1*time.Second, timeout, func() (bool, error) {
_, err := c.Get(podName, metav1.GetOptions{})
if err != nil {
if strings.Contains(err.Error(), "not found") {
return true, nil
}
return true, err
}
return false, nil
})
}
// GetDockerImageReference retrieves the full Docker pull spec from the given ImageStream
// and tag
func GetDockerImageReference(c client.ImageStreamInterface, name, tag string) (string, error) {
imageStream, err := c.Get(name, metav1.GetOptions{})
if err != nil {
return "", err
}
isTag, ok := imageStream.Status.Tags[tag]
if !ok {
return "", fmt.Errorf("ImageStream %q does not have tag %q", name, tag)
}
if len(isTag.Items) == 0 {
return "", fmt.Errorf("ImageStreamTag %q is empty", tag)
}
return isTag.Items[0].DockerImageReference, nil
}
// GetPodForContainer creates a new Pod that runs specified container
func GetPodForContainer(container kapiv1.Container) *kapiv1.Pod {
name := namer.GetPodName("test-pod", string(uuid.NewUUID()))
return &kapiv1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{"name": name},
},
Spec: kapiv1.PodSpec{
Containers: []kapiv1.Container{container},
RestartPolicy: kapiv1.RestartPolicyNever,
},
}
}
// CreatePersistentVolume creates a HostPath Persistent Volume.
func CreatePersistentVolume(name, capacity, hostPath string) *kapiv1.PersistentVolume {
return &kapiv1.PersistentVolume{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolume",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{"name": name},
},
Spec: kapiv1.PersistentVolumeSpec{
PersistentVolumeSource: kapiv1.PersistentVolumeSource{
HostPath: &kapiv1.HostPathVolumeSource{
Path: hostPath,
},
},
Capacity: kapiv1.ResourceList{
kapiv1.ResourceStorage: resource.MustParse(capacity),
},
AccessModes: []kapiv1.PersistentVolumeAccessMode{
kapiv1.ReadWriteOnce,
kapiv1.ReadOnlyMany,
kapiv1.ReadWriteMany,
},
},
}
}
// SetupHostPathVolumes will create multiple PersistentVolumes with given capacity
func SetupHostPathVolumes(c kcoreclient.PersistentVolumeInterface, prefix, capacity string, count int) (volumes []*kapiv1.PersistentVolume, err error) {
rootDir, err := ioutil.TempDir(TestContext.OutputDir, "persistent-volumes")
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "Error creating pv dir %s: %v\n", TestContext.OutputDir, err)
return volumes, err
}
fmt.Fprintf(g.GinkgoWriter, "Created pv dir %s\n", rootDir)
for i := 0; i < count; i++ {
dir, err := ioutil.TempDir(rootDir, fmt.Sprintf("%0.4d", i))
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "Error creating pv subdir %s: %v\n", rootDir, err)
return volumes, err
}
fmt.Fprintf(g.GinkgoWriter, "Created pv subdir %s\n", dir)
if _, err = exec.LookPath("chcon"); err == nil {
fmt.Fprintf(g.GinkgoWriter, "Found chcon in path\n")
//err := exec.Command("chcon", "-t", "container_file_t", dir).Run()
out, err := exec.Command("chcon", "-t", "svirt_sandbox_file_t", dir).CombinedOutput()
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "Error running chcon on %s, %s, %v\n", dir, string(out), err)
return volumes, err
}
fmt.Fprintf(g.GinkgoWriter, "Ran chcon on %s\n", dir)
}
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "Error finding chcon in path: %v\n", err)
return volumes, err
}
if err = os.Chmod(dir, 0777); err != nil {
fmt.Fprintf(g.GinkgoWriter, "Error running chmod on %s, %v\n", dir, err)
return volumes, err
}
fmt.Fprintf(g.GinkgoWriter, "Ran chmod on %s\n", dir)
pv, err := c.Create(CreatePersistentVolume(fmt.Sprintf("%s%s-%0.4d", pvPrefix, prefix, i), capacity, dir))
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "Error defining PV %v\n", err)
return volumes, err
}
fmt.Fprintf(g.GinkgoWriter, "Created PVs\n")
volumes = append(volumes, pv)
}
return volumes, err
}
// CleanupHostPathVolumes removes all PersistentVolumes created by
// SetupHostPathVolumes, with a given prefix
func CleanupHostPathVolumes(c kcoreclient.PersistentVolumeInterface, prefix string) error {
pvs, err := c.List(metav1.ListOptions{})
if err != nil {
return err
}
prefix = fmt.Sprintf("%s%s-", pvPrefix, prefix)
for _, pv := range pvs.Items {
if !strings.HasPrefix(pv.Name, prefix) {
continue
}
pvInfo, err := c.Get(pv.Name, metav1.GetOptions{})
if err != nil {
fmt.Fprintf(g.GinkgoWriter, "WARNING: couldn't get meta info for PV %s: %v\n", pv.Name, err)
continue
}
if err = c.Delete(pv.Name, nil); err != nil {
fmt.Fprintf(g.GinkgoWriter, "WARNING: couldn't remove PV %s: %v\n", pv.Name, err)
continue
}
volumeDir := pvInfo.Spec.HostPath.Path
if err = os.RemoveAll(volumeDir); err != nil {
fmt.Fprintf(g.GinkgoWriter, "WARNING: couldn't remove directory %q: %v\n", volumeDir, err)
continue
}
parentDir := filepath.Dir(volumeDir)
if parentDir == "." || parentDir == "/" {
continue
}
if err = os.Remove(parentDir); err != nil {
fmt.Fprintf(g.GinkgoWriter, "WARNING: couldn't remove directory %q: %v\n", parentDir, err)
continue
}
}
return nil
}
// KubeConfigPath returns the value of KUBECONFIG environment variable
func KubeConfigPath() string {
// can't use gomega in this method since it is used outside of It()
return os.Getenv("KUBECONFIG")
}
//ArtifactDirPath returns the value of ARTIFACT_DIR environment variable
func ArtifactDirPath() string {
path := os.Getenv("ARTIFACT_DIR")
o.Expect(path).NotTo(o.BeNil())
o.Expect(path).NotTo(o.BeEmpty())
return path
}
//ArtifactPath returns the absolute path to the fix artifact file
//The path is relative to ARTIFACT_DIR
func ArtifactPath(elem ...string) string {
return filepath.Join(append([]string{ArtifactDirPath()}, elem...)...)
}
var (
fixtureDirLock sync.Once
fixtureDir string
)
// FixturePath returns an absolute path to a fixture file in test/extended/testdata/,
// test/integration/, or examples/.
func FixturePath(elem ...string) string {
switch {
case len(elem) == 0:
panic("must specify path")
case len(elem) > 3 && elem[0] == ".." && elem[1] == ".." && elem[2] == "examples":
elem = elem[2:]
case len(elem) > 3 && elem[0] == ".." && elem[1] == "integration":
elem = append([]string{"test"}, elem[1:]...)
case elem[0] == "testdata":
elem = append([]string{"test", "extended"}, elem...)
default:
panic(fmt.Sprintf("Fixtures must be in test/extended/testdata or examples not %s", path.Join(elem...)))
}
fixtureDirLock.Do(func() {
dir, err := ioutil.TempDir("", "fixture-testdata-dir")
if err != nil {
panic(err)
}
fixtureDir = dir
})
relativePath := path.Join(elem...)
fullPath := path.Join(fixtureDir, relativePath)
if err := testdata.RestoreAsset(fixtureDir, relativePath); err != nil {
if err := testdata.RestoreAssets(fixtureDir, relativePath); err != nil {
panic(err)
}
if err := filepath.Walk(fullPath, func(path string, info os.FileInfo, err error) error {
if err := os.Chmod(path, 0640); err != nil {
return err
}
if stat, err := os.Lstat(path); err == nil && stat.IsDir() {
return os.Chmod(path, 0755)
}
return nil
}); err != nil {
panic(err)
}
} else {
if err := os.Chmod(fullPath, 0640); err != nil {
panic(err)
}
}
p, err := filepath.Abs(fullPath)
if err != nil {
panic(err)
}
return p
}
// FetchURL grabs the output from the specified url and returns it.
// It will retry once per second for duration retryTimeout if an error occurs during the request.
func FetchURL(url string, retryTimeout time.Duration) (response string, err error) {
waitFn := func() (bool, error) {
r, err := http.Get(url)
if err != nil || r.StatusCode != 200 {
// lie to the poller that we didn't get an error even though we did
// because otherwise it's going to give up.
return false, nil
}
defer r.Body.Close()
bytes, err := ioutil.ReadAll(r.Body)
response = string(bytes)
return true, nil
}
pollErr := wait.Poll(time.Duration(1*time.Second), retryTimeout, waitFn)
if pollErr == wait.ErrWaitTimeout {
return "", fmt.Errorf("Timed out while fetching url %q", url)
}
if pollErr != nil {
return "", pollErr
}
return
}
// ParseLabelsOrDie turns the given string into a label selector or
// panics; for tests or other cases where you know the string is valid.
// TODO: Move this to the upstream labels package.
func ParseLabelsOrDie(str string) labels.Selector {
ret, err := labels.Parse(str)
if err != nil {
panic(fmt.Sprintf("cannot parse '%v': %v", str, err))
}
return ret
}
// GetEndpointAddress will return an "ip:port" string for the endpoint.
func GetEndpointAddress(oc *CLI, name string) (string, error) {
err := framework.WaitForEndpoint(oc.KubeFramework().ClientSet, oc.Namespace(), name)
if err != nil {
return "", err
}
endpoint, err := oc.KubeClient().CoreV1().Endpoints(oc.Namespace()).Get(name, metav1.GetOptions{})
if err != nil {
return "", err
}
return fmt.Sprintf("%s:%d", endpoint.Subsets[0].Addresses[0].IP, endpoint.Subsets[0].Ports[0].Port), nil
}
// CreateExecPodOrFail creates a simple busybox pod in a sleep loop used as a
// vessel for kubectl exec commands.
// Returns the name of the created pod.
// TODO: expose upstream
func CreateExecPodOrFail(client kcoreclient.CoreV1Interface, ns, name string) string {
framework.Logf("Creating new exec pod")
execPod := framework.NewHostExecPodSpec(ns, name)
created, err := client.Pods(ns).Create(execPod)
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollImmediate(framework.Poll, 5*time.Minute, func() (bool, error) {
retrievedPod, err := client.Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{})
if err != nil {
return false, nil
}
return retrievedPod.Status.Phase == kapiv1.PodRunning, nil
})
o.Expect(err).NotTo(o.HaveOccurred())
return created.Name
}
// CheckForBuildEvent will poll a build for up to 1 minute looking for an event with
// the specified reason and message template.
func CheckForBuildEvent(client kcoreclient.CoreV1Interface, build *buildapi.Build, reason, message string) {
var expectedEvent *kapiv1.Event
err := wait.PollImmediate(framework.Poll, 1*time.Minute, func() (bool, error) {
events, err := client.Events(build.Namespace).Search(kapi.Scheme, build)
if err != nil {
return false, err
}
for _, event := range events.Items {
framework.Logf("Found event %#v", event)
if reason == event.Reason {
expectedEvent = &event
return true, nil
}
}
return false, nil
})
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Should be able to get events from the build")
o.ExpectWithOffset(1, expectedEvent).NotTo(o.BeNil(), "Did not find a %q event on build %s/%s", reason, build.Namespace, build.Name)
o.ExpectWithOffset(1, expectedEvent.Message).To(o.Equal(fmt.Sprintf(message, build.Namespace, build.Name)))
}
type podExecutor struct {
client *CLI
podName string
}
// NewPodExecutor returns an executor capable of running commands in a Pod.
func NewPodExecutor(oc *CLI, name, image string) (*podExecutor, error) {
out, err := oc.Run("run").Args(name, "--labels", "name="+name, "--image", image, "--restart", "Never", "--command", "--", "/bin/bash", "-c", "sleep infinity").Output()
if err != nil {
return nil, fmt.Errorf("error: %v\n(%s)", err, out)
}
_, err = WaitForPods(oc.KubeClient().CoreV1().Pods(oc.Namespace()), ParseLabelsOrDie("name="+name), CheckPodIsReadyFn, 1, 3*time.Minute)
if err != nil {
return nil, err
}
return &podExecutor{client: oc, podName: name}, nil
}
// Exec executes a single command or a bash script in the running pod. It returns the
// command output and error if the command finished with non-zero status code or the
// command took longer than 3 minutes to run.
func (r *podExecutor) Exec(script string) (string, error) {
var out string
waitErr := wait.PollImmediate(1*time.Second, 3*time.Minute, func() (bool, error) {
var err error
out, err = r.client.Run("exec").Args(r.podName, "--", "/bin/bash", "-c", script).Output()
return true, err
})
return out, waitErr
}
// WaitForUserBeAuthorized waits a minute until the cluster bootstrap roles are available
// and the provided user is authorized to perform the action on the resource.
func WaitForUserBeAuthorized(oc *CLI, user, verb, resource string) error {
sar := authapi.SubjectAccessReview{
User: user,
Action: authapi.Action{
Namespace: oc.Namespace(),
Verb: verb,
Resource: resource,
},
}
return wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
resp, err := oc.AdminClient().SubjectAccessReviews().Create(&sar)
if err == nil && resp != nil && resp.Allowed {
return true, nil
}
return false, err
})
}
| ["\"KUBECONFIG\"", "\"ARTIFACT_DIR\""] | [] | ["ARTIFACT_DIR", "KUBECONFIG"] | [] | ["ARTIFACT_DIR", "KUBECONFIG"] | go | 2 | 0 | |
Solutions/CrowdStrike Falcon Endpoint Protection/Data Connectors/CrowdstrikeReplicator/CrowdstrikeFalconAPISentinelConnector/__init__.py | import asyncio
import os
import sys
import json
from botocore.config import Config as BotoCoreConfig
from aiobotocore.session import get_session
from gzip_stream import AsyncGZIPDecompressedStream
import re
from .sentinel_connector_async import AzureSentinelConnectorAsync
import time
import aiohttp
import logging
import azure.functions as func
import itertools
from operator import itemgetter
from .state_manager import StateManager
WORKSPACE_ID = os.environ['WorkspaceID']
SHARED_KEY = os.environ['WorkspaceKey']
LOG_TYPE = "CrowdstrikeReplicatorLogs"
AWS_KEY = os.environ['AWS_KEY']
AWS_SECRET = os.environ['AWS_SECRET']
AWS_REGION_NAME = os.environ['AWS_REGION_NAME']
QUEUE_URL = os.environ['QUEUE_URL']
VISIBILITY_TIMEOUT = 1800
LINE_SEPARATOR = os.environ.get('lineSeparator', '[\n\r\x0b\v\x0c\f\x1c\x1d\x85\x1e\u2028\u2029]+')
connection_string = os.environ['AzureWebJobsStorage']
# Defines how many files can be processed simultaneously
MAX_CONCURRENT_PROCESSING_FILES = int(os.environ.get('SimultaneouslyProcessingFiles', 20))
# Defines max number of events that can be sent in one request to Azure Sentinel
MAX_BUCKET_SIZE = int(os.environ.get('EventsBucketSize', 2000))
LOG_ANALYTICS_URI = os.environ.get('logAnalyticsUri')
if not LOG_ANALYTICS_URI or str(LOG_ANALYTICS_URI).isspace():
LOG_ANALYTICS_URI = 'https://' + WORKSPACE_ID + '.ods.opinsights.azure.com'
pattern = r'https:\/\/([\w\-]+)\.ods\.opinsights\.azure.([a-zA-Z\.]+)$'
match = re.match(pattern, str(LOG_ANALYTICS_URI))
if not match:
raise Exception("Invalid Log Analytics Uri.")
drop_files_array = []
def _create_sqs_client():
sqs_session = get_session()
return sqs_session.create_client(
'sqs',
region_name=AWS_REGION_NAME,
aws_access_key_id=AWS_KEY,
aws_secret_access_key=AWS_SECRET
)
def _create_s3_client():
s3_session = get_session()
boto_config = BotoCoreConfig(region_name=AWS_REGION_NAME, retries = {'max_attempts': 10, 'mode': 'standard'})
return s3_session.create_client(
's3',
region_name=AWS_REGION_NAME,
aws_access_key_id=AWS_KEY,
aws_secret_access_key=AWS_SECRET,
config=boto_config
)
def customize_event(line):
element = json.loads(line)
required_fileds = [
"timestamp", "aip", "aid", "EventType", "LogonType", "HostProcessType", "UserPrincipal", "DomainName",
"RemoteAddressIP", "ConnectionDirection", "TargetFileName", "LocalAddressIP4", "IsOnRemovableDisk",
"UserPrincipal", "UserIsAdmin", "LogonTime", "LogonDomain", "RemoteAccount", "UserId", "Prevalence",
"CurrentProcess", "ConnectionDirection", "event_simpleName", "TargetProcessId", "ProcessStartTime",
"UserName", "DeviceProductId", "TargetSHA256HashData", "SHA256HashData", "MD5HashData", "TargetDirectoryName",
"TargetFileName", "FirewallRule", "TaskName", "TaskExecCommand", "TargetAddress", "TargetProcessId",
"SourceFileName", "RegObjectName", "RegValueName", "ServiceObjectName", "RegistryPath", "RawProcessId",
"event_platform", "CommandLine", "ParentProcessId", "ParentCommandLine", "ParentBaseFileName",
"GrandParentBaseFileName", "RemotePort", "VolumeDeviceType", "VolumeName", "ClientComputerName", "ProductId"
]
required_fields_data = {}
custom_fields_data = {}
for key, value in element.items():
if key in required_fileds:
required_fields_data[key] = value
else:
custom_fields_data[key] = value
event = required_fields_data
custom_fields_data_text = str(json.dumps(custom_fields_data))
if custom_fields_data_text != "{}":
event["custom_fields_message"] = custom_fields_data_text
return event
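# Illustrative sketch (not part of the original connector; values are made up):
# customize_event() keeps the fields listed in required_fileds at the top level and
# folds everything else into a single "custom_fields_message" JSON string, e.g.
#
#   line = '{"timestamp": 1, "aid": "abc", "SomethingCustom": "x"}'
#   customize_event(line)
#   # -> {'timestamp': 1, 'aid': 'abc', 'custom_fields_message': '{"SomethingCustom": "x"}'}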
def sort_files_by_bucket(array_obj):
array_obj = sorted(array_obj, key=itemgetter('bucket'))
sorted_array = []
for key, value in itertools.groupby(array_obj, key=itemgetter('bucket')):
# reset temp_array for every bucket so files from a previous bucket do not leak into this group
temp_array = []
for i in value:
temp_array.append({'path': i.get('path')})
sorted_array.append({'bucket': key, 'files': temp_array})
return sorted_array
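# Illustrative sketch (assumed input shape; bucket/path values are made up):
# dropped file records are re-grouped by bucket before re-processing, e.g.
#
#   sort_files_by_bucket([
#       {'bucket': 'b1', 'path': 'p1'},
#       {'bucket': 'b1', 'path': 'p2'},
#       {'bucket': 'b2', 'path': 'p3'},
#   ])
#   # -> [{'bucket': 'b1', 'files': [{'path': 'p1'}, {'path': 'p2'}]},
#   #     {'bucket': 'b2', 'files': [{'path': 'p3'}]}]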
async def main(mytimer: func.TimerRequest):
global drop_files_array
drop_files_array.clear()
script_start_time = int(time.time())
filepath = 'drop_files_array_file'
state = StateManager(connection_string=connection_string, share_name='funcstatemarkershare', file_path=filepath)
last_dropped_messages = state.get()
last_dropped_messages_obj = ''
if last_dropped_messages != None and last_dropped_messages != '':
last_dropped_messages_obj = json.loads(last_dropped_messages)
state.post('')
logging.info("Detected files which not processed or previously processed with errors. Files count: {}. These files will be added to the common array for re-processing".format(len(last_dropped_messages_obj)))
logging.info("Creating SQS connection")
async with _create_sqs_client() as client:
async with aiohttp.ClientSession() as session:
if len(last_dropped_messages_obj) > 0:
logging.info("Processing files which added to re-processing. Files: {}".format(last_dropped_messages_obj))
last_dropped_messages_obj_sorted = sort_files_by_bucket(last_dropped_messages_obj)
for reprocessing_file_msg in last_dropped_messages_obj_sorted:
await download_message_files(reprocessing_file_msg, session)
logging.info('Trying to check messages off the queue...')
try:
response = await client.receive_message(
QueueUrl=QUEUE_URL,
WaitTimeSeconds=2,
VisibilityTimeout=VISIBILITY_TIMEOUT
)
if 'Messages' in response:
for msg in response['Messages']:
body_obj = json.loads(msg["Body"])
logging.info("Got message with MessageId {}. Start processing {} files from Bucket: {}. Path prefix: {}. Timestamp: {}.".format(msg["MessageId"], body_obj["fileCount"], body_obj["bucket"], body_obj["pathPrefix"], body_obj["timestamp"]))
await download_message_files(body_obj, session)
logging.info("Finished processing {} files from MessageId {}. Bucket: {}. Path prefix: {}".format(body_obj["fileCount"], msg["MessageId"], body_obj["bucket"], body_obj["pathPrefix"]))
try:
await client.delete_message(
QueueUrl=QUEUE_URL,
ReceiptHandle=msg['ReceiptHandle']
)
except Exception as e:
logging.error("Error during deleting message with MessageId {} from queue. Bucket: {}. Path prefix: {}. Error: {}".format(msg["MessageId"], body_obj["bucket"], body_obj["pathPrefix"], e))
else:
logging.info('No messages in queue. Re-trying to check...')
except KeyboardInterrupt:
pass
if len(drop_files_array) > 0:
logging.info("list of files that were not processed: {}".format(drop_files_array))
state.post(str(json.dumps(drop_files_array)))
async def process_file(bucket, s3_path, client, semaphore, session):
async with semaphore:
total_events = 0
logging.info("Start processing file {}".format(s3_path))
sentinel = AzureSentinelConnectorAsync(
session,
LOG_ANALYTICS_URI,
WORKSPACE_ID,
SHARED_KEY,
LOG_TYPE,
queue_size=MAX_BUCKET_SIZE
)
try:
response = await client.get_object(Bucket=bucket, Key=s3_path)
s = ''
async for decompressed_chunk in AsyncGZIPDecompressedStream(response["Body"]):
s += decompressed_chunk.decode(errors='ignore')
lines = re.split(r'{0}'.format(LINE_SEPARATOR), s)
for n, line in enumerate(lines):
if n < len(lines) - 1:
if line:
try:
event = customize_event(line)
except ValueError as e:
logging.error('Error while loading json Event at s value {}. Error: {}'.format(line, str(e)))
raise e
await sentinel.send(event)
s = line
if s:
try:
event = customize_event(line)
except ValueError as e:
logging.error('Error while loading json Event at s value {}. Error: {}'.format(line, str(e)))
raise e
await sentinel.send(event)
await sentinel.flush()
total_events += sentinel.successfull_sent_events_number
logging.info("Finish processing file {}. Sent events: {}".format(s3_path, sentinel.successfull_sent_events_number))
except Exception as e:
logging.warning("Processing file {} failed. Error: {}".format(s3_path, e))
drop_files_array.append({'bucket': bucket, 'path': s3_path})
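# Note on the chunked decoding above (explanatory comment, not part of the original file):
# each decompressed chunk is appended to the carry-over buffer `s`, the buffer is split on
# LINE_SEPARATOR, every complete line (all but the last split element) is sent to Sentinel,
# and the last, possibly incomplete element is kept in `s` so the next chunk can complete it;
# whatever remains after the final chunk is sent as the last event.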
async def download_message_files(msg, session):
semaphore = asyncio.Semaphore(MAX_CONCURRENT_PROCESSING_FILES)
async with _create_s3_client() as client:
cors = []
for s3_file in msg['files']:
cors.append(process_file(msg['bucket'], s3_file['path'], client, semaphore, session))
await asyncio.gather(*cors) | [] | [] | ["QUEUE_URL", "SimultaneouslyProcessingFiles", "logAnalyticsUri", "WorkspaceID", "AWS_SECRET", "EventsBucketSize", "lineSeparator", "AWS_KEY", "WorkspaceKey", "AzureWebJobsStorage", "AWS_REGION_NAME"] | [] | ["QUEUE_URL", "SimultaneouslyProcessingFiles", "logAnalyticsUri", "WorkspaceID", "AWS_SECRET", "EventsBucketSize", "lineSeparator", "AWS_KEY", "WorkspaceKey", "AzureWebJobsStorage", "AWS_REGION_NAME"] | python | 11 | 0 |
datalad/utils.py | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
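# Minimal usage sketch (the decorated function below is hypothetical, not from this module):
# getargspec() reports the wrapped function's real signature even through functools.wraps.
#
#   from functools import wraps
#
#   def deco(f):
#       @wraps(f)
#       def wrapper(*args, **kwargs):
#           return f(*args, **kwargs)
#       return wrapper
#
#   @deco
#   def fn(a, b=1):
#       pass
#
#   getargspec(fn)
#   # -> ArgSpecFake(args=['a', 'b'], varargs=None, keywords=None, defaults=(1,))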
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
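# Usage sketch (illustrative; assumes HOME is present in os.environ): on POSIX only HOME is
# rewritten, while on Windows USERPROFILE, HOMEDRIVE and HOMEPATH are covered as well.
#
#   get_home_envvars('/tmp/newhome')  # e.g. -> {'HOME': '/tmp/newhome'} on Linux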
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x' in value_repr:
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
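# Illustrative examples of the "explicit path" convention above (POSIX-style separators):
#
#   is_explicit_path('/abs/path')    # True  - absolute
#   is_explicit_path('./relative')   # True  - leading ./
#   is_explicit_path('../up')        # True  - leading ../
#   is_explicit_path('just-a-name')  # False - not explicit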
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
# anyway is to preprend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
Strips up to 2 extensions of length up to 4 characters and starting with alpha
not a digit, so we could get rid of .tar.gz etc
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
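# Illustrative examples of the extension stripping above:
#
#   file_basename('archive.tar.gz')                   # -> 'archive'
#   file_basename('archive.tar.gz', return_ext=True)  # -> ('archive', 'tar.gz')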
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
# trust no one - adjust also of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
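# Illustrative examples of the key=value parsing above:
#
#   ensure_dict_from_str('a=1\nb=2')  # -> {'a': '1', 'b': '2'}
#   ensure_dict_from_str('')          # -> None
#   ensure_dict_from_str('a=1\na=2')  # raises ValueError (key defined twice)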
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
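# Sketch of ensure_unicode behavior (illustrative):
#   >>> ensure_unicode(b'abc')        # bytes are decoded (utf-8 by default)
#   'abc'
#   >>> ensure_unicode('abc')         # non-bytes values are returned as is
#   'abc'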
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
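# Sketch of ensure_bool behavior (illustrative):
#   >>> ensure_bool("yes"), ensure_bool("off"), ensure_bool("2")
#   (True, False, True)
#   >>> ensure_bool(0)                # non-strings go through bool()
#   False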
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
If True, uniqueness is checked in reverse order, so that later duplicates
take precedence over earlier ones
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
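# Sketch of unique behavior (illustrative):
#   >>> unique([1, 2, 1, 3])
#   [1, 2, 3]
#   >>> unique([(1, 'a'), (2, 'b'), (1, 'c')], key=lambda x: x[0])
#   [(1, 'a'), (2, 'b')]
#   >>> unique([(1, 'a'), (2, 'b'), (1, 'c')], key=lambda x: x[0], reverse=True)
#   [(2, 'b'), (1, 'c')]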
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient when working on generators, since it
returns False as soon as any difference is detected, thus possibly avoiding
unnecessary evaluations
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
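# Sketch of all_same behavior (illustrative; empty input yields False):
#   >>> all_same([1, 1, 1]), all_same([1, 2]), all_same([])
#   (True, False, False)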
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
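# Sketch of map_items behavior (illustrative):
#   >>> map_items(str, {1: 2})        # func applied to keys and values alike
#   {'1': '2'}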
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
elements will be partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
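# Sketch of partition behavior (illustrative):
#   >>> falses, trues = partition([0, 1, 2, '', 'a'])
#   >>> list(falses), list(trues)
#   ([0, ''], [1, 2, 'a'])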
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be non-0 positive"
while container:
yield container[:size]
container = container[size:]
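# Sketch of generate_chunks behavior (illustrative):
#   >>> list(generate_chunks([1, 2, 3, 4, 5], 2))
#   [[1, 2], [3, 4], [5]]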
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
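# Sketch of saved_generator behavior (illustrative):
#   >>> first, replay = saved_generator(iter(range(3)))
#   >>> list(first)                   # consumes the original generator
#   [0, 1, 2]
#   >>> list(replay)                  # replays what was already produced
#   [0, 1, 2]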
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we mediated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
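# Sketch of a decorator built with optional_args (hypothetical `tagged` decorator):
#   @optional_args
#   def tagged(f, tag="default"):
#       @wraps(f)
#       def newf(*args, **kwargs):
#           return (tag, f(*args, **kwargs))
#       return newf
#
#   @tagged                 # used without arguments: decorates directly
#   def one(): return 1
#
#   @tagged(tag="x")        # used with keyword arguments
#   def two(): return 2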
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
pass
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
Internally uses temporary files to guarantee absent side-effects of swallowing
into StringIO which lacks .fileno.
print mocking is necessary for some uses where sys.stdout was already bound
to original sys.stdout, thus mocking it later had no effect. Overriding
print function had desired effect
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# PY3 requires clearly one or another. race condition possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case if file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
yield logger
finally:
[h.removeFilter(filter_) for h in logger.handlers]
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
directory and creates it if it does not exist yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
# TODO: we might want to mitigate by going through all flywheighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
shell, and such functions as `os.chdir` and `os.getcwd` do not use or modify
it in any way, thus `os.getcwd()` returns the path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
return cwd # Must not happen, but may be someone
# evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
If used as a context manager it allows to temporarily change directory
to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
TODO: we might want to norm=True by default to match behavior of
os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
Returns relative path to the topdir, if we are under topdir, and if not
absolute path to topdir. If `pwd` is not specified - current directory
assumed
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
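# Sketch of the two path helpers (illustrative POSIX paths):
#   >>> path_startswith('/a/b', '/a'), path_startswith('/a', '/a')
#   (True, True)
#   >>> path_is_subpath('/a/b', '/a'), path_is_subpath('/a', '/a')
#   (True, False)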
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
and resultant temporary filename is passed as the first argument into
the function t. If no 'prefix' argument is provided, it will be
constructed using module and function names ('.' replaced with
'_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
# For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generating log file names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
The directory will be created if it doesn't exist, but dspath must exist
and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
Returns a list with the trace to the target (the start and the target
are not included in the trace, hence if start and end are directly connected
an empty list is returned), or None when no trace to the target can be found,
or start and end are identical.
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
# we got nothing yet, and this edges is not matching the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
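# Sketch of get_trace on a small tree (illustrative):
#   >>> edges = [('A', 'B'), ('B', 'C'), ('C', 'D')]
#   >>> get_trace(edges, 'A', 'D')    # intermediate nodes only
#   ['B', 'C']
#   >>> get_trace(edges, 'A', 'B')    # directly connected -> empty trace
#   []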
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
If `path` is a symlink or something other than a directory,
the root dataset containing its parent directory will be reported.
If none can be found, and a symlink at `path` is pointing to a
dataset, `path` itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
so it grows with higher power
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
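# Sketch of try_multiple_dec on a hypothetical flaky cleanup function:
#   @try_multiple_dec(ntrials=3, duration=0.01, exceptions=OSError)
#   def remove_dir(path):
#       os.rmdir(path)   # retried up to 3 times if OSError keeps being raised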
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
WindowsError is not known on Linux, and if IOError or any other
exception
is thrown then if except statement has WindowsError in it -- NameError
also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
The rmtree defined above does more and ideally should not itself be decorated,
since it has a recursive definition and checks for open files inside, etc. --
that might be too expensive at runtime
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
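# Sketch of slash_join behavior (illustrative):
#   >>> slash_join('http://example.com/', '/api')
#   'http://example.com/api'
#   >>> slash_join(None, 'api')       # None on either side passes the other through
#   'api'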
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
if pkg and not pkg in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
It allows defining additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
To be used in cases if arg is not expected to have provided
by user .format() placeholders, but 'arg' might become a part
of a composite passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
if platform == 0:
word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
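# Sketch of the cmdline helpers on a POSIX platform (illustrative):
#   >>> join_cmdline(['ls', 'my file'])
#   "ls 'my file'"
#   >>> split_cmdline("ls 'my file'")
#   ['ls', 'my file']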
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
datalad's test machinery. Finally, we want to know, whether we can create a
symlink at a specific location, not just somewhere. Therefore use
arbitrary path to test-build a symlink and delete it afterwards. A suitable
location can therefore be determined by higher level code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
target.unlink()
| []
| []
| [
"DATALAD_ALLOW_FAIL",
"DATALAD_TESTS_TEMP_KEEP",
"MSYSTEM",
"PWD",
"DATALAD_ASSERT_NO_OPEN_FILES",
"TMPDIR"
]
| [] | ["DATALAD_ALLOW_FAIL", "DATALAD_TESTS_TEMP_KEEP", "MSYSTEM", "PWD", "DATALAD_ASSERT_NO_OPEN_FILES", "TMPDIR"] | python | 6 | 0 | |
tests/remotes/hdfs.py | import locale
import os
import platform
import uuid
from contextlib import contextmanager
import pytest
from dvc.path_info import URLInfo
from .base import Base
class HDFS(Base, URLInfo): # pylint: disable=abstract-method
@contextmanager
def _hdfs(self):
import pyarrow
conn = pyarrow.hdfs.connect(self.host, self.port)
try:
yield conn
finally:
conn.close()
def is_file(self):
with self._hdfs() as _hdfs:
return _hdfs.isfile(self.path)
def is_dir(self):
with self._hdfs() as _hdfs:
return _hdfs.isdir(self.path)
def exists(self):
with self._hdfs() as _hdfs:
return _hdfs.exists(self.path)
def mkdir(self, mode=0o777, parents=False, exist_ok=False):
assert mode == 0o777
assert parents
assert not exist_ok
with self._hdfs() as _hdfs:
# NOTE: hdfs.mkdir always creates parents
_hdfs.mkdir(self.path)
def write_bytes(self, contents):
with self._hdfs() as _hdfs:
# NOTE: hdfs.open only supports 'rb', 'wb' or 'ab'
with _hdfs.open(self.path, "wb") as fobj:
fobj.write(contents)
def write_text(self, contents, encoding=None, errors=None):
if not encoding:
encoding = locale.getpreferredencoding(False)
assert errors is None
self.write_bytes(contents.encode(encoding))
def read_bytes(self):
with self._hdfs() as _hdfs:
# NOTE: hdfs.open only supports 'rb', 'wb' or 'ab'
with _hdfs.open(self.path, "rb") as fobj:
return fobj.read()
def read_text(self, encoding=None, errors=None):
if not encoding:
encoding = locale.getpreferredencoding(False)
assert errors is None
return self.read_bytes().decode(encoding)
@pytest.fixture(scope="session")
def hadoop():
import wget
import tarfile
from appdirs import user_cache_dir
if platform.system() != "Linux":
pytest.skip("only supported on Linux")
hadoop_name = "hadoop-2.7.2.tar.gz"
java_name = "openjdk-7u75-b13-linux-x64-18_dec_2014.tar.gz"
base_url = "https://s3-us-east-2.amazonaws.com/dvc-public/dvc-test/"
hadoop_url = base_url + hadoop_name
java_url = base_url + java_name
(cache_dir,) = (user_cache_dir("dvc-test", "iterative"),)
dname = os.path.join(cache_dir, "hdfs")
java_tar = os.path.join(dname, java_name)
hadoop_tar = os.path.join(dname, hadoop_name)
java_home = os.path.join(dname, "java-se-7u75-ri")
hadoop_home = os.path.join(dname, "hadoop-2.7.2")
def _get(url, tar, target):
if os.path.isdir(target):
return
if not os.path.exists(tar):
wget.download(url, out=tar)
tar = tarfile.open(tar)
tar.extractall(dname)
assert os.path.isdir(target)
os.makedirs(dname, exist_ok=True)
_get(hadoop_url, hadoop_tar, hadoop_home)
_get(java_url, java_tar, java_home)
os.environ["JAVA_HOME"] = java_home
os.environ["HADOOP_HOME"] = hadoop_home
os.environ["PATH"] += f":{hadoop_home}/bin:{hadoop_home}/sbin"
@pytest.fixture(scope="session")
def hdfs_server(hadoop, docker_compose, docker_services):
import pyarrow
port = docker_services.port_for("hdfs", 8020)
def _check():
try:
# NOTE: just connecting or even opening something is not enough,
# we need to make sure that we are able to write something.
conn = pyarrow.hdfs.connect("127.0.0.1", port)
try:
with conn.open(str(uuid.uuid4()), "wb") as fobj:
fobj.write(b"test")
finally:
conn.close()
return True
except (pyarrow.ArrowException, OSError):
return False
docker_services.wait_until_responsive(timeout=30.0, pause=5, check=_check)
return port
@pytest.fixture
def hdfs(hdfs_server):
port = hdfs_server
url = f"hdfs://127.0.0.1:{port}/{uuid.uuid4()}"
yield HDFS(url)
| []
| []
| [
"HADOOP_HOME",
"JAVA_HOME",
"PATH"
]
| [] | ["HADOOP_HOME", "JAVA_HOME", "PATH"] | python | 3 | 0 | |
misc/test/examples_test.go | // Copyright 2016-2017, Pulumi Corporation. All rights reserved.
package test
import (
"encoding/base64"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
homedir "github.com/mitchellh/go-homedir"
"github.com/pulumi/pulumi/pkg/v2/testing/integration"
"github.com/pulumi/pulumi/sdk/v2/go/common/resource"
"github.com/stretchr/testify/assert"
)
func TestAccAwsGoAssumeRole(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-go-assume-role", "create-role"),
Config: map[string]string{
"create-role:unprivilegedUsername": "unpriv-go",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsGoEks(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-go-eks"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["url"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello Kubernetes bootcamp!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsGoFargate(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-go-fargate"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["url"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Welcome to nginx!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsGoS3Folder(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-go-s3-folder"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["websiteUrl"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello, world!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsGoS3FolderComponent(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-go-s3-folder-component"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["websiteUrl"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello, world!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsGoWebserver(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-go-webserver"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["publicIp"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello, World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsCsAssumeRole(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-cs-assume-role", "create-role"),
Config: map[string]string{
"create-role:unprivilegedUsername": "unpriv-cs",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsCsS3Folder(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-cs-s3-folder"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["Endpoint"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello, world!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsFsS3Folder(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-fs-s3-folder"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["endpoint"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello, world!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsJsContainers(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-js-containers"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["frontendURL"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello, Pulumi!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsJsS3Folder(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-js-s3-folder"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, "http://"+stack.Outputs["websiteUrl"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Hello, Pulumi!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsJsS3FolderComponent(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-js-s3-folder-component"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["websiteUrl"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Hello, Pulumi!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsJsSqsSlack(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-js-sqs-slack"),
Config: map[string]string{
"slackToken": "token",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsJsWebserver(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-js-webserver"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPHelloWorld(t, stack.Outputs["publicHostName"], nil)
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsJsWebserverComponent(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-js-webserver-component"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPHelloWorld(t, stack.Outputs["webUrl"], nil)
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsPyAppSync(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-py-appsync"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsGoAppSync(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-go-appsync"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 8 * time.Minute
endpoint := stack.Outputs["endpoint"].(string)
mutation := "mutation AddTenant { addTenant(id: \"123\", name: \"FirstCorp\") { id name } }"
finalURL := fmt.Sprintf("%s?query=%s", endpoint, url.QueryEscape(mutation))
key := stack.Outputs["key"].(string)
headersMap := map[string]string{
"Content-Type": "application/graphql",
"x-api-key": key,
}
assertHTTPResultShapeWithRetry(t, finalURL, headersMap, maxWait, func(body string) bool {
return !strings.Contains(body, "AccessDeniedException")
}, func(body string) bool {
return assert.Contains(t, body, "FirstCorp")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsPyAssumeRole(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-py-assume-role", "create-role"),
Config: map[string]string{
"create-role:unprivilegedUsername": "unpriv-py",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsPyResources(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-py-resources"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsGoResources(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-go-resources"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsPyS3Folder(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-py-s3-folder"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, "http://"+stack.Outputs["website_url"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Hello, Pulumi!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsPyStepFunctions(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-py-stepfunctions"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsPyWebserver(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-py-webserver"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, "http://"+stack.Outputs["public_dns"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Hello, World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsAirflow(t *testing.T) {
t.Skip("Skip due to failures initializing 20(!) instances")
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-airflow"),
Config: map[string]string{
"airflow:dbPassword": "secretP4ssword",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsApiGateway(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-apigateway"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["endpoint"].(string)
assertHTTPResultWithRetry(t, endpoint+"hello", nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "route")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsAppSync(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-appsync"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsAssumeRole(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-assume-role", "create-role"),
Config: map[string]string{
"create-role:unprivilegedUsername": "unpriv",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsContainers(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-containers"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 15 * time.Minute
endpoint := stack.Outputs["frontendURL"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello, Pulumi!")
})
},
})
integration.ProgramTest(t, &test)
}
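// TestAccAwsTsEc2Provisioners creates a throwaway EC2 key pair for the run (and deletes it on exit),
// passing the key name as stack config and the base64-encoded private key as a stack secret.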
func TestAccAwsTsEc2Provisioners(t *testing.T) {
sess, err := session.NewSession(&aws.Config{
Region: aws.String(getAwsRegion())},
)
assert.NoError(t, err)
svc := ec2.New(sess)
keyName, err := resource.NewUniqueHex("test-keyname", 8, 20)
assert.NoError(t, err)
t.Logf("Creating keypair %s.\n", keyName)
key, err := svc.CreateKeyPair(&ec2.CreateKeyPairInput{
KeyName: aws.String(keyName),
})
assert.NoError(t, err)
defer func() {
t.Logf("Deleting keypair %s.\n", keyName)
_, err := svc.DeleteKeyPair(&ec2.DeleteKeyPairInput{
KeyName: aws.String(keyName),
})
assert.NoError(t, err)
}()
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-ec2-provisioners"),
Config: map[string]string{
"keyName": aws.StringValue(key.KeyName),
},
Secrets: map[string]string{
"privateKey": base64.StdEncoding.EncodeToString([]byte(aws.StringValue(key.KeyMaterial))),
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
catConfigStdout := stack.Outputs["catConfigStdout"].(string)
assert.Equal(t, "[test]\nx = 42\n", catConfigStdout)
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsEks(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-eks"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsEksHelloWorld(t *testing.T) {
t.Skip("Skip due to frequent failures: `timeout while waiting for state to become 'ACTIVE'`")
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-eks-hello-world"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["serviceHostname"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Welcome to nginx")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsHelloFargate(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-hello-fargate"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
maxWait := 10 * time.Minute
endpoint := stack.Outputs["url"].(string)
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Hello World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsPulumiWebhooks(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-pulumi-webhooks"),
Config: map[string]string{
"cloud:provider": "aws",
"aws-ts-pulumi-webhooks:slackChannel": "general",
"aws-ts-pulumi-webhooks:slackToken": "12345",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsPulumiMiniflux(t *testing.T) {
t.Skip("Skip until ECS Service supports custom timeouts")
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-pulumi-miniflux"),
Config: map[string]string{
"aws-ts-pulumi-miniflux:db_name": "miniflux",
"aws-ts-pulumi-miniflux:db_username": "minifluxuser",
"aws-ts-pulumi-miniflux:db_password": "2Password2",
"aws-ts-pulumi-miniflux:admin_username": "adminuser",
"aws-ts-pulumi-miniflux:admin_password": "2Password2",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsResources(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-resources"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsS3LambdaCopyZip(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-s3-lambda-copyzip"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsSlackbot(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-slackbot"),
Config: map[string]string{
"mentionbot:slackToken": "XXX",
"mentionbot:verificationToken": "YYY",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsStepFunctions(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-stepfunctions"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsThumbnailer(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-thumbnailer"),
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsTwitterAthena(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-twitter-athena"),
Config: map[string]string{
"aws-ts-twitter-athena:twitterConsumerKey": "12345",
"aws-ts-twitter-athena:twitterConsumerSecret": "xyz",
"aws-ts-twitter-athena:twitterAccessTokenKey": "12345",
"aws-ts-twitter-athena:twitterAccessTokenSecret": "xyz",
"aws-ts-twitter-athena:twitterQuery": "smurfs",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAwsTsLambdaEfs(t *testing.T) {
test := getAWSBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "aws-ts-lambda-efs"),
})
integration.ProgramTest(t, &test)
}
func TestAccAzureCsAppService(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-cs-appservice"),
Config: map[string]string{
"sqlPassword": "2@Password@2",
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertAppServiceResult(t, stack.Outputs["Endpoint"], func(body string) bool {
return assert.Contains(t, body, "Greetings from Azure App Service!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureCsWebserver(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-cs-webserver"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["IpAddress"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Hello, World")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureFsAppService(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-fs-appservice"),
Config: map[string]string{
"sqlPassword": "2@Password@2",
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertAppServiceResult(t, stack.Outputs["endpoint"], func(body string) bool {
return assert.Contains(t, body, "Greetings from Azure App Service!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureGoAci(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-go-aci"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertAppServiceResult(t, stack.Outputs["endpoint"], func(body string) bool {
return assert.Contains(t, body, "Hello, containers!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureGoAks(t *testing.T) {
t.Skip("The credentials in ServicePrincipalProfile were invalid")
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-go-aks"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertAppServiceResult(t, stack.Outputs["url"], func(body string) bool {
return assert.Contains(t, body, "Hello Kubernetes bootcamp!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureGoAksMulticluster(t *testing.T) {
skipIfShort(t)
t.Skip("Skipping Azure tests temporarily")
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-go-aks-multicluster"),
Config: map[string]string{
"password": "testTEST1234+_^$",
"sshPublicKey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDeREOgHTUgPT00PTr7iQF9JwZQ4QF1VeaLk2nHKRvWYOCiky6hDtzhmLM0k0Ib9Y7cwFbhObR+8yZpCgfSX3Hc3w2I1n6lXFpMfzr+wdbpx97N4fc1EHGUr9qT3UM1COqN6e/BEosQcMVaXSCpjqL1jeNaRDAnAS2Y3q1MFeXAvj9rwq8EHTqqAc1hW9Lq4SjSiA98STil5dGw6DWRhNtf6zs4UBy8UipKsmuXtclR0gKnoEP83ahMJOpCIjuknPZhb+HsiNjFWf+Os9U6kaS5vGrbXC8nggrVE57ow88pLCBL+3mBk1vBg6bJuLBCp2WTqRzDMhSDQ3AcWqkucGqf dremy@remthinkpad",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureGoAppservice(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-go-appservice"),
Config: map[string]string{
"sqlPassword": "2@Password@2",
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertAppServiceResult(t, stack.Outputs["endpoint"], func(body string) bool {
return assert.Contains(t, body, "Greetings from Azure App Service!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureGoWebserverComponent(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-go-webserver-component"),
Config: map[string]string{
"username": "webmaster",
"password": "Password1234!",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureJsWebserver(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-js-webserver"),
Config: map[string]string{
"username": "testuser",
"password": "testTEST1234+-*/",
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPHelloWorld(t, stack.Outputs["publicIP"], nil)
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzurePyAks(t *testing.T) {
t.Skip("The credentials in ServicePrincipalProfile were invalid")
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-py-aks"),
Config: map[string]string{
"password": "testTEST1234+_^$",
"sshkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDeREOgHTUgPT00PTr7iQF9JwZQ4QF1VeaLk2nHKRvWYOCiky6hDtzhmLM0k0Ib9Y7cwFbhObR+8yZpCgfSX3Hc3w2I1n6lXFpMfzr+wdbpx97N4fc1EHGUr9qT3UM1COqN6e/BEosQcMVaXSCpjqL1jeNaRDAnAS2Y3q1MFeXAvj9rwq8EHTqqAc1hW9Lq4SjSiA98STil5dGw6DWRhNtf6zs4UBy8UipKsmuXtclR0gKnoEP83ahMJOpCIjuknPZhb+HsiNjFWf+Os9U6kaS5vGrbXC8nggrVE57ow88pLCBL+3mBk1vBg6bJuLBCp2WTqRzDMhSDQ3AcWqkucGqf dremy@remthinkpad",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzurePyAppService(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-py-appservice"),
Config: map[string]string{
"sqlPassword": "2@Password@2",
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertAppServiceResult(t, stack.Outputs["endpoint"], func(body string) bool {
return assert.Contains(t, body, "Greetings from Azure App Service!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzurePyAppServiceDocker(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-py-appservice-docker"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertAppServiceResult(t, stack.Outputs["hello_endpoint"], func(body string) bool {
return assert.Contains(t, body, "Hello, world!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzurePyArmTemplate(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-py-arm-template"),
})
integration.ProgramTest(t, &test)
}
func TestAccAzurePyHdInsightSpark(t *testing.T) {
t.Skip("Skipping HDInsights tests due to a stuck cluster in the account")
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-py-hdinsight-spark"),
Config: map[string]string{
"username": "testuser",
"password": "MyPassword123+-*/",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzurePyVmScaleSet(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-py-vm-scaleset"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["public_address"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "nginx")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzurePyWebserver(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-py-webserver"),
Config: map[string]string{
"azure-web:username": "testuser",
"azure-web:password": "testTEST1234+-*/",
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPHelloWorld(t, stack.Outputs["public_ip"], nil)
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureTsAppService(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-ts-appservice"),
Config: map[string]string{
"sqlPassword": "2@Password@2",
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertAppServiceResult(t, stack.Outputs["endpoint"], func(body string) bool {
return assert.Contains(t, body, "Greetings from Azure App Service!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureTsAppServiceDocker(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-ts-appservice-docker"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertAppServiceResult(t, stack.Outputs["getStartedEndpoint"], func(body string) bool {
return assert.Contains(t, body, "Azure App Service")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureTsArmTemplate(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-ts-arm-template"),
})
integration.ProgramTest(t, &test)
}
func TestAccAzureTsFunctions(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-ts-functions"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["endpoint"], nil, func(body string) bool {
return assert.Contains(t, body, "Greetings from Azure Functions!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureTsHdInsightSpark(t *testing.T) {
t.Skip("Skipping HDInsights tests due to a stuck cluster in the account")
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-ts-hdinsight-spark"),
Config: map[string]string{
"username": "testuser",
"password": "MyPassword123+-*/",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureTsStreamAnalytics(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-ts-stream-analytics"),
})
integration.ProgramTest(t, &test)
}
func TestAccAzureTsVmScaleset(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-ts-vm-scaleset"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["publicAddress"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "nginx")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureTsWebserver(t *testing.T) {
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-ts-webserver"),
Config: map[string]string{
"username": "webmaster",
"password": "MySuperS3cretPassw0rd",
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["ipAddress"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Hello, World")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureTsAksHelm(t *testing.T) {
skipIfShort(t)
t.Skip("Skipping Azure tests temporarily")
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-ts-aks-helm"),
Config: map[string]string{
"password": "testTEST1234+_^$",
"sshPublicKey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDeREOgHTUgPT00PTr7iQF9JwZQ4QF1VeaLk2nHKRvWYOCiky6hDtzhmLM0k0Ib9Y7cwFbhObR+8yZpCgfSX3Hc3w2I1n6lXFpMfzr+wdbpx97N4fc1EHGUr9qT3UM1COqN6e/BEosQcMVaXSCpjqL1jeNaRDAnAS2Y3q1MFeXAvj9rwq8EHTqqAc1hW9Lq4SjSiA98STil5dGw6DWRhNtf6zs4UBy8UipKsmuXtclR0gKnoEP83ahMJOpCIjuknPZhb+HsiNjFWf+Os9U6kaS5vGrbXC8nggrVE57ow88pLCBL+3mBk1vBg6bJuLBCp2WTqRzDMhSDQ3AcWqkucGqf dremy@remthinkpad",
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["serviceIP"], nil, func(body string) bool {
return assert.Contains(t, body, "It works!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureTsAksKeda(t *testing.T) {
skipIfShort(t)
t.Skip("Skipping Azure tests temporarily")
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-ts-aks-keda"),
})
integration.ProgramTest(t, &test)
}
func TestAccAzureTsAksMulticluster(t *testing.T) {
skipIfShort(t)
t.Skip("Skipping Azure tests temporarily")
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-ts-aks-multicluster"),
Config: map[string]string{
"password": "testTEST1234+_^$",
"sshPublicKey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDeREOgHTUgPT00PTr7iQF9JwZQ4QF1VeaLk2nHKRvWYOCiky6hDtzhmLM0k0Ib9Y7cwFbhObR+8yZpCgfSX3Hc3w2I1n6lXFpMfzr+wdbpx97N4fc1EHGUr9qT3UM1COqN6e/BEosQcMVaXSCpjqL1jeNaRDAnAS2Y3q1MFeXAvj9rwq8EHTqqAc1hW9Lq4SjSiA98STil5dGw6DWRhNtf6zs4UBy8UipKsmuXtclR0gKnoEP83ahMJOpCIjuknPZhb+HsiNjFWf+Os9U6kaS5vGrbXC8nggrVE57ow88pLCBL+3mBk1vBg6bJuLBCp2WTqRzDMhSDQ3AcWqkucGqf dremy@remthinkpad",
},
})
integration.ProgramTest(t, &test)
}
func TestAccAzureTsCosmosDbLogicApp(t *testing.T) {
skipIfShort(t)
t.Skip("Skipping Azure tests temporarily")
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-ts-cosmosdb-logicapp"),
})
integration.ProgramTest(t, &test)
}
func TestAccAzureTsWebserverComponent(t *testing.T) {
t.Skip("Skipping Azure tests temporarily")
test := getAzureBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "azure-ts-webserver-component"),
Config: map[string]string{
"username": "webmaster",
"password": "MySuperS3cretPassw0rd",
},
})
integration.ProgramTest(t, &test)
}
func TestAccCloudJsApi(t *testing.T) {
test := getCloudBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "cloud-js-api"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["endpoint"].(string)+"/hello", nil, func(body string) bool {
return assert.Contains(t, body, "{\"route\":\"hello\",\"count\":1}")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccCloudJsContainers(t *testing.T) {
test := getCloudBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "cloud-js-containers"),
Config: map[string]string{
"cloud-aws:useFargate": "true",
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["hostname"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Hello, Pulumi!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccCloudJsHttpServer(t *testing.T) {
test := getCloudBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "cloud-js-httpserver"),
Config: map[string]string{
"cloud:provider": "aws",
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["endpoint"].(string)+"/hello", nil, func(body string) bool {
return assert.Contains(t, body, "{\"route\":\"/hello\",\"count\":1}")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccCloudJsThumbnailer(t *testing.T) {
test := getCloudBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "cloud-js-thumbnailer"),
Config: map[string]string{
"cloud-aws:useFargate": "true",
},
})
integration.ProgramTest(t, &test)
}
func TestAccCloudJsThumbnailerMachineLearning(t *testing.T) {
test := getCloudBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "cloud-js-thumbnailer-machine-learning"),
Config: map[string]string{
// use us-west-2 to assure fargate
"cloud-aws:useFargate": "true",
"cloud-aws:computeIAMRolePolicyARNs": "arn:aws:iam::aws:policy/AWSLambdaFullAccess,arn:aws:iam::aws:" +
"policy/AmazonEC2ContainerServiceFullAccess,arn:aws:iam::aws:policy/AmazonRekognitionFullAccess",
},
})
integration.ProgramTest(t, &test)
}
func TestAccCloudTsUrlShortener(t *testing.T) {
test := getCloudBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "cloud-ts-url-shortener"),
Config: map[string]string{
// use us-west-2 to assure fargate
"redisPassword": "s3cr7Password",
"cloud:provider": "aws",
"cloud-aws:useFargate": "true",
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["endpointUrl"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Short URL Manager")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccCloudTsUrlShortenerCache(t *testing.T) {
test := getCloudBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "cloud-ts-url-shortener-cache"),
Config: map[string]string{
// use us-west-2 to assure fargate
"redisPassword": "s3cr7Password",
"cloud:provider": "aws",
"cloud-aws:useFargate": "true",
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["endpointUrl"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Short URL Manager")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccCloudTsVotingApp(t *testing.T) {
test := getCloudBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "cloud-ts-voting-app"),
Config: map[string]string{
// use us-west-2 to assure fargate
"redisPassword": "s3cr7Password",
"cloud:provider": "aws",
"cloud-aws:useFargate": "true",
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["frontendURL"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Pulumi Voting App")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccDigitalOceanPyK8s(t *testing.T) {
test := getBaseOptions(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "digitalocean-py-k8s"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["ingress_ip"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Welcome to nginx!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccDigitalOceanPyLoadbalancedDroplets(t *testing.T) {
t.Skip("Skip due to 'Error waiting for Load Balancer' failures")
test := getBaseOptions(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "digitalocean-py-loadbalanced-droplets"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["endpoint"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Welcome to nginx!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccDigitalOceanTsK8s(t *testing.T) {
test := getBaseOptions(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "digitalocean-ts-k8s"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["ingressIp"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Welcome to nginx!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccDigitalOceanTsLoadbalancedDroplets(t *testing.T) {
test := getBaseOptions(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "digitalocean-ts-loadbalanced-droplets"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["endpoint"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Welcome to nginx!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccDigitalOceanCsK8s(t *testing.T) {
test := getBaseOptions(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "digitalocean-cs-k8s"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["IngressIp"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Welcome to nginx!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccDigitalOceanCsLoadbalancedDroplets(t *testing.T) {
test := getBaseOptions(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "digitalocean-cs-loadbalanced-droplets"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assertHTTPResult(t, stack.Outputs["Endpoint"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Welcome to nginx!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccLinodeJsWebserver(t *testing.T) {
test := getBaseOptions(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "linode-js-webserver"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["instanceIP"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello, World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpGoFunctions(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-go-functions"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["function"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpGoFunctionsRaw(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-go-functions-raw"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["function"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpGoGke(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-go-gke"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["url"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello Kubernetes bootcamp!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpGoInstance(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-go-instance"),
})
integration.ProgramTest(t, &test)
}
func TestAccGcpGoWebserver(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-go-webserver"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["instanceIP"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello, World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpJsWebserver(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-js-webserver"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["instanceIP"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello, World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpPyFunctions(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-py-functions"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["fxn_url"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Space Needle, Seattle, WA")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpPyServerlessRaw(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-py-serverless-raw"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["go_endpoint"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello World!")
})
assertHTTPResult(t, stack.Outputs["python_endpoint"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Hello World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpPyInstanceNginx(t *testing.T) {
t.Skip("Skip due to frequent failures: `35.239.87.214:80: connect: connection refused`")
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-py-instance-nginx"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["external_ip"].(string)
maxWait := time.Minute * 10
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Test Page for the Nginx HTTP Server on Fedora")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpTsFunctions(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-ts-functions"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["url"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Greetings from Google Cloud Functions!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpTsServerlessRaw(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-ts-serverless-raw"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["goEndpoint"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello World!")
})
assertHTTPResult(t, stack.Outputs["pythonEndpoint"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Hello World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpTsCloudRun(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-ts-cloudrun"),
RunUpdateTest: false,
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["rubyUrl"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello Pulumi!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccPacketPyWebserver(t *testing.T) {
test := getBaseOptions(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "packet-py-webserver"),
})
integration.ProgramTest(t, &test)
}
func TestAccPacketTsWebserver(t *testing.T) {
test := getBaseOptions(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "packet-ts-webserver"),
})
integration.ProgramTest(t, &test)
}
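// TestAccKubernetesGuestbook runs the Go, C#, Python and TypeScript guestbook examples against the
// local kubeconfig, one at a time, and checks that each frontend serves the Guestbook page.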
func TestAccKubernetesGuestbook(t *testing.T) {
_, err := homedir.Expand("~/.kube/config")
if err != nil {
t.Skipf("Missing KubeConfig to run test: %s", err)
}
tests := []integration.ProgramTestOptions{
integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "kubernetes-go-guestbook", "simple"),
NoParallel: true,
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["frontendIP"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Guestbook")
})
},
},
integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "kubernetes-go-guestbook", "components"),
NoParallel: true,
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["frontendIP"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Guestbook")
})
},
},
integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "kubernetes-cs-guestbook", "simple"),
NoParallel: true,
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["FrontendIp"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Guestbook")
})
},
},
integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "kubernetes-cs-guestbook", "components"),
NoParallel: true,
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["FrontendIp"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Guestbook")
})
},
},
integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "kubernetes-py-guestbook", "simple"),
NoParallel: true,
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["frontend_ip"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Guestbook")
})
},
},
integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "kubernetes-ts-guestbook", "simple"),
NoParallel: true,
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["frontendIp"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Guestbook")
})
},
},
integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "kubernetes-ts-guestbook", "components"),
NoParallel: true,
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["frontendIp"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Guestbook")
})
},
},
}
for _, ex := range tests {
example := ex
t.Run(example.Dir, func(t *testing.T) {
t.Log(example.StackName)
integration.ProgramTest(t, &example)
})
}
}
func skipIfShort(t *testing.T) {
if testing.Short() {
t.Skip("skipping long-running test in short mode")
}
}
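// getAwsRegion returns the AWS region used by the tests, defaulting to us-west-1 when AWS_REGION is not set.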
func getAwsRegion() string {
awsRegion := os.Getenv("AWS_REGION")
if awsRegion == "" {
awsRegion = "us-west-1"
fmt.Println("Defaulting AWS_REGION to 'us-west-1'. You can override using the AWS_REGION environment variable")
}
return awsRegion
}
func getAzureEnvironment() string {
azureEnviron := os.Getenv("ARM_ENVIRONMENT")
if azureEnviron == "" {
azureEnviron = "public"
fmt.Println("Defaulting ARM_ENVIRONMENT to 'public'. You can override using the ARM_ENVIRONMENT variable")
}
return azureEnviron
}
func getAzureLocation() string {
azureLocation := os.Getenv("ARM_LOCATION")
if azureLocation == "" {
azureLocation = "westus"
fmt.Println("Defaulting ARM_LOCATION to 'westus'. You can override using the ARM_LOCATION variable")
}
return azureLocation
}
func getGoogleProject() string {
project := os.Getenv("GOOGLE_PROJECT")
if project == "" {
project = "pulumi-ci-gcp-provider"
fmt.Println("Defaulting GOOGLE_PROJECT to 'pulumi-ci-gcp-provider'. You can override using the GOOGLE_PROJECT variable")
}
return project
}
func getGoogleZone() string {
zone := os.Getenv("GOOGLE_ZONE")
if zone == "" {
zone = "us-central1-a"
fmt.Println("Defaulting GOOGLE_ZONE to 'us-central1-a'. You can override using the GOOGLE_ZONE variable")
}
return zone
}
func getGkeVersion() string {
gkeEngineVersion := os.Getenv("GKE_ENGINE_VERSION")
if gkeEngineVersion == "" {
gkeEngineVersion = "1.13.7-gke.24"
fmt.Println("Defaulting GKE_ENGINE_VERSION to '1.13.7-gke.24'. You can override using the GKE_ENGINE_VERSION variable")
}
return gkeEngineVersion
}
func getCwd(t *testing.T) string {
cwd, err := os.Getwd()
if err != nil {
t.FailNow()
}
return cwd
}
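// getBaseOptions returns the ProgramTestOptions shared by all example tests: quick runs with refresh
// skipped, failed steps retried, and any PULUMI_TEST_NODE_OVERRIDES applied.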
func getBaseOptions(t *testing.T) integration.ProgramTestOptions {
overrides, err := integration.DecodeMapString(os.Getenv("PULUMI_TEST_NODE_OVERRIDES"))
if err != nil {
t.FailNow()
}
base := integration.ProgramTestOptions{
Tracing: "https://tracing.pulumi-engineering.com/collector/api/v1/spans",
ExpectRefreshChanges: true,
Overrides: overrides,
Quick: true,
SkipRefresh: true,
RetryFailedSteps: true,
}
return base
}
func getAWSBase(t *testing.T) integration.ProgramTestOptions {
awsRegion := getAwsRegion()
base := getBaseOptions(t)
awsBase := base.With(integration.ProgramTestOptions{
Config: map[string]string{
"aws:region": awsRegion,
},
})
return awsBase
}
func getAzureBase(t *testing.T) integration.ProgramTestOptions {
azureEnviron := getAzureEnvironment()
azureLocation := getAzureLocation()
base := getBaseOptions(t)
azureBase := base.With(integration.ProgramTestOptions{
Config: map[string]string{
"azure:environment": azureEnviron,
"azure:location": azureLocation,
},
})
return azureBase
}
func getGoogleBase(t *testing.T) integration.ProgramTestOptions {
googleZone := getGoogleZone()
googleProject := getGoogleProject()
base := getBaseOptions(t)
gkeBase := base.With(integration.ProgramTestOptions{
Config: map[string]string{
"gcp:project": googleProject,
"gcp:zone": googleZone,
},
})
return gkeBase
}
func getCloudBase(t *testing.T) integration.ProgramTestOptions {
awsRegion := getAwsRegion()
base := getBaseOptions(t)
azureBase := base.With(integration.ProgramTestOptions{
Config: map[string]string{
"aws:region": awsRegion,
},
})
return azureBase
}
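// assertHTTPResult GETs the given stack output (retrying for up to five minutes) and runs check
// against the response body.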
func assertHTTPResult(t *testing.T, output interface{}, headers map[string]string, check func(string) bool) bool {
return assertHTTPResultWithRetry(t, output, headers, 5*time.Minute, check)
}
func assertHTTPResultWithRetry(t *testing.T, output interface{}, headers map[string]string, maxWait time.Duration, check func(string) bool) bool {
return assertHTTPResultShapeWithRetry(t, output, headers, maxWait, func(string) bool { return true }, check)
}
func assertAppServiceResult(t *testing.T, output interface{}, check func(string) bool) bool {
ready := func(body string) bool {
// We got a welcome page from Azure App Service. This means the resource is deployed but our custom code is not
// there yet. Wait a bit more and retry later.
welcomePage := strings.Contains(body, "Your app service is up and running.")
return !welcomePage
}
return assertHTTPResultShapeWithRetry(t, output, nil, 5*time.Minute, ready, check)
}
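// assertHTTPResultShapeWithRetry polls the URL derived from output until ready returns true for the
// response body or maxWait elapses, then evaluates check against the body. An http:// scheme is
// prepended when the output carries none, and retries back off from 10s up to a 30s cap.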
func assertHTTPResultShapeWithRetry(t *testing.T, output interface{}, headers map[string]string, maxWait time.Duration,
ready func(string) bool, check func(string) bool) bool {
hostname, ok := output.(string)
if !assert.True(t, ok, fmt.Sprintf("expected `%s` output", output)) {
return false
}
if !(strings.HasPrefix(hostname, "http://") || strings.HasPrefix(hostname, "https://")) {
hostname = fmt.Sprintf("http://%s", hostname)
}
startTime := time.Now()
count, sleep := 0, 0
	for {
now := time.Now()
req, err := http.NewRequest("GET", hostname, nil)
if !assert.NoError(t, err) {
return false
}
for k, v := range headers {
// Host header cannot be set via req.Header.Set(), and must be set
// directly.
if strings.ToLower(k) == "host" {
req.Host = v
continue
}
req.Header.Set(k, v)
}
client := &http.Client{Timeout: time.Second * 10}
resp, err := client.Do(req)
if err == nil && resp.StatusCode == 200 {
if !assert.NotNil(t, resp.Body, "resp.body was nil") {
return false
}
// Read the body
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if !assert.NoError(t, err) {
return false
}
bodyText := string(body)
// Even if we got 200 and a response, it may not be ready for assertion yet - that's specific per test.
if ready(bodyText) {
// Verify it matches expectations
return check(bodyText)
}
}
if now.Sub(startTime) >= maxWait {
fmt.Printf("Timeout after %v. Unable to http.get %v successfully.", maxWait, hostname)
return false
}
count++
// delay 10s, 20s, then 30s and stay at 30s
if sleep > 30 {
sleep = 30
} else {
sleep += 10
}
time.Sleep(time.Duration(sleep) * time.Second)
fmt.Printf("Http Error: %v\n", err)
fmt.Printf(" Retry: %v, elapsed wait: %v, max wait %v\n", count, now.Sub(startTime), maxWait)
}
return false
}
func assertHTTPHelloWorld(t *testing.T, output interface{}, headers map[string]string) bool {
return assertHTTPResult(t, output, headers, func(s string) bool {
return assert.Equal(t, "Hello, World!\n", s)
})
}
| [
"\"AWS_REGION\"",
"\"ARM_ENVIRONMENT\"",
"\"ARM_LOCATION\"",
"\"GOOGLE_PROJECT\"",
"\"GOOGLE_ZONE\"",
"\"GKE_ENGINE_VERSION\"",
"\"PULUMI_TEST_NODE_OVERRIDES\""
]
| []
| [
"GKE_ENGINE_VERSION",
"GOOGLE_ZONE",
"AWS_REGION",
"GOOGLE_PROJECT",
"ARM_ENVIRONMENT",
"PULUMI_TEST_NODE_OVERRIDES",
"ARM_LOCATION"
]
| [] | ["GKE_ENGINE_VERSION", "GOOGLE_ZONE", "AWS_REGION", "GOOGLE_PROJECT", "ARM_ENVIRONMENT", "PULUMI_TEST_NODE_OVERRIDES", "ARM_LOCATION"] | go | 7 | 0 | |
tests/integration/utils.go | // Copyright 2015 Sorint.lab
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"bufio"
"context"
"database/sql"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"reflect"
"regexp"
"sort"
"strconv"
"sync"
"testing"
"time"
"github.com/sorintlab/stolon/internal/cluster"
"github.com/sorintlab/stolon/internal/common"
pg "github.com/sorintlab/stolon/internal/postgresql"
"github.com/sorintlab/stolon/internal/store"
"github.com/sorintlab/stolon/internal/util"
_ "github.com/lib/pq"
"github.com/satori/go.uuid"
"github.com/sgotti/gexpect"
)
const (
sleepInterval = 500 * time.Millisecond
MinPort = 2048
MaxPort = 16384
)
var (
defaultPGParameters = cluster.PGParameters{"log_destination": "stderr", "logging_collector": "false"}
)
var curPort = MinPort
var portMutex = sync.Mutex{}
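// pgParametersWithDefaults merges p over the default test PGParameters; values in p take precedence.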
func pgParametersWithDefaults(p cluster.PGParameters) cluster.PGParameters {
pd := cluster.PGParameters{}
for k, v := range defaultPGParameters {
pd[k] = v
}
for k, v := range p {
pd[k] = v
}
return pd
}
type Querier interface {
Exec(query string, args ...interface{}) (sql.Result, error)
Query(query string, args ...interface{}) (*sql.Rows, error)
ReplQuery(query string, args ...interface{}) (*sql.Rows, error)
}
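// GetPGParameters returns the PostgreSQL settings whose source is the configuration file.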
func GetPGParameters(q Querier) (common.Parameters, error) {
var pgParameters = common.Parameters{}
rows, err := q.Query("select name, setting, source from pg_settings")
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var name, setting, source string
if err = rows.Scan(&name, &setting, &source); err != nil {
return nil, err
}
if source == "configuration file" {
pgParameters[name] = setting
}
}
return pgParameters, nil
}
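// GetSystemData issues IDENTIFY_SYSTEM on a replication connection and returns the reported
// system id, timeline id and current xlog position.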
func GetSystemData(q Querier) (*pg.SystemData, error) {
rows, err := q.ReplQuery("IDENTIFY_SYSTEM")
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var sd pg.SystemData
var xLogPosLsn string
var unused *string
if err = rows.Scan(&sd.SystemID, &sd.TimelineID, &xLogPosLsn, &unused); err != nil {
return nil, err
}
sd.XLogPos, err = pg.PGLsnToInt(xLogPosLsn)
if err != nil {
return nil, err
}
return &sd, nil
}
return nil, fmt.Errorf("query returned 0 rows")
}
func GetXLogPos(q Querier) (uint64, error) {
// get the current master XLogPos
systemData, err := GetSystemData(q)
if err != nil {
return 0, err
}
return systemData.XLogPos, nil
}
// getReplicationSlots returns the existing replication slots (including temporary
// replication slots on PostgreSQL > 10)
func getReplicationSlots(q Querier) (pg.ReplicationSlots, error) {
replSlots := pg.ReplicationSlots{}
rows, err := q.Query("select slot_name, slot_type from pg_replication_slots")
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var slotName pg.ReplicationSlot
if err := rows.Scan(&slotName.SlotName, &slotName.SlotType); err != nil {
return nil, err
}
replSlots = append(replSlots, slotName)
}
return replSlots, nil
}
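// waitReplicationSlots polls the instance until its replication slot names match the expected
// (sorted) list or the timeout expires.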
func waitReplicationSlots(q Querier, replSlots []string, timeout time.Duration) error {
	sort.Sort(sort.StringSlice(replSlots))
	start := time.Now()
	curReplSlots := []string{}
	var err error
	for time.Now().Add(-timeout).Before(start) {
		var allReplSlots pg.ReplicationSlots
		allReplSlots, err = getReplicationSlots(q)
		if err != nil {
			goto end
		}
		curReplSlots = []string{}
		for _, s := range allReplSlots {
			curReplSlots = append(curReplSlots, s.SlotName)
		}
		sort.Sort(sort.StringSlice(curReplSlots))
		if reflect.DeepEqual(replSlots, curReplSlots) {
			return nil
		}
	end:
		time.Sleep(2 * time.Second)
	}
	return fmt.Errorf("timeout waiting for replSlots %v, got: %v, last err: %v", replSlots, curReplSlots, err)
}
func waitNotStolonReplicationSlots(q Querier, replSlots []string, timeout time.Duration) error {
sort.Sort(sort.StringSlice(replSlots))
start := time.Now()
var curReplSlots []string
var err error
for time.Now().Add(-timeout).Before(start) {
allReplSlots, err := getReplicationSlots(q)
if err != nil {
goto end
}
curReplSlots = []string{}
for _, s := range allReplSlots {
if !common.IsStolonName(s.SlotName) {
curReplSlots = append(curReplSlots, s.SlotName)
}
}
sort.Sort(sort.StringSlice(curReplSlots))
if reflect.DeepEqual(replSlots, curReplSlots) {
return nil
}
end:
time.Sleep(2 * time.Second)
}
return fmt.Errorf("timeout waiting for replSlots %v, got: %v, last err: %v", replSlots, curReplSlots, err)
}
type Process struct {
t *testing.T
uid string
name string
args []string
cmd *gexpect.ExpectSubprocess
bin string
}
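// start launches the process under gexpect with its output connected to a pipe and streams every
// output line to the test log, prefixed with the process name and uid.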
func (p *Process) start() error {
if p.cmd != nil {
panic(fmt.Errorf("%s: cmd not cleanly stopped", p.uid))
}
cmd := exec.Command(p.bin, p.args...)
pr, pw, err := os.Pipe()
if err != nil {
return err
}
p.cmd = &gexpect.ExpectSubprocess{Cmd: cmd, Output: pw}
if err := p.cmd.Start(); err != nil {
return err
}
go func() {
scanner := bufio.NewScanner(pr)
for scanner.Scan() {
p.t.Logf("[%s %s]: %s", p.name, p.uid, scanner.Text())
}
}()
return nil
}
func (p *Process) Start() error {
if err := p.start(); err != nil {
return err
}
p.cmd.Continue()
return nil
}
func (p *Process) StartExpect() error {
return p.start()
}
func (p *Process) Signal(sig os.Signal) error {
p.t.Logf("signalling %s %s with %s", p.name, p.uid, sig)
if p.cmd == nil {
panic(fmt.Errorf("p: %s, cmd is empty", p.uid))
}
return p.cmd.Cmd.Process.Signal(sig)
}
func (p *Process) Kill() {
p.t.Logf("killing %s %s", p.name, p.uid)
if p.cmd == nil {
panic(fmt.Errorf("p: %s, cmd is empty", p.uid))
}
p.cmd.Cmd.Process.Signal(os.Kill)
p.cmd.Wait()
p.cmd = nil
}
func (p *Process) Stop() {
p.t.Logf("stopping %s %s", p.name, p.uid)
if p.cmd == nil {
panic(fmt.Errorf("p: %s, cmd is empty", p.uid))
}
p.cmd.Continue()
p.cmd.Cmd.Process.Signal(os.Interrupt)
p.cmd.Wait()
p.cmd = nil
}
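// Wait blocks until the underlying process exits, or returns an error if timeout elapses first.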
func (p *Process) Wait(timeout time.Duration) error {
timeoutCh := time.NewTimer(timeout).C
endCh := make(chan error)
go func() {
err := p.cmd.Wait()
endCh <- err
}()
select {
case <-timeoutCh:
return fmt.Errorf("timeout waiting on process")
case <-endCh:
return nil
}
}
type TestKeeper struct {
t *testing.T
Process
dataDir string
pgListenAddress string
pgPort string
pgSUUsername string
pgSUPassword string
pgReplUsername string
pgReplPassword string
db *sql.DB
rdb *sql.DB
}
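// NewTestKeeperWithID configures a stolon keeper process for the given cluster and store, allocating
// a free PostgreSQL listen address/port and opening both a superuser and a replication connection to
// the instance. The keeper binary is taken from the STKEEPER_BIN environment variable.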
func NewTestKeeperWithID(t *testing.T, dir, uid, clusterName, pgSUUsername, pgSUPassword, pgReplUsername, pgReplPassword string, storeBackend store.Backend, storeEndpoints string, a ...string) (*TestKeeper, error) {
args := []string{}
dataDir := filepath.Join(dir, fmt.Sprintf("st%s", uid))
pgListenAddress, pgPort, err := getFreePort(true, false)
if err != nil {
return nil, err
}
args = append(args, fmt.Sprintf("--uid=%s", uid))
args = append(args, fmt.Sprintf("--cluster-name=%s", clusterName))
args = append(args, fmt.Sprintf("--pg-listen-address=%s", pgListenAddress))
args = append(args, fmt.Sprintf("--pg-port=%s", pgPort))
args = append(args, fmt.Sprintf("--data-dir=%s", dataDir))
args = append(args, fmt.Sprintf("--store-backend=%s", storeBackend))
args = append(args, fmt.Sprintf("--store-endpoints=%s", storeEndpoints))
args = append(args, fmt.Sprintf("--pg-su-username=%s", pgSUUsername))
if pgSUPassword != "" {
args = append(args, fmt.Sprintf("--pg-su-password=%s", pgSUPassword))
}
args = append(args, fmt.Sprintf("--pg-repl-username=%s", pgReplUsername))
args = append(args, fmt.Sprintf("--pg-repl-password=%s", pgReplPassword))
if os.Getenv("DEBUG") != "" {
args = append(args, "--debug")
}
args = append(args, a...)
connParams := pg.ConnParams{
"user": pgSUUsername,
"password": pgSUPassword,
"host": pgListenAddress,
"port": pgPort,
"dbname": "postgres",
"sslmode": "disable",
}
replConnParams := pg.ConnParams{
"user": pgReplUsername,
"password": pgReplPassword,
"host": pgListenAddress,
"port": pgPort,
"dbname": "postgres",
"sslmode": "disable",
"replication": "1",
}
connString := connParams.ConnString()
db, err := sql.Open("postgres", connString)
if err != nil {
return nil, err
}
replConnString := replConnParams.ConnString()
rdb, err := sql.Open("postgres", replConnString)
if err != nil {
return nil, err
}
bin := os.Getenv("STKEEPER_BIN")
if bin == "" {
return nil, fmt.Errorf("missing STKEEPER_BIN env")
}
tk := &TestKeeper{
t: t,
Process: Process{
t: t,
uid: uid,
name: "keeper",
bin: bin,
args: args,
},
dataDir: dataDir,
pgListenAddress: pgListenAddress,
pgPort: pgPort,
pgSUUsername: pgSUUsername,
pgSUPassword: pgSUPassword,
pgReplUsername: pgReplUsername,
pgReplPassword: pgReplPassword,
db: db,
rdb: rdb,
}
return tk, nil
}
func NewTestKeeper(t *testing.T, dir, clusterName, pgSUUsername, pgSUPassword, pgReplUsername, pgReplPassword string, storeBackend store.Backend, storeEndpoints string, a ...string) (*TestKeeper, error) {
u := uuid.NewV4()
uid := fmt.Sprintf("%x", u[:4])
return NewTestKeeperWithID(t, dir, uid, clusterName, pgSUUsername, pgSUPassword, pgReplUsername, pgReplPassword, storeBackend, storeEndpoints, a...)
}
func (tk *TestKeeper) PGDataVersion() (int, int, error) {
fh, err := os.Open(filepath.Join(tk.dataDir, "postgres", "PG_VERSION"))
if err != nil {
return 0, 0, fmt.Errorf("failed to read PG_VERSION: %v", err)
}
defer fh.Close()
scanner := bufio.NewScanner(fh)
scanner.Split(bufio.ScanLines)
scanner.Scan()
version := scanner.Text()
return pg.ParseVersion(version)
}
func (tk *TestKeeper) GetPrimaryConninfo() (pg.ConnParams, error) {
regex := regexp.MustCompile(`\s*primary_conninfo\s*=\s*'(.*)'$`)
fh, err := os.Open(filepath.Join(tk.dataDir, "postgres", "recovery.conf"))
if os.IsNotExist(err) {
return nil, nil
}
defer fh.Close()
scanner := bufio.NewScanner(fh)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
m := regex.FindStringSubmatch(scanner.Text())
if len(m) == 2 {
return pg.ParseConnString(m[1])
}
}
return nil, nil
}
func (tk *TestKeeper) Exec(query string, args ...interface{}) (sql.Result, error) {
res, err := tk.db.Exec(query, args...)
if err != nil {
return nil, err
}
return res, nil
}
func (tk *TestKeeper) Query(query string, args ...interface{}) (*sql.Rows, error) {
res, err := tk.db.Query(query, args...)
if err != nil {
return nil, err
}
return res, nil
}
func (tk *TestKeeper) ReplQuery(query string, args ...interface{}) (*sql.Rows, error) {
res, err := tk.rdb.Query(query, args...)
if err != nil {
return nil, err
}
return res, nil
}
func (tk *TestKeeper) SwitchWals(times int) error {
maj, _, err := tk.PGDataVersion()
if err != nil {
return err
}
var switchLogFunc string
if maj < 10 {
switchLogFunc = "select pg_switch_xlog()"
} else {
switchLogFunc = "select pg_switch_wal()"
}
tk.Exec("DROP TABLE switchwal")
if _, err := tk.Exec("CREATE TABLE switchwal(ID INT PRIMARY KEY NOT NULL)"); err != nil {
return err
}
// if times > 1 we have to do some transactions or the wal won't switch
for i := 0; i < times; i++ {
if _, err := tk.Exec("INSERT INTO switchwal VALUES ($1)", i); err != nil {
return err
}
if _, err := tk.db.Exec(switchLogFunc); err != nil {
return err
}
}
tk.Exec("DROP TABLE switchwal")
return nil
}
func (tk *TestKeeper) CheckPoint() error {
_, err := tk.Exec("CHECKPOINT")
return err
}
func (tk *TestKeeper) WaitDBUp(timeout time.Duration) error {
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
_, err := tk.Exec("select 1")
if err == nil {
return nil
}
tk.t.Logf("tk: %v, error: %v", tk.uid, err)
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
func (tk *TestKeeper) WaitDBDown(timeout time.Duration) error {
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
_, err := tk.Exec("select 1")
if err != nil {
return nil
}
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
func (tk *TestKeeper) GetPGProcess() (*os.Process, error) {
fh, err := os.Open(filepath.Join(tk.dataDir, "postgres/postmaster.pid"))
if err != nil {
return nil, err
}
defer fh.Close()
scanner := bufio.NewScanner(fh)
scanner.Split(bufio.ScanLines)
if !scanner.Scan() {
return nil, fmt.Errorf("not enough lines in pid file")
}
pidStr := scanner.Text()
pid, err := strconv.Atoi(string(pidStr))
if err != nil {
return nil, err
}
return os.FindProcess(pid)
}
func (tk *TestKeeper) SignalPG(sig os.Signal) error {
p, err := tk.GetPGProcess()
if err != nil {
return err
}
return p.Signal(sig)
}
func (tk *TestKeeper) isInRecovery() (bool, error) {
rows, err := tk.Query("SELECT pg_is_in_recovery from pg_is_in_recovery()")
if err != nil {
return false, err
}
defer rows.Close()
for rows.Next() {
var isInRecovery bool
if err := rows.Scan(&isInRecovery); err != nil {
return false, err
}
if isInRecovery {
return true, nil
}
return false, nil
}
return false, fmt.Errorf("no rows returned")
}
func (tk *TestKeeper) WaitDBRole(r common.Role, ptk *TestKeeper, timeout time.Duration) error {
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
time.Sleep(sleepInterval)
		// When the cluster is in standby mode the master db is also a standby,
		// so we cannot just check whether the keeper is in recovery; we also
		// have to check whether primary_conninfo points to the primary db or
		// to the cluster master.
if ptk == nil {
ok, err := tk.isInRecovery()
if err != nil {
continue
}
if !ok && r == common.RoleMaster {
return nil
}
if ok && r == common.RoleStandby {
return nil
}
} else {
ok, err := tk.isInRecovery()
if err != nil {
continue
}
if !ok {
continue
}
			// TODO(sgotti) get this information from the running instance rather than from
			// recovery.conf to be really sure it's applied
conninfo, err := tk.GetPrimaryConninfo()
if err != nil {
continue
}
if conninfo["host"] == ptk.pgListenAddress && conninfo["port"] == ptk.pgPort {
if r == common.RoleMaster {
return nil
}
} else {
if r == common.RoleStandby {
return nil
}
}
}
}
return fmt.Errorf("timeout")
}
func (tk *TestKeeper) GetPGParameters() (common.Parameters, error) {
return GetPGParameters(tk)
}
type CheckFunc func(time.Duration) error
func waitChecks(timeout time.Duration, fns ...CheckFunc) error {
end := make(chan error)
fnc := len(fns)
for _, fn := range fns {
go func(fn CheckFunc, end chan error) {
end <- fn(timeout)
}(fn, end)
}
c := 0
for c < fnc {
err := <-end
if err != nil {
return err
}
c++
}
return nil
}
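// exampleWaitBothKeepersUp is an illustrative sketch, not part of the original
// file: waitChecks runs the given CheckFuncs concurrently with the same timeout
// and returns the first non-nil error. tk1 and tk2 are hypothetical
// *TestKeeper values created elsewhere in a test.
func exampleWaitBothKeepersUp(t *testing.T, tk1, tk2 *TestKeeper) {
	if err := waitChecks(60*time.Second, tk1.WaitDBUp, tk2.WaitDBUp); err != nil {
		t.Fatalf("keepers did not come up: %v", err)
	}
}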
type TestSentinel struct {
t *testing.T
Process
}
func NewTestSentinel(t *testing.T, dir string, clusterName string, storeBackend store.Backend, storeEndpoints string, a ...string) (*TestSentinel, error) {
u := uuid.NewV4()
uid := fmt.Sprintf("%x", u[:4])
args := []string{}
args = append(args, fmt.Sprintf("--cluster-name=%s", clusterName))
args = append(args, fmt.Sprintf("--store-backend=%s", storeBackend))
args = append(args, fmt.Sprintf("--store-endpoints=%s", storeEndpoints))
if os.Getenv("DEBUG") != "" {
args = append(args, "--debug")
}
args = append(args, a...)
bin := os.Getenv("STSENTINEL_BIN")
if bin == "" {
return nil, fmt.Errorf("missing STSENTINEL_BIN env")
}
ts := &TestSentinel{
t: t,
Process: Process{
t: t,
uid: uid,
name: "sentinel",
bin: bin,
args: args,
},
}
return ts, nil
}
type TestProxy struct {
t *testing.T
Process
listenAddress string
port string
db *sql.DB
rdb *sql.DB
}
func NewTestProxy(t *testing.T, dir string, clusterName, pgSUUsername, pgSUPassword, pgReplUsername, pgReplPassword string, storeBackend store.Backend, storeEndpoints string, a ...string) (*TestProxy, error) {
u := uuid.NewV4()
uid := fmt.Sprintf("%x", u[:4])
listenAddress, port, err := getFreePort(true, false)
if err != nil {
return nil, err
}
args := []string{}
args = append(args, fmt.Sprintf("--cluster-name=%s", clusterName))
args = append(args, fmt.Sprintf("--listen-address=%s", listenAddress))
args = append(args, fmt.Sprintf("--port=%s", port))
args = append(args, fmt.Sprintf("--store-backend=%s", storeBackend))
args = append(args, fmt.Sprintf("--store-endpoints=%s", storeEndpoints))
if os.Getenv("DEBUG") != "" {
args = append(args, "--debug")
}
args = append(args, a...)
connParams := pg.ConnParams{
"user": pgSUUsername,
"password": pgSUPassword,
"host": listenAddress,
"port": port,
"dbname": "postgres",
"sslmode": "disable",
}
replConnParams := pg.ConnParams{
"user": pgReplUsername,
"password": pgReplPassword,
"host": listenAddress,
"port": port,
"dbname": "postgres",
"sslmode": "disable",
"replication": "1",
}
connString := connParams.ConnString()
db, err := sql.Open("postgres", connString)
if err != nil {
return nil, err
}
replConnString := replConnParams.ConnString()
rdb, err := sql.Open("postgres", replConnString)
if err != nil {
return nil, err
}
bin := os.Getenv("STPROXY_BIN")
if bin == "" {
return nil, fmt.Errorf("missing STPROXY_BIN env")
}
tp := &TestProxy{
t: t,
Process: Process{
t: t,
uid: uid,
name: "proxy",
bin: bin,
args: args,
},
listenAddress: listenAddress,
port: port,
db: db,
rdb: rdb,
}
return tp, nil
}
func (tp *TestProxy) WaitListening(timeout time.Duration) error {
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
_, err := net.DialTimeout("tcp", net.JoinHostPort(tp.listenAddress, tp.port), timeout-time.Now().Sub(start))
if err == nil {
return nil
}
tp.t.Logf("tp: %v, error: %v", tp.uid, err)
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
func (tp *TestProxy) CheckListening() bool {
_, err := net.Dial("tcp", net.JoinHostPort(tp.listenAddress, tp.port))
if err != nil {
return false
}
return true
}
func (tp *TestProxy) WaitNotListening(timeout time.Duration) error {
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
_, err := net.DialTimeout("tcp", net.JoinHostPort(tp.listenAddress, tp.port), timeout-time.Now().Sub(start))
if err != nil {
return nil
}
tp.t.Logf("tp: %v, error: %v", tp.uid, err)
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
func (tp *TestProxy) Exec(query string, args ...interface{}) (sql.Result, error) {
res, err := tp.db.Exec(query, args...)
if err != nil {
return nil, err
}
return res, nil
}
func (tp *TestProxy) Query(query string, args ...interface{}) (*sql.Rows, error) {
res, err := tp.db.Query(query, args...)
if err != nil {
return nil, err
}
return res, nil
}
func (tp *TestProxy) ReplQuery(query string, args ...interface{}) (*sql.Rows, error) {
res, err := tp.rdb.Query(query, args...)
if err != nil {
return nil, err
}
return res, nil
}
func (tp *TestProxy) GetPGParameters() (common.Parameters, error) {
return GetPGParameters(tp)
}
func (tp *TestProxy) WaitRightMaster(tk *TestKeeper, timeout time.Duration) error {
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
pgParameters, err := GetPGParameters(tp)
if err != nil {
goto end
}
if pgParameters["port"] == tk.pgPort {
return nil
}
end:
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
func StolonCtl(clusterName string, storeBackend store.Backend, storeEndpoints string, a ...string) error {
args := []string{}
args = append(args, fmt.Sprintf("--cluster-name=%s", clusterName))
args = append(args, fmt.Sprintf("--store-backend=%s", storeBackend))
args = append(args, fmt.Sprintf("--store-endpoints=%s", storeEndpoints))
args = append(args, a...)
bin := os.Getenv("STCTL_BIN")
if bin == "" {
return fmt.Errorf("missing STCTL_BIN env")
}
cmd := exec.Command(bin, args...)
return cmd.Run()
}
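// exampleClusterInit is an illustrative sketch, not part of the original file:
// StolonCtl simply shells out to the stolonctl binary pointed to by STCTL_BIN,
// so a test could initialize a cluster from a spec file roughly like this. The
// "init", "-y" and "-f" arguments are assumptions about the stolonctl CLI, and
// the empty ClusterSpec is only a placeholder.
func exampleClusterInit(dir, clusterName string, tstore *TestStore) error {
	csPath, err := writeClusterSpec(dir, &cluster.ClusterSpec{})
	if err != nil {
		return err
	}
	storeEndpoints := fmt.Sprintf("%s:%s", tstore.listenAddress, tstore.port)
	return StolonCtl(clusterName, tstore.storeBackend, storeEndpoints, "init", "-y", "-f", csPath)
}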
type TestStore struct {
t *testing.T
Process
listenAddress string
port string
store store.KVStore
storeBackend store.Backend
}
func NewTestStore(t *testing.T, dir string, a ...string) (*TestStore, error) {
storeBackend := store.Backend(os.Getenv("STOLON_TEST_STORE_BACKEND"))
switch storeBackend {
case "consul":
return NewTestConsul(t, dir, a...)
case "etcd":
storeBackend = "etcdv2"
fallthrough
case "etcdv2", "etcdv3":
return NewTestEtcd(t, dir, storeBackend, a...)
}
return nil, fmt.Errorf("wrong store backend")
}
func NewTestEtcd(t *testing.T, dir string, backend store.Backend, a ...string) (*TestStore, error) {
u := uuid.NewV4()
uid := fmt.Sprintf("%x", u[:4])
dataDir := filepath.Join(dir, fmt.Sprintf("etcd%s", uid))
listenAddress, port, err := getFreePort(true, false)
if err != nil {
return nil, err
}
listenAddress2, port2, err := getFreePort(true, false)
if err != nil {
return nil, err
}
args := []string{}
args = append(args, fmt.Sprintf("--name=%s", uid))
args = append(args, fmt.Sprintf("--data-dir=%s", dataDir))
args = append(args, fmt.Sprintf("--listen-client-urls=http://%s:%s", listenAddress, port))
args = append(args, fmt.Sprintf("--advertise-client-urls=http://%s:%s", listenAddress, port))
args = append(args, fmt.Sprintf("--listen-peer-urls=http://%s:%s", listenAddress2, port2))
args = append(args, fmt.Sprintf("--initial-advertise-peer-urls=http://%s:%s", listenAddress2, port2))
args = append(args, fmt.Sprintf("--initial-cluster=%s=http://%s:%s", uid, listenAddress2, port2))
args = append(args, a...)
storeEndpoints := fmt.Sprintf("%s:%s", listenAddress, port)
storeConfig := store.Config{
Backend: store.Backend(backend),
Endpoints: storeEndpoints,
}
kvstore, err := store.NewKVStore(storeConfig)
if err != nil {
return nil, fmt.Errorf("cannot create store: %v", err)
}
bin := os.Getenv("ETCD_BIN")
if bin == "" {
return nil, fmt.Errorf("missing ETCD_BIN env")
}
tstore := &TestStore{
t: t,
Process: Process{
t: t,
uid: uid,
name: "etcd",
bin: bin,
args: args,
},
listenAddress: listenAddress,
port: port,
store: kvstore,
storeBackend: backend,
}
return tstore, nil
}
func NewTestConsul(t *testing.T, dir string, a ...string) (*TestStore, error) {
u := uuid.NewV4()
uid := fmt.Sprintf("%x", u[:4])
dataDir := filepath.Join(dir, fmt.Sprintf("consul%s", uid))
listenAddress, portHTTP, err := getFreePort(true, false)
if err != nil {
return nil, err
}
_, portSerfLan, err := getFreePort(true, true)
if err != nil {
return nil, err
}
_, portSerfWan, err := getFreePort(true, true)
if err != nil {
return nil, err
}
_, portServer, err := getFreePort(true, false)
if err != nil {
return nil, err
}
f, err := os.Create(filepath.Join(dir, fmt.Sprintf("consul%s.json", uid)))
if err != nil {
return nil, err
}
defer f.Close()
f.WriteString(fmt.Sprintf(`{
"ports": {
"dns": -1,
"http": %s,
"serf_lan": %s,
"serf_wan": %s,
"server": %s
}
}`, portHTTP, portSerfLan, portSerfWan, portServer))
args := []string{}
args = append(args, "agent")
args = append(args, "-server")
args = append(args, fmt.Sprintf("-config-file=%s", f.Name()))
args = append(args, fmt.Sprintf("-data-dir=%s", dataDir))
args = append(args, fmt.Sprintf("-bind=%s", listenAddress))
args = append(args, fmt.Sprintf("-advertise=%s", listenAddress))
args = append(args, "-bootstrap-expect=1")
args = append(args, a...)
storeEndpoints := fmt.Sprintf("%s:%s", listenAddress, portHTTP)
storeConfig := store.Config{
Backend: store.CONSUL,
Endpoints: storeEndpoints,
}
kvstore, err := store.NewKVStore(storeConfig)
if err != nil {
return nil, fmt.Errorf("cannot create store: %v", err)
}
bin := os.Getenv("CONSUL_BIN")
if bin == "" {
return nil, fmt.Errorf("missing CONSUL_BIN env")
}
ts := &TestStore{
t: t,
Process: Process{
t: t,
uid: uid,
name: "consul",
bin: bin,
args: args,
},
listenAddress: listenAddress,
port: portHTTP,
store: kvstore,
storeBackend: store.CONSUL,
}
return ts, nil
}
func (ts *TestStore) WaitUp(timeout time.Duration) error {
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
_, err := ts.store.Get(context.TODO(), "anykey")
ts.t.Logf("err: %v", err)
if err != nil && err == store.ErrKeyNotFound {
return nil
}
if err == nil {
return nil
}
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
func (ts *TestStore) WaitDown(timeout time.Duration) error {
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
_, err := ts.store.Get(context.TODO(), "anykey")
if err != nil && err != store.ErrKeyNotFound {
return nil
}
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
func WaitClusterDataUpdated(e *store.KVBackedStore, timeout time.Duration) error {
icd, _, err := e.GetClusterData(context.TODO())
if err != nil {
return fmt.Errorf("unexpected err: %v", err)
}
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
cd, _, err := e.GetClusterData(context.TODO())
if err != nil || cd == nil {
goto end
}
if !reflect.DeepEqual(icd, cd) {
return nil
}
end:
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
func WaitClusterDataWithMaster(e *store.KVBackedStore, timeout time.Duration) (string, error) {
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
cd, _, err := e.GetClusterData(context.TODO())
if err != nil || cd == nil {
goto end
}
if cd.Cluster.Status.Phase == cluster.ClusterPhaseNormal && cd.Cluster.Status.Master != "" {
return cd.DBs[cd.Cluster.Status.Master].Spec.KeeperUID, nil
}
end:
time.Sleep(sleepInterval)
}
return "", fmt.Errorf("timeout")
}
func WaitClusterDataMaster(master string, e *store.KVBackedStore, timeout time.Duration) error {
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
cd, _, err := e.GetClusterData(context.TODO())
if err != nil || cd == nil {
goto end
}
if cd.Cluster.Status.Phase == cluster.ClusterPhaseNormal && cd.Cluster.Status.Master != "" {
if cd.DBs[cd.Cluster.Status.Master].Spec.KeeperUID == master {
return nil
}
}
end:
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
func WaitClusterDataKeeperInitialized(keeperUID string, e *store.KVBackedStore, timeout time.Duration) error {
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
cd, _, err := e.GetClusterData(context.TODO())
if err != nil || cd == nil {
goto end
}
// Check for db on keeper to be initialized
for _, db := range cd.DBs {
if db.Spec.KeeperUID == keeperUID {
if db.Status.CurrentGeneration >= cluster.InitialGeneration {
return nil
}
}
}
end:
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
// WaitClusterDataSynchronousStandbys waits for the given synchronous standbys
// to be both defined in the master db spec and reported in the master db
// status.
func WaitClusterDataSynchronousStandbys(synchronousStandbys []string, e *store.KVBackedStore, timeout time.Duration) error {
sort.Sort(sort.StringSlice(synchronousStandbys))
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
cd, _, err := e.GetClusterData(context.TODO())
if err != nil || cd == nil {
goto end
}
if cd.Cluster.Status.Phase == cluster.ClusterPhaseNormal && cd.Cluster.Status.Master != "" {
masterDB := cd.DBs[cd.Cluster.Status.Master]
// get keepers for db spec synchronousStandbys
keepersUIDs := []string{}
for _, dbUID := range masterDB.Spec.SynchronousStandbys {
db, ok := cd.DBs[dbUID]
if ok {
keepersUIDs = append(keepersUIDs, db.Spec.KeeperUID)
}
}
sort.Sort(sort.StringSlice(keepersUIDs))
if !reflect.DeepEqual(synchronousStandbys, keepersUIDs) {
goto end
}
// get keepers for db status synchronousStandbys
keepersUIDs = []string{}
for _, dbUID := range masterDB.Status.SynchronousStandbys {
db, ok := cd.DBs[dbUID]
if ok {
keepersUIDs = append(keepersUIDs, db.Spec.KeeperUID)
}
}
sort.Sort(sort.StringSlice(keepersUIDs))
if !reflect.DeepEqual(synchronousStandbys, keepersUIDs) {
goto end
}
return nil
}
end:
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
func WaitClusterPhase(e *store.KVBackedStore, phase cluster.ClusterPhase, timeout time.Duration) error {
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
cd, _, err := e.GetClusterData(context.TODO())
if err != nil || cd == nil {
goto end
}
if cd.Cluster.Status.Phase == phase {
return nil
}
end:
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
func WaitNumDBs(e *store.KVBackedStore, n int, timeout time.Duration) error {
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
cd, _, err := e.GetClusterData(context.TODO())
if err != nil || cd == nil {
goto end
}
if len(cd.DBs) == n {
return nil
}
end:
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
func WaitStandbyKeeper(e *store.KVBackedStore, keeperUID string, timeout time.Duration) error {
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
cd, _, err := e.GetClusterData(context.TODO())
if err != nil || cd == nil {
goto end
}
for _, db := range cd.DBs {
if db.UID == cd.Cluster.Status.Master {
continue
}
if db.Spec.KeeperUID == keeperUID && db.Spec.Role == common.RoleStandby {
return nil
}
}
end:
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
func WaitClusterDataKeepers(keepersUIDs []string, e *store.KVBackedStore, timeout time.Duration) error {
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
cd, _, err := e.GetClusterData(context.TODO())
if err != nil || cd == nil {
goto end
}
if len(keepersUIDs) != len(cd.Keepers) {
goto end
}
		// Check that every reported keeper is in the expected set of keeper UIDs
for _, keeper := range cd.Keepers {
if !util.StringInSlice(keepersUIDs, keeper.UID) {
goto end
}
}
return nil
end:
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
// WaitClusterSyncedXLogPos waits for all the specified keepers to report the
// same XLogPos and for that position to be >= the given xLogPos.
func WaitClusterSyncedXLogPos(keepers []*TestKeeper, xLogPos uint64, e *store.KVBackedStore, timeout time.Duration) error {
keepersUIDs := []string{}
for _, sk := range keepers {
keepersUIDs = append(keepersUIDs, sk.uid)
}
	// check that all the specified keepers report the same XLogPos and that
	// it is >= the given xLogPos
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
c := 0
curXLogPos := uint64(0)
cd, _, err := e.GetClusterData(context.TODO())
if err != nil || cd == nil {
goto end
}
		// Check the XLogPos reported by the db of each specified keeper
for _, keeper := range cd.Keepers {
if !util.StringInSlice(keepersUIDs, keeper.UID) {
continue
}
for _, db := range cd.DBs {
if db.Spec.KeeperUID == keeper.UID {
if db.Status.XLogPos < xLogPos {
goto end
}
if c == 0 {
curXLogPos = db.Status.XLogPos
} else {
if db.Status.XLogPos != curXLogPos {
goto end
}
}
}
}
c++
}
if c == len(keepersUIDs) {
return nil
}
end:
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
func WaitClusterDataEnabledProxiesNum(e *store.KVBackedStore, n int, timeout time.Duration) error {
	// TODO(sgotti) find a way to retrieve the proxies' internally generated uids
	// and check for them instead of relying only on the number of proxies
start := time.Now()
for time.Now().Add(-timeout).Before(start) {
cd, _, err := e.GetClusterData(context.TODO())
if err != nil || cd == nil {
goto end
}
if len(cd.Proxy.Spec.EnabledProxies) == n {
return nil
}
end:
time.Sleep(sleepInterval)
}
return fmt.Errorf("timeout")
}
func testFreeTCPPort(port int) error {
ln, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", port))
if err != nil {
return err
}
ln.Close()
return nil
}
func testFreeUDPPort(port int) error {
ln, err := net.ListenPacket("udp", fmt.Sprintf("localhost:%d", port))
if err != nil {
return err
}
ln.Close()
return nil
}
// Hack to find a free tcp and udp port
func getFreePort(tcp bool, udp bool) (string, string, error) {
portMutex.Lock()
defer portMutex.Unlock()
if !tcp && !udp {
		return "", "", fmt.Errorf("at least one of tcp or udp port must be requested")
}
localhostIP, err := net.ResolveIPAddr("ip", "localhost")
if err != nil {
return "", "", fmt.Errorf("failed to resolve ip addr: %v", err)
}
for {
curPort++
if curPort > MaxPort {
			return "", "", fmt.Errorf("all available ports to test have been exhausted")
}
if tcp {
if err := testFreeTCPPort(curPort); err != nil {
continue
}
}
if udp {
if err := testFreeUDPPort(curPort); err != nil {
continue
}
}
return localhostIP.IP.String(), strconv.Itoa(curPort), nil
}
}
func writeClusterSpec(dir string, cs *cluster.ClusterSpec) (string, error) {
csj, err := json.Marshal(cs)
if err != nil {
return "", err
}
tmpFile, err := ioutil.TempFile(dir, "initial-cluster-spec.json")
if err != nil {
return "", err
}
defer tmpFile.Close()
if _, err := tmpFile.Write(csj); err != nil {
return "", err
}
return tmpFile.Name(), nil
}
| [
"\"DEBUG\"",
"\"STKEEPER_BIN\"",
"\"DEBUG\"",
"\"STSENTINEL_BIN\"",
"\"DEBUG\"",
"\"STPROXY_BIN\"",
"\"STCTL_BIN\"",
"\"STOLON_TEST_STORE_BACKEND\"",
"\"ETCD_BIN\"",
"\"CONSUL_BIN\""
]
| []
| [
"STOLON_TEST_STORE_BACKEND",
"CONSUL_BIN",
"STSENTINEL_BIN",
"STCTL_BIN",
"STPROXY_BIN",
"DEBUG",
"ETCD_BIN",
"STKEEPER_BIN"
]
| [] | ["STOLON_TEST_STORE_BACKEND", "CONSUL_BIN", "STSENTINEL_BIN", "STCTL_BIN", "STPROXY_BIN", "DEBUG", "ETCD_BIN", "STKEEPER_BIN"] | go | 8 | 0 | |
web/controllers/Controller.go | package controllers
import (
"html/template"
"net/http"
"github.com/gorilla/sessions"
"fmt"
"math/rand"
"reflect"
"strings"
"os"
)
var (
store = sessions.NewCookieStore([]byte(randSeq(10)))
letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
)
type flash struct {
Message string
Level string
}
func renderView(w http.ResponseWriter, r *http.Request, baseview string, view string, data interface{}) {
type d struct {
Data interface{}
Flash flash
Version string
}
t := template.Must(template.ParseFiles("./web/views/layouts/"+baseview, "./web/views/"+view))
t.ExecuteTemplate(w, "layout", d{
		Data:    data,
		Flash:   getFlash(w, r),
Version: os.Getenv("VERSION"),
})
}
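// exampleHomeHandler is an illustrative sketch, not part of the original file:
// a typical handler calls renderView with a layout and a view template. The
// template names "base.html" and "home/index.html" are hypothetical and would
// have to exist under ./web/views/layouts and ./web/views respectively.
func exampleHomeHandler(w http.ResponseWriter, r *http.Request) {
	renderView(w, r, "base.html", "home/index.html", map[string]string{"Title": "Home"})
}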
func setFlash(message string, level string, w http.ResponseWriter, r *http.Request) {
session, err := store.Get(r, "flash-session")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
session.AddFlash(message, level)
session.Save(r, w)
}
func getFlash(w http.ResponseWriter, r *http.Request) (message flash) {
session, err := store.Get(r, "flash-session")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
fm := session.Values
if fm == nil {
return
}
session.Save(r, w)
session.Options.MaxAge = -1
session.Save(r, w)
//return fmt.Sprintf("%v", fm[0])
keys := reflect.ValueOf(session.Values).MapKeys()
if len(keys) < 1 {
return
}
key := keys[0].Interface()
value := fmt.Sprintf("%s", session.Values[key])
value = strings.Replace(value, "[", "", -1)
value = strings.Replace(value, "]", "", -1)
return flash{
		Message: value,
		Level:   fmt.Sprintf("%v", key),
}
}
// randSeq returns a string of n letters drawn from the package-level math/rand
// source. Note: before Go 1.20 that source is deterministic unless explicitly
// seeded, so the generated cookie store key is identical on every run.
func randSeq(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}
| [
"\"VERSION\""
]
| []
| [
"VERSION"
]
| [] | ["VERSION"] | go | 1 | 0 | |
libcontainer/init_linux.go | package libcontainer
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"os"
"strings"
"unsafe"
"github.com/containerd/console"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
"github.com/opencontainers/runc/libcontainer/capabilities"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/runc/libcontainer/user"
"github.com/opencontainers/runc/libcontainer/utils"
)
type initType string
const (
initSetns initType = "setns"
initStandard initType = "standard"
)
type pid struct {
Pid int `json:"stage2_pid"`
PidFirstChild int `json:"stage1_pid"`
}
// network is an internal struct used to setup container networks.
type network struct {
configs.Network
// TempVethPeerName is a unique temporary veth peer name that was placed into
// the container's namespace.
TempVethPeerName string `json:"temp_veth_peer_name"`
}
// initConfig is used for transferring parameters from Exec() to Init()
type initConfig struct {
Args []string `json:"args"`
Env []string `json:"env"`
Cwd string `json:"cwd"`
Capabilities *configs.Capabilities `json:"capabilities"`
ProcessLabel string `json:"process_label"`
AppArmorProfile string `json:"apparmor_profile"`
NoNewPrivileges bool `json:"no_new_privileges"`
User string `json:"user"`
AdditionalGroups []string `json:"additional_groups"`
Config *configs.Config `json:"config"`
Networks []*network `json:"network"`
PassedFilesCount int `json:"passed_files_count"`
ContainerId string `json:"containerid"`
Rlimits []configs.Rlimit `json:"rlimits"`
CreateConsole bool `json:"create_console"`
ConsoleWidth uint16 `json:"console_width"`
ConsoleHeight uint16 `json:"console_height"`
RootlessEUID bool `json:"rootless_euid,omitempty"`
RootlessCgroups bool `json:"rootless_cgroups,omitempty"`
SpecState *specs.State `json:"spec_state,omitempty"`
Cgroup2Path string `json:"cgroup2_path,omitempty"`
}
type initer interface {
Init() error
}
func newContainerInit(t initType, pipe *os.File, consoleSocket *os.File, fifoFd, logFd int) (initer, error) {
var config *initConfig
if err := json.NewDecoder(pipe).Decode(&config); err != nil {
return nil, err
}
if err := populateProcessEnvironment(config.Env); err != nil {
return nil, err
}
switch t {
case initSetns:
return &linuxSetnsInit{
pipe: pipe,
consoleSocket: consoleSocket,
config: config,
logFd: logFd,
}, nil
case initStandard:
return &linuxStandardInit{
pipe: pipe,
consoleSocket: consoleSocket,
parentPid: unix.Getppid(),
config: config,
fifoFd: fifoFd,
logFd: logFd,
}, nil
}
return nil, fmt.Errorf("unknown init type %q", t)
}
// populateProcessEnvironment loads the provided environment variables into the
// current process's environment.
func populateProcessEnvironment(env []string) error {
for _, pair := range env {
p := strings.SplitN(pair, "=", 2)
if len(p) < 2 {
return fmt.Errorf("invalid environment variable: %q", pair)
}
name, val := p[0], p[1]
if name == "" {
return fmt.Errorf("environment variable name can't be empty: %q", pair)
}
if strings.IndexByte(name, 0) >= 0 {
return fmt.Errorf("environment variable name can't contain null(\\x00): %q", pair)
}
if strings.IndexByte(val, 0) >= 0 {
return fmt.Errorf("environment variable value can't contain null(\\x00): %q", pair)
}
if err := os.Setenv(name, val); err != nil {
return err
}
}
return nil
}
// finalizeNamespace drops the caps, sets the correct user
// and working dir, and closes any leaked file descriptors
// before executing the command inside the namespace
func finalizeNamespace(config *initConfig) error {
// Ensure that all unwanted fds we may have accidentally
// inherited are marked close-on-exec so they stay out of the
// container
if err := utils.CloseExecFrom(config.PassedFilesCount + 3); err != nil {
return fmt.Errorf("error closing exec fds: %w", err)
}
// we only do chdir if it's specified
doChdir := config.Cwd != ""
if doChdir {
// First, attempt the chdir before setting up the user.
// This could allow us to access a directory that the user running runc can access
// but the container user cannot.
err := unix.Chdir(config.Cwd)
switch {
case err == nil:
doChdir = false
case os.IsPermission(err):
// If we hit an EPERM, we should attempt again after setting up user.
// This will allow us to successfully chdir if the container user has access
// to the directory, but the user running runc does not.
// This is useful in cases where the cwd is also a volume that's been chowned to the container user.
default:
return fmt.Errorf("chdir to cwd (%q) set in config.json failed: %w", config.Cwd, err)
}
}
caps := &configs.Capabilities{}
if config.Capabilities != nil {
caps = config.Capabilities
} else if config.Config.Capabilities != nil {
caps = config.Config.Capabilities
}
w, err := capabilities.New(caps)
if err != nil {
return err
}
// drop capabilities in bounding set before changing user
if err := w.ApplyBoundingSet(); err != nil {
return fmt.Errorf("unable to apply bounding set: %w", err)
}
// preserve existing capabilities while we change users
if err := system.SetKeepCaps(); err != nil {
return fmt.Errorf("unable to set keep caps: %w", err)
}
if err := setupUser(config); err != nil {
return fmt.Errorf("unable to setup user: %w", err)
}
// Change working directory AFTER the user has been set up, if we haven't done it yet.
if doChdir {
if err := unix.Chdir(config.Cwd); err != nil {
return fmt.Errorf("chdir to cwd (%q) set in config.json failed: %w", config.Cwd, err)
}
}
if err := system.ClearKeepCaps(); err != nil {
return fmt.Errorf("unable to clear keep caps: %w", err)
}
if err := w.ApplyCaps(); err != nil {
return fmt.Errorf("unable to apply caps: %w", err)
}
return nil
}
// setupConsole sets up the console from inside the container, and sends the
// master pty fd to the config.Pipe (using cmsg). This is done to ensure that
// consoles are scoped to a container properly (see runc#814 and the many
// issues related to that). This has to be run *after* we've pivoted to the new
// rootfs (and the users' configuration is entirely set up).
func setupConsole(socket *os.File, config *initConfig, mount bool) error {
defer socket.Close()
// At this point, /dev/ptmx points to something that we would expect. We
	// used to change the owner of the slave path, but the /dev/pts mount can
	// have gid=X set (at the users' option), so touching the owner of the
	// slave PTY is not necessary, as the kernel will handle that for us. Note
// however, that setupUser (specifically fixStdioPermissions) *will* change
// the UID owner of the console to be the user the process will run as (so
// they can actually control their console).
pty, slavePath, err := console.NewPty()
if err != nil {
return err
}
// After we return from here, we don't need the console anymore.
defer pty.Close()
if config.ConsoleHeight != 0 && config.ConsoleWidth != 0 {
err = pty.Resize(console.WinSize{
Height: config.ConsoleHeight,
Width: config.ConsoleWidth,
})
if err != nil {
return err
}
}
// Mount the console inside our rootfs.
if mount {
if err := mountConsole(slavePath); err != nil {
return err
}
}
// While we can access console.master, using the API is a good idea.
if err := utils.SendFd(socket, pty.Name(), pty.Fd()); err != nil {
return err
}
// Now, dup over all the things.
return dupStdio(slavePath)
}
// syncParentReady sends to the given pipe a JSON payload which indicates that
// the init is ready to Exec the child process. It then waits for the parent to
// indicate that it is cleared to Exec.
func syncParentReady(pipe io.ReadWriter) error {
// Tell parent.
if err := writeSync(pipe, procReady); err != nil {
return err
}
// Wait for parent to give the all-clear.
return readSync(pipe, procRun)
}
// syncParentHooks sends to the given pipe a JSON payload which indicates that
// the parent should execute pre-start hooks. It then waits for the parent to
// indicate that it is cleared to resume.
func syncParentHooks(pipe io.ReadWriter) error {
// Tell parent.
if err := writeSync(pipe, procHooks); err != nil {
return err
}
// Wait for parent to give the all-clear.
return readSync(pipe, procResume)
}
// syncParentSeccomp sends to the given pipe a JSON payload which
// indicates that the parent should pick up the seccomp fd with pidfd_getfd()
// and send it to the seccomp agent over a unix socket. It then waits for
// the parent to indicate that it is cleared to resume and closes the seccompFd.
// If the seccompFd is -1, there isn't anything to sync with the parent, so it
// returns no error.
func syncParentSeccomp(pipe io.ReadWriter, seccompFd int) error {
if seccompFd == -1 {
return nil
}
// Tell parent.
if err := writeSyncWithFd(pipe, procSeccomp, seccompFd); err != nil {
unix.Close(seccompFd)
return err
}
// Wait for parent to give the all-clear.
if err := readSync(pipe, procSeccompDone); err != nil {
unix.Close(seccompFd)
return fmt.Errorf("sync parent seccomp: %w", err)
}
if err := unix.Close(seccompFd); err != nil {
return fmt.Errorf("close seccomp fd: %w", err)
}
return nil
}
// setupUser changes the groups, gid, and uid for the user inside the container
func setupUser(config *initConfig) error {
// Set up defaults.
defaultExecUser := user.ExecUser{
Uid: 0,
Gid: 0,
Home: "/",
}
passwdPath, err := user.GetPasswdPath()
if err != nil {
return err
}
groupPath, err := user.GetGroupPath()
if err != nil {
return err
}
execUser, err := user.GetExecUserPath(config.User, &defaultExecUser, passwdPath, groupPath)
if err != nil {
return err
}
var addGroups []int
if len(config.AdditionalGroups) > 0 {
addGroups, err = user.GetAdditionalGroupsPath(config.AdditionalGroups, groupPath)
if err != nil {
return err
}
}
// Rather than just erroring out later in setuid(2) and setgid(2), check
// that the user is mapped here.
if _, err := config.Config.HostUID(execUser.Uid); err != nil {
return errors.New("cannot set uid to unmapped user in user namespace")
}
if _, err := config.Config.HostGID(execUser.Gid); err != nil {
return errors.New("cannot set gid to unmapped user in user namespace")
}
if config.RootlessEUID {
// We cannot set any additional groups in a rootless container and thus
// we bail if the user asked us to do so. TODO: We currently can't do
// this check earlier, but if libcontainer.Process.User was typesafe
// this might work.
if len(addGroups) > 0 {
return errors.New("cannot set any additional groups in a rootless container")
}
}
	// Before we change to the container's user, make sure that the process's
	// STDIO is correctly owned by the user that we are switching to.
if err := fixStdioPermissions(config, execUser); err != nil {
return err
}
setgroups, err := os.ReadFile("/proc/self/setgroups")
if err != nil && !os.IsNotExist(err) {
return err
}
// This isn't allowed in an unprivileged user namespace since Linux 3.19.
// There's nothing we can do about /etc/group entries, so we silently
// ignore setting groups here (since the user didn't explicitly ask us to
// set the group).
allowSupGroups := !config.RootlessEUID && string(bytes.TrimSpace(setgroups)) != "deny"
if allowSupGroups {
suppGroups := append(execUser.Sgids, addGroups...)
if err := unix.Setgroups(suppGroups); err != nil {
return err
}
}
if err := system.Setgid(execUser.Gid); err != nil {
return err
}
if err := system.Setuid(execUser.Uid); err != nil {
return err
}
// if we didn't get HOME already, set it based on the user's HOME
if envHome := os.Getenv("HOME"); envHome == "" {
if err := os.Setenv("HOME", execUser.Home); err != nil {
return err
}
}
return nil
}
// fixStdioPermissions fixes the permissions of PID 1's STDIO within the container to the specified user.
// The ownership needs to match because it is created outside of the container and needs to be
// localized.
func fixStdioPermissions(config *initConfig, u *user.ExecUser) error {
var null unix.Stat_t
if err := unix.Stat("/dev/null", &null); err != nil {
return err
}
for _, fd := range []uintptr{
os.Stdin.Fd(),
os.Stderr.Fd(),
os.Stdout.Fd(),
} {
var s unix.Stat_t
if err := unix.Fstat(int(fd), &s); err != nil {
return err
}
// Skip chown of /dev/null if it was used as one of the STDIO fds.
if s.Rdev == null.Rdev {
continue
}
// We only change the uid owner (as it is possible for the mount to
// prefer a different gid, and there's no reason for us to change it).
// The reason why we don't just leave the default uid=X mount setup is
// that users expect to be able to actually use their console. Without
// this code, you couldn't effectively run as a non-root user inside a
// container and also have a console set up.
if err := unix.Fchown(int(fd), u.Uid, int(s.Gid)); err != nil {
// If we've hit an EINVAL then s.Gid isn't mapped in the user
// namespace. If we've hit an EPERM then the inode's current owner
// is not mapped in our user namespace (in particular,
// privileged_wrt_inode_uidgid() has failed). In either case, we
// are in a configuration where it's better for us to just not
// touch the stdio rather than bail at this point.
// nolint:errorlint // unix errors are bare
if err == unix.EINVAL || err == unix.EPERM {
continue
}
return err
}
}
return nil
}
// setupNetwork sets up and initializes any network interface inside the container.
func setupNetwork(config *initConfig) error {
for _, config := range config.Networks {
strategy, err := getStrategy(config.Type)
if err != nil {
return err
}
if err := strategy.initialize(config); err != nil {
return err
}
}
return nil
}
func setupRoute(config *configs.Config) error {
for _, config := range config.Routes {
_, dst, err := net.ParseCIDR(config.Destination)
if err != nil {
return err
}
src := net.ParseIP(config.Source)
if src == nil {
return fmt.Errorf("Invalid source for route: %s", config.Source)
}
gw := net.ParseIP(config.Gateway)
if gw == nil {
return fmt.Errorf("Invalid gateway for route: %s", config.Gateway)
}
l, err := netlink.LinkByName(config.InterfaceName)
if err != nil {
return err
}
route := &netlink.Route{
Scope: netlink.SCOPE_UNIVERSE,
Dst: dst,
Src: src,
Gw: gw,
LinkIndex: l.Attrs().Index,
}
if err := netlink.RouteAdd(route); err != nil {
return err
}
}
return nil
}
func setupRlimits(limits []configs.Rlimit, pid int) error {
for _, rlimit := range limits {
if err := system.Prlimit(pid, rlimit.Type, unix.Rlimit{Max: rlimit.Hard, Cur: rlimit.Soft}); err != nil {
return fmt.Errorf("error setting rlimit type %v: %w", rlimit.Type, err)
}
}
return nil
}
const _P_PID = 1
//nolint:structcheck,unused
type siginfo struct {
si_signo int32
si_errno int32
si_code int32
// below here is a union; si_pid is the only field we use
si_pid int32
// Pad to 128 bytes as detailed in blockUntilWaitable
pad [96]byte
}
// isWaitable returns true if the process has exited false otherwise.
// Its based off blockUntilWaitable in src/os/wait_waitid.go
func isWaitable(pid int) (bool, error) {
si := &siginfo{}
_, _, e := unix.Syscall6(unix.SYS_WAITID, _P_PID, uintptr(pid), uintptr(unsafe.Pointer(si)), unix.WEXITED|unix.WNOWAIT|unix.WNOHANG, 0, 0)
if e != 0 {
return false, os.NewSyscallError("waitid", e)
}
return si.si_pid != 0, nil
}
// signalAllProcesses freezes then iterates over all the processes inside the
// manager's cgroups sending the signal s to them.
// If s is SIGKILL then it will wait for each process to exit.
// For all other signals it will check if the process is ready to report its
// exit status and only if it is will a wait be performed.
func signalAllProcesses(m cgroups.Manager, s os.Signal) error {
var procs []*os.Process
if err := m.Freeze(configs.Frozen); err != nil {
logrus.Warn(err)
}
pids, err := m.GetAllPids()
if err != nil {
if err := m.Freeze(configs.Thawed); err != nil {
logrus.Warn(err)
}
return err
}
for _, pid := range pids {
p, err := os.FindProcess(pid)
if err != nil {
logrus.Warn(err)
continue
}
procs = append(procs, p)
if err := p.Signal(s); err != nil {
logrus.Warn(err)
}
}
if err := m.Freeze(configs.Thawed); err != nil {
logrus.Warn(err)
}
subreaper, err := system.GetSubreaper()
if err != nil {
// The error here means that PR_GET_CHILD_SUBREAPER is not
// supported because this code might run on a kernel older
// than 3.4. We don't want to throw an error in that case,
// and we simplify things, considering there is no subreaper
// set.
subreaper = 0
}
for _, p := range procs {
if s != unix.SIGKILL {
if ok, err := isWaitable(p.Pid); err != nil {
if !errors.Is(err, unix.ECHILD) {
logrus.Warn("signalAllProcesses: ", p.Pid, err)
}
continue
} else if !ok {
// Not ready to report so don't wait
continue
}
}
// In case a subreaper has been setup, this code must not
// wait for the process. Otherwise, we cannot be sure the
// current process will be reaped by the subreaper, while
// the subreaper might be waiting for this process in order
// to retrieve its exit code.
if subreaper == 0 {
if _, err := p.Wait(); err != nil {
if !errors.Is(err, unix.ECHILD) {
logrus.Warn("wait: ", err)
}
}
}
}
return nil
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
main.py | import os
import requests
import re
from dotenv import load_dotenv
import argparse
def expand_shortlink(token, bitlink_id):
headers = {
'Authorization': 'Bearer {}'.format(token)
}
data = {
'bitlink_id': bitlink_id,
}
url = 'https://api-ssl.bitly.com/v4/expand'
response = requests.post(url, headers=headers, json=data)
response.raise_for_status()
return response.json()['link']
def get_shortlink(token, long_url):
headers = {
'Authorization': 'Bearer {}'.format(token)
}
data = {
'long_url': long_url,
}
url = 'https://api-ssl.bitly.com/v4/bitlinks'
response = requests.post(url, headers=headers, json=data)
if response.status_code == 400:
return False
response.raise_for_status()
return response.json()['link']
def get_clicks(token, bitlink):
headers = {
'Authorization': 'Bearer {}'.format(token)
}
data = {
'unit': 'month',
'units': -1
}
    url = 'https://api-ssl.bitly.com/v4/bitlinks/{}/clicks'.format(bitlink)
    response = requests.get(url, headers=headers, params=data)
if response.status_code in (403, 404):
return False
response.raise_for_status()
total = sum(x['clicks'] for x in response.json()['link_clicks'])
return total
def get_clicks_or_shortlink(token, link):
short_link = re.sub(r"https?://(.*)", r'\g<1>', link).strip().strip('/')
clicks = get_clicks(token, short_link)
if clicks is not False:
        return ('Bitly link click count', clicks)
    short_link = get_shortlink(token, link)
    if short_link:
        return ('Bitly short link', short_link)
return ('', False)
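# Usage sketch (illustrative, not part of the original script): the Bitly token
# is read from a TOKEN entry in a local .env file, for example
#   TOKEN=<your bitly generic access token>
# and the script is then called with either a long URL (to shorten it) or an
# existing bit.ly link (to count its clicks):
#   python main.py https://example.com
#   python main.py https://bit.ly/XXXXXXX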
def main():
load_dotenv()
parser = argparse.ArgumentParser(
        description='Gets a short link or the number of clicks'
)
    parser.add_argument('link', help='Link')
args = parser.parse_args()
token = os.getenv("TOKEN")
link = args.link
(result_type, result) = get_clicks_or_shortlink(token, link)
    print("{}: {}".format(result_type, result) if result is not False else "Invalid link")
if __name__ == '__main__':
main()
| []
| []
| [
"TOKEN"
]
| [] | ["TOKEN"] | python | 1 | 0 | |
appengine/swarming/swarming_bot/client/isolateserver.py | #!/usr/bin/env python
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Archives a set of files or directories to an Isolate Server."""
from __future__ import print_function
import collections
import errno
import functools
import logging
import optparse
import os
import re
import signal
import stat
import sys
import tarfile
import threading
import time
import zlib
from utils import tools
tools.force_local_third_party()
# third_party/
import colorama
from depot_tools import fix_encoding
from depot_tools import subcommand
import six
from six.moves import queue as Queue
# pylint: disable=ungrouped-imports
import auth
import isolated_format
import isolate_storage
import local_caching
from utils import file_path
from utils import fs
from utils import logging_utils
from utils import net
from utils import on_error
from utils import subprocess42
from utils import threading_utils
__version__ = '0.9.0'
# Version of isolate protocol passed to the server in /handshake request.
ISOLATE_PROTOCOL_VERSION = '1.0'
# Maximum expected delay (in seconds) between successive file fetches or uploads
# in Storage. If it takes longer than that, a deadlock might be happening
# and all stack frames for all threads are dumped to log.
DEADLOCK_TIMEOUT = 5 * 60
# The number of files to check the isolate server per /pre-upload query.
# All files are sorted by likelihood of a change in the file content
# (currently file size is used to estimate this: larger the file -> larger the
# possibility it has changed). Then first ITEMS_PER_CONTAINS_QUERIES[0] files
# are taken and send to '/pre-upload', then next ITEMS_PER_CONTAINS_QUERIES[1],
# and so on. Numbers here is a trade-off; the more per request, the lower the
# effect of HTTP round trip latency and TCP-level chattiness. On the other hand,
# larger values cause longer lookups, increasing the initial latency to start
# uploading, which is especially an issue for large files. This value is
# optimized for the "few thousands files to look up with minimal number of large
# files missing" case.
ITEMS_PER_CONTAINS_QUERIES = (20, 20, 50, 50, 50, 100)
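# Illustrative sketch, not part of the original module: one way the batch sizes
# above could be applied to split an already-sorted item list into successive
# /pre-upload queries. The helper name is hypothetical; once the tuple is
# exhausted its last entry is reused for the remaining items.
def _example_contains_batches(items, batch_sizes=ITEMS_PER_CONTAINS_QUERIES):
  offset = 0
  index = 0
  while offset < len(items):
    size = batch_sizes[min(index, len(batch_sizes) - 1)]
    yield items[offset:offset + size]
    offset += size
    index += 1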
# A list of already compressed extension types that should not receive any
# compression before being uploaded.
ALREADY_COMPRESSED_TYPES = [
'7z',
'avi',
'cur',
'gif',
'h264',
'jar',
'jpeg',
'jpg',
'mp4',
'pdf',
'png',
'wav',
'zip',
]
# The delay (in seconds) to wait between logging statements when retrieving the
# required files. This is intended to let the user know that the program is
# still running.
DELAY_BETWEEN_UPDATES_IN_SECS = 30
DEFAULT_DENYLIST = (
# Temporary vim or python files.
r'^.+\.(?:pyc|swp)$',
# .git or .svn directory.
r'^(?:.+' + re.escape(os.path.sep) + r'|)\.(?:git|svn)$',
)
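# Illustrative helper, not part of the original module: this is roughly how the
# denylist above would be applied. For example 'foo/bar.pyc' and '.git' match,
# while 'src/main.py' does not (assuming POSIX path separators).
def _example_is_denylisted(path, denylist=DEFAULT_DENYLIST):
  return any(re.match(pattern, path) for pattern in denylist)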
class Error(Exception):
"""Generic runtime error."""
pass
class Aborted(Error):
"""Operation aborted."""
pass
class AlreadyExists(Error):
"""File already exists."""
def file_read(path, chunk_size=isolated_format.DISK_FILE_CHUNK, offset=0):
"""Yields file content in chunks of |chunk_size| starting from |offset|."""
with fs.open(path, 'rb') as f:
if offset:
f.seek(offset)
while True:
data = f.read(chunk_size)
if not data:
break
yield data
def fileobj_path(fileobj):
"""Return file system path for file like object or None.
The returned path is guaranteed to exist and can be passed to file system
operations like copy.
"""
name = getattr(fileobj, 'name', None)
if name is None:
return None
  # If the file like object was created by code outside our control (for
  # example via the standard library's open("test.txt")), name will end up
  # being a str. We want all our paths to be unicode objects, so we decode it.
if not isinstance(name, six.text_type):
# We incorrectly assume that UTF-8 is used everywhere.
name = name.decode('utf-8')
# fs.exists requires an absolute path, otherwise it will fail with an
# assertion error.
if not os.path.isabs(name):
return None
if fs.exists(name):
return name
return None
# TODO(tansell): Replace fileobj_copy with shutil.copyfileobj once proper file
# wrappers have been created.
def fileobj_copy(
dstfileobj, srcfileobj, size=-1,
chunk_size=isolated_format.DISK_FILE_CHUNK):
"""Copy data from srcfileobj to dstfileobj.
Providing size means exactly that amount of data will be copied (if there
isn't enough data, an IOError exception is thrown). Otherwise all data until
the EOF marker will be copied.
"""
if size == -1 and hasattr(srcfileobj, 'tell'):
if srcfileobj.tell() != 0:
raise IOError('partial file but not using size')
written = 0
while written != size:
readsize = chunk_size
if size > 0:
readsize = min(readsize, size-written)
data = srcfileobj.read(readsize)
if not data:
if size == -1:
break
raise IOError('partial file, got %s, wanted %s' % (written, size))
dstfileobj.write(data)
written += len(data)
def putfile(srcfileobj, dstpath, file_mode=None, size=-1, use_symlink=False):
"""Put srcfileobj at the given dstpath with given mode.
The function aims to do this as efficiently as possible while still allowing
any possible file like object be given.
Creating a tree of hardlinks has a few drawbacks:
- tmpfs cannot be used for the scratch space. The tree has to be on the same
partition as the cache.
- involves a write to the inode, which advances ctime, cause a metadata
writeback (causing disk seeking).
- cache ctime cannot be used to detect modifications / corruption.
- Some file systems (NTFS) have a 64k limit on the number of hardlink per
partition. This is why the function automatically fallbacks to copying the
file content.
- /proc/sys/fs/protected_hardlinks causes an additional check to ensure the
same owner is for all hardlinks.
- Anecdotal report that ext2 is known to be potentially faulty on high rate
of hardlink creation.
Creating a tree of symlinks has a few drawbacks:
- Tasks running the equivalent of os.path.realpath() will get the naked path
and may fail.
- Windows:
- Symlinks are reparse points:
https://msdn.microsoft.com/library/windows/desktop/aa365460.aspx
https://msdn.microsoft.com/library/windows/desktop/aa363940.aspx
- Symbolic links are Win32 paths, not NT paths.
https://googleprojectzero.blogspot.com/2016/02/the-definitive-guide-on-win32-to-nt.html
- Symbolic links are supported on Windows 7 and later only.
- SeCreateSymbolicLinkPrivilege is needed, which is not present by
default.
- SeCreateSymbolicLinkPrivilege is *stripped off* by UAC when a restricted
RID is present in the token;
https://msdn.microsoft.com/en-us/library/bb530410.aspx
"""
srcpath = fileobj_path(srcfileobj)
if srcpath and size == -1:
readonly = file_mode is None or (
file_mode & (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH))
if readonly:
# If the file is read only we can link the file
if use_symlink:
link_mode = file_path.SYMLINK_WITH_FALLBACK
else:
link_mode = file_path.HARDLINK_WITH_FALLBACK
else:
# If not read only, we must copy the file
link_mode = file_path.COPY
file_path.link_file(dstpath, srcpath, link_mode)
assert fs.exists(dstpath)
else:
# Need to write out the file
with fs.open(dstpath, 'wb') as dstfileobj:
fileobj_copy(dstfileobj, srcfileobj, size)
if sys.platform == 'win32' and file_mode and file_mode & stat.S_IWRITE:
# On windows, mode other than removing stat.S_IWRITE is ignored. Returns
# early to skip slow/unnecessary chmod call.
return
# file_mode of 0 is actually valid, so need explicit check.
if file_mode is not None:
fs.chmod(dstpath, file_mode)
def zip_compress(content_generator, level=7):
"""Reads chunks from |content_generator| and yields zip compressed chunks."""
compressor = zlib.compressobj(level)
for chunk in content_generator:
compressed = compressor.compress(chunk)
if compressed:
yield compressed
tail = compressor.flush(zlib.Z_FINISH)
if tail:
yield tail
def zip_decompress(
content_generator, chunk_size=isolated_format.DISK_FILE_CHUNK):
"""Reads zipped data from |content_generator| and yields decompressed data.
Decompresses data in small chunks (no larger than |chunk_size|) so that
zip bomb file doesn't cause zlib to preallocate huge amount of memory.
Raises IOError if data is corrupted or incomplete.
"""
decompressor = zlib.decompressobj()
compressed_size = 0
try:
for chunk in content_generator:
compressed_size += len(chunk)
data = decompressor.decompress(chunk, chunk_size)
if data:
yield data
while decompressor.unconsumed_tail:
data = decompressor.decompress(decompressor.unconsumed_tail, chunk_size)
if data:
yield data
tail = decompressor.flush()
if tail:
yield tail
except zlib.error as e:
raise IOError(
'Corrupted zip stream (read %d bytes) - %s' % (compressed_size, e))
# Ensure all data was read and decompressed.
if decompressor.unused_data or decompressor.unconsumed_tail:
raise IOError('Not all data was decompressed')
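# Illustrative round-trip sketch, not part of the original module: feeding the
# output of zip_compress back through zip_decompress reproduces the original
# chunked content.
def _example_zip_roundtrip():
  original = [b'hello ', b'isolate ', b'server']
  compressed = list(zip_compress(original))
  assert b''.join(zip_decompress(compressed)) == b''.join(original)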
def _get_zip_compression_level(filename):
"""Given a filename calculates the ideal zip compression level to use."""
file_ext = os.path.splitext(filename)[1].lower()
# TODO(csharp): Profile to find what compression level works best.
return 0 if file_ext in ALREADY_COMPRESSED_TYPES else 7
def create_directories(base_directory, files):
"""Creates the directory structure needed by the given list of files."""
logging.debug('create_directories(%s, %d)', base_directory, len(files))
# Creates the tree of directories to create.
directories = set(os.path.dirname(f) for f in files)
for item in list(directories):
while item:
directories.add(item)
item = os.path.dirname(item)
for d in sorted(directories):
if d:
abs_d = os.path.join(base_directory, d)
if not fs.isdir(abs_d):
fs.mkdir(abs_d)
def _create_symlinks(base_directory, files):
"""Creates any symlinks needed by the given set of files."""
for filepath, properties in files:
if 'l' not in properties:
continue
if sys.platform == 'win32':
# TODO(maruel): Create symlink via the win32 api.
logging.warning('Ignoring symlink %s', filepath)
continue
outfile = os.path.join(base_directory, filepath)
try:
os.symlink(properties['l'], outfile) # pylint: disable=E1101
except OSError as e:
if e.errno == errno.EEXIST:
raise AlreadyExists('File %s already exists.' % outfile)
raise
class _ThreadFile(object):
"""Multithreaded fake file. Used by TarBundle."""
def __init__(self):
self._data = threading_utils.TaskChannel()
self._offset = 0
def __iter__(self):
return self._data
def tell(self):
return self._offset
def write(self, b):
self._data.send_result(b)
self._offset += len(b)
def close(self):
self._data.send_done()
class FileItem(isolate_storage.Item):
"""A file to push to Storage.
Its digest and size may be provided in advance, if known. Otherwise they will
be derived from the file content.
"""
def __init__(self, path, algo, digest=None, size=None, high_priority=False):
super(FileItem, self).__init__(
digest,
size if size is not None else fs.stat(path).st_size,
high_priority,
compression_level=_get_zip_compression_level(path))
self._path = path
self._algo = algo
self._meta = None
@property
def path(self):
return self._path
@property
def algo(self):
return self._algo
@property
def digest(self):
if not self._digest:
self._digest = isolated_format.hash_file(self._path, self._algo)
return self._digest
@property
def meta(self):
if not self._meta:
# TODO(maruel): Inline.
self._meta = isolated_format.file_to_metadata(self.path, False)
# We need to hash right away.
self._meta['h'] = self.digest
return self._meta
def content(self):
return file_read(self.path)
class TarBundle(isolate_storage.Item):
"""Tarfile to push to Storage.
Its digest is the digest of all the files it contains. It is generated on the
fly.
"""
def __init__(self, root, algo):
# 2 trailing 512 bytes headers.
super(TarBundle, self).__init__(size=1024)
self._items = []
self._meta = None
self._algo = algo
self._root_len = len(root) + 1
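    # The +1 strips the trailing path separator so that
    # item.path[self._root_len:] yields a relative path.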
# Same value as for Go.
# https://chromium.googlesource.com/infra/luci/luci-go.git/+/master/client/archiver/tar_archiver.go
# https://chromium.googlesource.com/infra/luci/luci-go.git/+/master/client/archiver/upload_tracker.go
self._archive_max_size = int(10e6)
@property
def digest(self):
if not self._digest:
self._prepare()
return self._digest
@property
def size(self):
if self._size is None:
self._prepare()
return self._size
def try_add(self, item):
"""Try to add this file to the bundle.
It is extremely naive but this should be just enough for
https://crbug.com/825418.
Future improvements should be in the Go code, and the Swarming bot should be
migrated to use the Go code instead.
"""
if not item.size:
return False
# pylint: disable=unreachable
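    # Each tar entry is a 512 byte header followed by the content padded up to
    # a 512 byte boundary, hence the rounding.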
rounded = (item.size + 512) & ~511
if rounded + self._size > self._archive_max_size:
return False
# https://crbug.com/825418
return False
self._size += rounded
self._items.append(item)
return True
def yield_item_path_meta(self):
"""Returns a tuple(Item, filepath, meta_dict).
If the bundle contains less than 5 items, the items are yielded.
"""
if len(self._items) < 5:
# The tarball is too small, yield individual items, if any.
for item in self._items:
yield item, item.path[self._root_len:], item.meta
else:
# This ensures self._meta is set.
p = self.digest + '.tar'
# Yield itself as a tarball.
yield self, p, self._meta
def content(self):
"""Generates the tarfile content on the fly."""
obj = _ThreadFile()
def _tar_thread():
try:
t = tarfile.open(
fileobj=obj, mode='w', format=tarfile.PAX_FORMAT, encoding='utf-8')
for item in self._items:
logging.info(' tarring %s', item.path)
t.add(item.path)
t.close()
except Exception:
logging.exception('Internal failure')
finally:
obj.close()
t = threading.Thread(target=_tar_thread)
t.start()
try:
for data in obj:
yield data
finally:
t.join()
def _prepare(self):
h = self._algo()
total = 0
for chunk in self.content():
h.update(chunk)
total += len(chunk)
# pylint: disable=attribute-defined-outside-init
# This is not true, they are defined in Item.__init__().
self._digest = h.hexdigest()
self._size = total
self._meta = {
'h': self.digest,
's': self.size,
't': u'tar',
}
class BufferItem(isolate_storage.Item):
"""A byte buffer to push to Storage."""
def __init__(self, buf, algo, high_priority=False):
super(BufferItem, self).__init__(
digest=algo(buf).hexdigest(),
size=len(buf),
high_priority=high_priority)
self._buffer = buf
def content(self):
return [self._buffer]
class Storage(object):
"""Efficiently downloads or uploads large set of files via StorageApi.
Implements compression support, parallel 'contains' checks, parallel uploads
and more.
  Works only within a single namespace (and thus the hashing algorithm and
  compression scheme are fixed).
Spawns multiple internal threads. Thread safe, but not fork safe. Modifies
signal handlers table to handle Ctrl+C.
"""
def __init__(self, storage_api):
self._storage_api = storage_api
self._cpu_thread_pool = None
self._net_thread_pool = None
self._aborted = False
self._prev_sig_handlers = {}
@property
def server_ref(self):
"""Shortcut to get the server_ref from storage_api.
This can be used to get the underlying hash_algo.
"""
return self._storage_api.server_ref
@property
def cpu_thread_pool(self):
"""ThreadPool for CPU-bound tasks like zipping."""
if self._cpu_thread_pool is None:
threads = max(threading_utils.num_processors(), 2)
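      # long() only exists on Python 2; both branches evaluate to 2**32 and are
      # used to detect a 32 bit userland via sys.maxsize.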
max_size = long(2)**32 if sys.version_info.major == 2 else 2**32
if sys.maxsize <= max_size:
# On 32 bits userland, do not try to use more than 16 threads.
threads = min(threads, 16)
self._cpu_thread_pool = threading_utils.ThreadPool(2, threads, 0, 'zip')
return self._cpu_thread_pool
@property
def net_thread_pool(self):
"""AutoRetryThreadPool for IO-bound tasks, retries IOError."""
if self._net_thread_pool is None:
self._net_thread_pool = threading_utils.IOAutoRetryThreadPool()
return self._net_thread_pool
def close(self):
"""Waits for all pending tasks to finish."""
logging.info('Waiting for all threads to die...')
if self._cpu_thread_pool:
self._cpu_thread_pool.join()
self._cpu_thread_pool.close()
self._cpu_thread_pool = None
if self._net_thread_pool:
self._net_thread_pool.join()
self._net_thread_pool.close()
self._net_thread_pool = None
logging.info('Done.')
def abort(self):
"""Cancels any pending or future operations."""
    # This is not strictly threadsafe, but in the worst case the logging message
# will be printed twice. Not a big deal. In other places it is assumed that
# unprotected reads and writes to _aborted are serializable (it is true
# for python) and thus no locking is used.
if not self._aborted:
logging.warning('Aborting... It can take a while.')
self._aborted = True
def __enter__(self):
"""Context manager interface."""
assert not self._prev_sig_handlers, self._prev_sig_handlers
for s in (signal.SIGINT, signal.SIGTERM):
self._prev_sig_handlers[s] = signal.signal(s, lambda *_args: self.abort())
return self
def __exit__(self, _exc_type, _exc_value, _traceback):
"""Context manager interface."""
self.close()
while self._prev_sig_handlers:
s, h = self._prev_sig_handlers.popitem()
signal.signal(s, h)
return False
def upload_items(self, items, verify_push=False):
"""Uploads a generator of Item to the isolate server.
It figures out what items are missing from the server and uploads only them.
It uses 3 threads internally:
- One to create batches based on a timeout
- One to dispatch the /contains RPC and field the missing entries
- One to field the /push RPC
    The main thread enumerates 'items' and pushes to the first thread. Then it
    join()s all the threads, waiting for them to complete.
(enumerate items of Item, this can be slow as disk is traversed)
|
v
_create_items_batches_thread Thread #1
(generates list(Item), every 3s or 20~100 items)
|
v
_do_lookups_thread Thread #2
| |
v v
(missing) (was on server)
|
v
_handle_missing_thread Thread #3
|
v
(upload Item, append to uploaded)
Arguments:
items: list of isolate_storage.Item instances that represents data to
upload.
verify_push: verify files are uploaded correctly by fetching from server.
Returns:
List of items that were uploaded. All other items are already there.
Raises:
The first exception being raised in the worker threads.
"""
incoming = Queue.Queue()
batches_to_lookup = Queue.Queue()
missing = Queue.Queue()
uploaded = []
exc_channel = threading_utils.TaskChannel()
def _create_items_batches_thread():
"""Creates batches for /contains RPC lookup from individual items.
Input: incoming
Output: batches_to_lookup
"""
try:
batch_size_index = 0
batch_size = ITEMS_PER_CONTAINS_QUERIES[batch_size_index]
batch = []
while not self._aborted:
try:
item = incoming.get(True, timeout=3)
if item:
batch.append(item)
except Queue.Empty:
item = False
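          # Flush the batch when it is full, or when the 3s poll timed out (or
          # the end sentinel arrived) while items are pending; the batch size
          # steps through ITEMS_PER_CONTAINS_QUERIES each time a full batch is
          # flushed.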
if len(batch) == batch_size or (not item and batch):
if len(batch) == batch_size:
batch_size_index += 1
batch_size = ITEMS_PER_CONTAINS_QUERIES[
min(batch_size_index, len(ITEMS_PER_CONTAINS_QUERIES)-1)]
batches_to_lookup.put(batch)
batch = []
if item is None:
break
except Exception:
exc_channel.send_exception()
finally:
# Unblock the next pipeline.
batches_to_lookup.put(None)
def _do_lookups_thread():
"""Enqueues all the /contains RPCs and emits the missing items.
Input: batches_to_lookup
Output: missing, to_upload
"""
try:
channel = threading_utils.TaskChannel()
def _contains(b):
if self._aborted:
raise Aborted()
return self._storage_api.contains(b)
pending_contains = 0
while not self._aborted:
batch = batches_to_lookup.get()
if batch is None:
break
self.net_thread_pool.add_task_with_channel(
channel, threading_utils.PRIORITY_HIGH, _contains, batch)
pending_contains += 1
while pending_contains and not self._aborted:
try:
v = channel.next(timeout=0)
except threading_utils.TaskChannel.Timeout:
break
pending_contains -= 1
for missing_item, push_state in v.items():
missing.put((missing_item, push_state))
while pending_contains and not self._aborted:
for missing_item, push_state in channel.next().items():
missing.put((missing_item, push_state))
pending_contains -= 1
except Exception:
exc_channel.send_exception()
finally:
# Unblock the next pipeline.
missing.put((None, None))
def _handle_missing_thread():
"""Sends the missing items to the uploader.
Input: missing
Output: uploaded
"""
try:
with threading_utils.DeadlockDetector(DEADLOCK_TIMEOUT) as detector:
channel = threading_utils.TaskChannel()
pending_upload = 0
while not self._aborted:
try:
missing_item, push_state = missing.get(True, timeout=5)
if missing_item is None:
break
self._async_push(channel, missing_item, push_state, verify_push)
pending_upload += 1
except Queue.Empty:
pass
detector.ping()
while not self._aborted and pending_upload:
try:
item = channel.next(timeout=0)
except threading_utils.TaskChannel.Timeout:
break
uploaded.append(item)
pending_upload -= 1
logging.debug('Uploaded %d; %d pending: %s (%d)', len(uploaded),
pending_upload, item.digest, item.size)
while not self._aborted and pending_upload:
item = channel.next()
uploaded.append(item)
pending_upload -= 1
logging.debug(
'Uploaded %d; %d pending: %s (%d)',
len(uploaded), pending_upload, item.digest, item.size)
except Exception:
exc_channel.send_exception()
threads = [
threading.Thread(target=_create_items_batches_thread),
threading.Thread(target=_do_lookups_thread),
threading.Thread(target=_handle_missing_thread),
]
for t in threads:
t.start()
try:
# For each digest keep only first isolate_storage.Item that matches it.
# All other items are just indistinguishable copies from the point of view
# of isolate server (it doesn't care about paths at all, only content and
# digests).
seen = {}
try:
# TODO(maruel): Reorder the items as a priority queue, with larger items
# being processed first. This is, before hashing the data.
# This must be done in the primary thread since items can be a
# generator.
for item in items:
if seen.setdefault(item.digest, item) is item:
incoming.put(item)
finally:
incoming.put(None)
finally:
for t in threads:
t.join()
exc_channel.send_done()
for _ in exc_channel:
# If there is no exception, this loop does nothing. Otherwise, it raises
# the first exception put onto |exc_channel|.
pass
logging.info('All %s files are uploaded', len(uploaded))
if seen:
_print_upload_stats(seen.values(), uploaded)
return uploaded
def _async_push(self, channel, item, push_state, verify_push=False):
"""Starts asynchronous push to the server in a parallel thread.
Can be used only after |item| was checked for presence on a server with a
/contains RPC.
Arguments:
channel: TaskChannel that receives back |item| when upload ends.
item: item to upload as instance of isolate_storage.Item class.
push_state: push state returned by storage_api.contains(). It contains
storage specific information describing how to upload the item (for
example in case of cloud storage, it is signed upload URLs).
verify_push: verify files are uploaded correctly by fetching from server.
Returns:
None, but |channel| later receives back |item| when upload ends.
"""
# Thread pool task priority.
priority = (
threading_utils.PRIORITY_HIGH if item.high_priority
else threading_utils.PRIORITY_MED)
def _push(content):
"""Pushes an isolate_storage.Item and returns it to |channel|."""
if self._aborted:
raise Aborted()
self._storage_api.push(item, push_state, content)
if verify_push:
try:
self._fetch(
item.digest,
item.size,
# this consumes all elements from given generator.
lambda gen: collections.deque(gen, maxlen=0))
except Exception:
# reset push_state if failed to verify.
push_state.finalized = False
push_state.uploaded = False
raise
return item
# If zipping is not required, just start a push task. Don't pass 'content'
# so that it can create a new generator when it retries on failures.
if not self.server_ref.is_with_compression:
self.net_thread_pool.add_task_with_channel(channel, priority, _push, None)
return
# If zipping is enabled, zip in a separate thread.
def zip_and_push():
# TODO(vadimsh): Implement streaming uploads. Before it's done, assemble
      # content right here. It will block until the whole file is zipped.
try:
if self._aborted:
raise Aborted()
stream = zip_compress(item.content(), item.compression_level)
# In Python3, zlib.compress returns a byte object instead of str.
data = six.b('').join(stream)
except Exception as exc:
logging.error('Failed to zip \'%s\': %s', item, exc)
channel.send_exception()
return
      # Pass '[data]' explicitly because the compressed data is not the same as the
# one provided by 'item'. Since '[data]' is a list, it can safely be
# reused during retries.
self.net_thread_pool.add_task_with_channel(
channel, priority, _push, [data])
self.cpu_thread_pool.add_task(priority, zip_and_push)
def push(self, item, push_state):
"""Synchronously pushes a single item to the server.
If you need to push many items at once, consider using 'upload_items' or
'_async_push' with instance of TaskChannel.
Arguments:
item: item to upload as instance of isolate_storage.Item class.
push_state: push state returned by storage_api.contains(). It contains
storage specific information describing how to upload the item (for
example in case of cloud storage, it is signed upload URLs).
Returns:
Pushed item (same object as |item|).
"""
channel = threading_utils.TaskChannel()
with threading_utils.DeadlockDetector(DEADLOCK_TIMEOUT):
self._async_push(channel, item, push_state)
pushed = channel.next()
assert pushed is item
return item
def _fetch(self, digest, size, sink):
try:
# Prepare reading pipeline.
stream = self._storage_api.fetch(digest, size, 0)
if self.server_ref.is_with_compression:
stream = zip_decompress(stream, isolated_format.DISK_FILE_CHUNK)
# Run |stream| through verifier that will assert its size.
verifier = FetchStreamVerifier(stream, self.server_ref.hash_algo, digest,
size)
# Verified stream goes to |sink|.
sink(verifier.run())
except Exception:
logging.exception('Failed to fetch %s', digest)
raise
def async_fetch(self, channel, priority, digest, size, sink):
"""Starts asynchronous fetch from the server in a parallel thread.
Arguments:
channel: TaskChannel that receives back |digest| when download ends.
priority: thread pool task priority for the fetch.
digest: hex digest of an item to download.
size: expected size of the item (after decompression).
sink: function that will be called as sink(generator).
"""
def fetch():
self._fetch(digest, size, sink)
return digest
# Don't bother with zip_thread_pool for decompression. Decompression is
# really fast and most probably IO bound anyway.
self.net_thread_pool.add_task_with_channel(channel, priority, fetch)
class FetchQueue(object):
"""Fetches items from Storage and places them into ContentAddressedCache.
It manages multiple concurrent fetch operations. Acts as a bridge between
Storage and ContentAddressedCache so that Storage and ContentAddressedCache
don't depend on each other at all.
"""
def __init__(self, storage, cache):
self.storage = storage
self.cache = cache
self._channel = threading_utils.TaskChannel()
self._pending = set()
self._accessed = set()
self._fetched = set(cache)
# Pending digests that the caller waits for, see wait_on()/wait().
self._waiting_on = set()
# Already fetched digests the caller waits for which are not yet returned by
# wait().
self._waiting_on_ready = set()
def add(
self,
digest,
size=local_caching.UNKNOWN_FILE_SIZE,
priority=threading_utils.PRIORITY_MED):
"""Starts asynchronous fetch of item |digest|."""
# Fetching it now?
if digest in self._pending:
return
# Mark this file as in use, verify_all_cached will later ensure it is still
# in cache.
self._accessed.add(digest)
# Already fetched? Notify cache to update item's LRU position.
if digest in self._fetched:
# 'touch' returns True if item is in cache and not corrupted.
if self.cache.touch(digest, size):
return
logging.error('%s is corrupted', digest)
self._fetched.remove(digest)
# TODO(maruel): It should look at the free disk space, the current cache
# size and the size of the new item on every new item:
# - Trim the cache as more entries are listed when free disk space is low,
# otherwise if the amount of data downloaded during the run > free disk
# space, it'll crash.
# - Make sure there's enough free disk space to fit all dependencies of
# this run! If not, abort early.
# Start fetching.
self._pending.add(digest)
self.storage.async_fetch(
self._channel, priority, digest, size,
functools.partial(self.cache.write, digest))
def wait_on(self, digest):
"""Updates digests to be waited on by 'wait'."""
# Calculate once the already fetched items. These will be retrieved first.
if digest in self._fetched:
self._waiting_on_ready.add(digest)
else:
self._waiting_on.add(digest)
def wait(self):
"""Waits until any of waited-on items is retrieved.
    Once this happens, it is removed from the waited-on set and returned.
    This function is called in two waves. In the first wave it is called for
    HIGH priority items, the isolated files themselves. In the second wave it
    is called for all the files.
If the waited-on set is empty, raises RuntimeError.
"""
# Flush any already fetched items.
if self._waiting_on_ready:
return self._waiting_on_ready.pop()
assert self._waiting_on, 'Needs items to wait on'
# Wait for one waited-on item to be fetched.
while self._pending:
digest = self._channel.next()
self._pending.remove(digest)
self._fetched.add(digest)
if digest in self._waiting_on:
self._waiting_on.remove(digest)
return digest
# Should never reach this point due to assert above.
raise RuntimeError('Impossible state')
@property
def wait_queue_empty(self):
"""Returns True if there is no digest left for wait() to return."""
return not self._waiting_on and not self._waiting_on_ready
def inject_local_file(self, path, algo):
"""Adds local file to the cache as if it was fetched from storage."""
with fs.open(path, 'rb') as f:
data = f.read()
digest = algo(data).hexdigest()
self.cache.write(digest, [data])
self._fetched.add(digest)
return digest
@property
def pending_count(self):
"""Returns number of items to be fetched."""
return len(self._pending)
def verify_all_cached(self):
"""True if all accessed items are in cache."""
# Not thread safe, but called after all work is done.
return self._accessed.issubset(self.cache)
class FetchStreamVerifier(object):
"""Verifies that fetched file is valid before passing it to the
ContentAddressedCache.
"""
def __init__(self, stream, hasher, expected_digest, expected_size):
"""Initializes the verifier.
Arguments:
* stream: an iterable yielding chunks of content
* hasher: an object from hashlib that supports update() and hexdigest()
(eg, hashlib.sha1).
* expected_digest: if the entire stream is piped through hasher and then
summarized via hexdigest(), this should be the result. That is, it
should be a hex string like 'abc123'.
* expected_size: either the expected size of the stream, or
local_caching.UNKNOWN_FILE_SIZE.
"""
assert stream is not None
self.stream = stream
self.expected_digest = expected_digest
self.expected_size = expected_size
self.current_size = 0
self.rolling_hash = hasher()
def run(self):
"""Generator that yields same items as |stream|.
Verifies |stream| is complete before yielding a last chunk to consumer.
Also wraps IOError produced by consumer into MappingError exceptions since
otherwise Storage will retry fetch on unrelated local cache errors.
"""
# Read one chunk ahead, keep it in |stored|.
# That way a complete stream can be verified before pushing last chunk
# to consumer.
stored = None
for chunk in self.stream:
assert chunk is not None
if stored is not None:
self._inspect_chunk(stored, is_last=False)
try:
yield stored
except IOError as exc:
raise isolated_format.MappingError(
'Failed to store an item in cache: %s' % exc)
stored = chunk
if stored is not None:
self._inspect_chunk(stored, is_last=True)
try:
yield stored
except IOError as exc:
raise isolated_format.MappingError(
'Failed to store an item in cache: %s' % exc)
def _inspect_chunk(self, chunk, is_last):
"""Called for each fetched chunk before passing it to consumer."""
self.current_size += len(chunk)
self.rolling_hash.update(chunk)
if not is_last:
return
if ((self.expected_size != local_caching.UNKNOWN_FILE_SIZE) and
(self.expected_size != self.current_size)):
msg = 'Incorrect file size: want %d, got %d' % (
self.expected_size, self.current_size)
raise IOError(msg)
actual_digest = self.rolling_hash.hexdigest()
if self.expected_digest != actual_digest:
msg = 'Incorrect digest: want %s, got %s' % (
self.expected_digest, actual_digest)
raise IOError(msg)
class IsolatedBundle(object):
"""Fetched and parsed .isolated file with all dependencies."""
def __init__(self, filter_cb):
"""
filter_cb: callback function to filter downloaded content.
When filter_cb is not None, Isolated file is downloaded iff
filter_cb(filepath) returns True.
"""
self.command = []
self.files = {}
self.relative_cwd = None
# The main .isolated file, a IsolatedFile instance.
self.root = None
self._filter_cb = filter_cb
def fetch(self, fetch_queue, root_isolated_hash, algo):
"""Fetches the .isolated and all the included .isolated.
It enables support for "included" .isolated files. They are processed in
strict order but fetched asynchronously from the cache. This is important so
that a file in an included .isolated file that is overridden by an embedding
.isolated file is not fetched needlessly. The includes are fetched in one
pass and the files are fetched as soon as all the ones on the left-side
of the tree were fetched.
The prioritization is very important here for nested .isolated files.
'includes' have the highest priority and the algorithm is optimized for both
deep and wide trees. A deep one is a long link of .isolated files referenced
one at a time by one item in 'includes'. A wide one has a large number of
'includes' in a single .isolated file. 'left' is defined as an included
.isolated file earlier in the 'includes' list. So the order of the elements
in 'includes' is important.
As a side effect this method starts asynchronous fetch of all data files
by adding them to |fetch_queue|. It doesn't wait for data files to finish
fetching though.
"""
self.root = isolated_format.IsolatedFile(root_isolated_hash, algo)
# Isolated files being retrieved now: hash -> IsolatedFile instance.
pending = {}
# Set of hashes of already retrieved items to refuse recursive includes.
seen = set()
    # Set of IsolatedFile's whose data files have already been fetched.
processed = set()
def retrieve_async(isolated_file):
"""Retrieves an isolated file included by the root bundle."""
h = isolated_file.obj_hash
if h in seen:
raise isolated_format.IsolatedError(
'IsolatedFile %s is retrieved recursively' % h)
assert h not in pending
seen.add(h)
pending[h] = isolated_file
# This isolated item is being added dynamically, notify FetchQueue.
fetch_queue.wait_on(h)
fetch_queue.add(h, priority=threading_utils.PRIORITY_HIGH)
# Start fetching root *.isolated file (single file, not the whole bundle).
retrieve_async(self.root)
while pending:
# Wait until some *.isolated file is fetched, parse it.
item_hash = fetch_queue.wait()
item = pending.pop(item_hash)
with fetch_queue.cache.getfileobj(item_hash) as f:
item.load(f.read())
# Start fetching included *.isolated files.
for new_child in item.children:
retrieve_async(new_child)
# Always fetch *.isolated files in traversal order, waiting if necessary
# until next to-be-processed node loads. "Waiting" is done by yielding
# back to the outer loop, that waits until some *.isolated is loaded.
for node in isolated_format.walk_includes(self.root):
if node not in processed:
# Not visited, and not yet loaded -> wait for it to load.
if not node.is_loaded:
break
# Not visited and loaded -> process it and continue the traversal.
self._start_fetching_files(node, fetch_queue)
processed.add(node)
# All *.isolated files should be processed by now and only them.
all_isolateds = set(isolated_format.walk_includes(self.root))
assert all_isolateds == processed, (all_isolateds, processed)
assert fetch_queue.wait_queue_empty, 'FetchQueue should have been emptied'
# Extract 'command' and other bundle properties.
for node in isolated_format.walk_includes(self.root):
self._update_self(node)
self.relative_cwd = self.relative_cwd or ''
def _start_fetching_files(self, isolated, fetch_queue):
"""Starts fetching files from |isolated| that are not yet being fetched.
Modifies self.files.
"""
files = isolated.data.get('files', {})
logging.debug('fetch_files(%s, %d)', isolated.obj_hash, len(files))
for filepath, properties in files.items():
if self._filter_cb and not self._filter_cb(filepath):
continue
# Root isolated has priority on the files being mapped. In particular,
# overridden files must not be fetched.
if filepath not in self.files:
self.files[filepath] = properties
# Preemptively request hashed files.
if 'h' in properties:
fetch_queue.add(
properties['h'], properties['s'], threading_utils.PRIORITY_MED)
def _update_self(self, node):
"""Extracts bundle global parameters from loaded *.isolated file.
Will be called with each loaded *.isolated file in order of traversal of
isolated include graph (see isolated_format.walk_includes).
"""
# Grabs properties.
if not self.command and node.data.get('command'):
# Ensure paths are correctly separated on windows.
self.command = node.data['command']
if self.command:
self.command[0] = self.command[0].replace('/', os.path.sep)
if (self.relative_cwd is None and
node.data.get('relative_cwd') is not None):
self.relative_cwd = node.data['relative_cwd']
def get_storage(server_ref):
"""Returns Storage class that can upload and download from |namespace|.
Arguments:
server_ref: isolate_storage.ServerRef instance.
Returns:
Instance of Storage.
"""
# Handle the specific internal use case.
assert (isinstance(server_ref, isolate_storage.ServerRef) or
type(server_ref).__name__ == 'ServerRef'), repr(server_ref)
return Storage(isolate_storage.get_storage_api(server_ref))
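# Typical usage (illustrative sketch): the context manager installs signal
# handlers and tears the thread pools down on exit.
#   server_ref = isolate_storage.ServerRef(url, namespace)
#   with get_storage(server_ref) as storage:
#     storage.upload_items([FileItem(path=p, algo=server_ref.hash_algo)])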
def _map_file(dst, digest, props, cache, use_symlinks):
"""Put downloaded file to destination path. This function is used for multi
threaded file putting.
"""
with tools.Profiler("_map_file for %s" % dst):
with cache.getfileobj(digest) as srcfileobj:
filetype = props.get('t', 'basic')
if filetype == 'basic':
# Ignore all bits apart from the user.
file_mode = (props.get('m') or 0o500) & 0o700
putfile(srcfileobj, dst, file_mode, use_symlink=use_symlinks)
elif filetype == 'tar':
basedir = os.path.dirname(dst)
with tarfile.TarFile(fileobj=srcfileobj, encoding='utf-8') as t:
ensured_dirs = set()
for ti in t:
if not ti.isfile():
logging.warning('Path(%r) is nonfile (%s), skipped', ti.name,
ti.type)
continue
# Handle files created on Windows fetched on POSIX and the
# reverse.
other_sep = '/' if os.path.sep == '\\' else '\\'
name = ti.name.replace(other_sep, os.path.sep)
fp = os.path.normpath(os.path.join(basedir, name))
if not fp.startswith(basedir):
logging.error('Path(%r) is outside root directory', fp)
ifd = t.extractfile(ti)
fp_dir = os.path.dirname(fp)
if fp_dir not in ensured_dirs:
file_path.ensure_tree(fp_dir)
ensured_dirs.add(fp_dir)
file_mode = ti.mode & 0o700
putfile(ifd, fp, file_mode, ti.size)
else:
raise isolated_format.IsolatedError('Unknown file type %r' % filetype)
def fetch_isolated(isolated_hash, storage, cache, outdir, use_symlinks,
filter_cb=None):
"""Aggressively downloads the .isolated file(s), then download all the files.
Arguments:
isolated_hash: hash of the root *.isolated file.
storage: Storage class that communicates with isolate storage.
cache: ContentAddressedCache class that knows how to store and map files
locally.
outdir: Output directory to map file tree to.
use_symlinks: Use symlinks instead of hardlinks when True.
filter_cb: filter that works as allowlist for downloaded files.
Returns:
IsolatedBundle object that holds details about loaded *.isolated file.
"""
logging.debug(
'fetch_isolated(%s, %s, %s, %s, %s)',
isolated_hash, storage, cache, outdir, use_symlinks)
# Hash algorithm to use, defined by namespace |storage| is using.
algo = storage.server_ref.hash_algo
fetch_queue = FetchQueue(storage, cache)
bundle = IsolatedBundle(filter_cb)
with tools.Profiler('GetIsolateds'):
# Optionally support local files by manually adding them to cache.
if not isolated_format.is_valid_hash(isolated_hash, algo):
logging.debug(
'%s is not a valid hash, assuming a file '
'(algo was %s, hash size was %d)', isolated_hash, algo(),
algo().digest_size)
path = six.text_type(os.path.abspath(isolated_hash))
try:
isolated_hash = fetch_queue.inject_local_file(path, algo)
except IOError as e:
raise isolated_format.MappingError(
            '%s doesn\'t seem to be a valid file. Did you intend to pass a '
'valid hash (error: %s)?' % (isolated_hash, e))
# Load all *.isolated and start loading rest of the files.
bundle.fetch(fetch_queue, isolated_hash, algo)
with tools.Profiler('GetRest'):
# Create file system hierarchy.
file_path.ensure_tree(outdir)
create_directories(outdir, bundle.files)
_create_symlinks(outdir, bundle.files.items())
# Ensure working directory exists.
cwd = os.path.normpath(os.path.join(outdir, bundle.relative_cwd))
file_path.ensure_tree(cwd)
# Multimap: digest -> list of pairs (path, props).
remaining = {}
for filepath, props in bundle.files.items():
if 'h' in props:
remaining.setdefault(props['h'], []).append((filepath, props))
fetch_queue.wait_on(props['h'])
# Now block on the remaining files to be downloaded and mapped.
logging.info('Retrieving remaining files (%d of them)...',
fetch_queue.pending_count)
last_update = time.time()
with threading_utils.ThreadPool(2, 32, 32) as putfile_thread_pool:
with threading_utils.DeadlockDetector(DEADLOCK_TIMEOUT) as detector:
while remaining:
detector.ping()
# Wait for any item to finish fetching to cache.
digest = fetch_queue.wait()
# Create the files in the destination using item in cache as the
# source.
for filepath, props in remaining.pop(digest):
fullpath = os.path.join(outdir, filepath)
putfile_thread_pool.add_task(threading_utils.PRIORITY_HIGH,
_map_file, fullpath, digest, props,
cache, use_symlinks)
# Report progress.
duration = time.time() - last_update
if duration > DELAY_BETWEEN_UPDATES_IN_SECS:
msg = '%d files remaining...' % len(remaining)
sys.stdout.write(msg + '\n')
sys.stdout.flush()
logging.info(msg)
last_update = time.time()
assert fetch_queue.wait_queue_empty, 'FetchQueue should have been emptied'
putfile_thread_pool.join()
  # Save the cache right away so as not to lose the state of the new objects.
cache.save()
# Cache could evict some items we just tried to fetch, it's a fatal error.
if not fetch_queue.verify_all_cached():
free_disk = file_path.get_free_space(cache.cache_dir)
msg = (
'Cache is too small to hold all requested files.\n'
' %s\n cache=%dbytes, %d items; %sb free_space') % (
cache.policies, cache.total_size, len(cache), free_disk)
raise isolated_format.MappingError(msg)
return bundle
def _directory_to_metadata(root, algo, denylist):
"""Yields every file and/or symlink found.
Yields:
tuple(FileItem, relpath, metadata)
For a symlink, FileItem is None.
"""
# Current tar file bundle, if any.
root = file_path.get_native_path_case(root)
bundle = TarBundle(root, algo)
for relpath, issymlink in isolated_format.expand_directory_and_symlink(
root,
u'.' + os.path.sep,
denylist,
follow_symlinks=(sys.platform != 'win32')):
filepath = os.path.join(root, relpath)
if issymlink:
# TODO(maruel): Do not call this.
meta = isolated_format.file_to_metadata(filepath, False)
yield None, relpath, meta
continue
prio = relpath.endswith('.isolated')
if bundle.try_add(FileItem(path=filepath, algo=algo, high_priority=prio)):
# The file was added to the current pending tarball and won't be archived
# individually.
continue
# Flush and reset the bundle.
for i, p, m in bundle.yield_item_path_meta():
yield i, p, m
bundle = TarBundle(root, algo)
# Yield the file individually.
item = FileItem(path=filepath, algo=algo, size=None, high_priority=prio)
yield item, relpath, item.meta
for i, p, m in bundle.yield_item_path_meta():
yield i, p, m
def _print_upload_stats(items, missing):
"""Prints upload stats."""
total = len(items)
total_size = sum(f.size for f in items)
logging.info(
'Total: %6d, %9.1fkiB', total, total_size / 1024.)
cache_hit = set(items).difference(missing)
cache_hit_size = sum(f.size for f in cache_hit)
logging.info(
'cache hit: %6d, %9.1fkiB, %6.2f%% files, %6.2f%% size',
len(cache_hit),
cache_hit_size / 1024.,
len(cache_hit) * 100. / total,
cache_hit_size * 100. / total_size if total_size else 0)
cache_miss = missing
cache_miss_size = sum(f.size for f in cache_miss)
logging.info('cache miss: %6d, %9.1fkiB, %6.2f%% files, %6.2f%% size',
len(cache_miss), cache_miss_size / 1024.,
len(cache_miss) * 100. / total,
cache_miss_size * 100. / total_size if total_size else 0)
def _enqueue_dir(dirpath, denylist, hash_algo, hash_algo_name):
"""Called by archive_files_to_storage for a directory.
Create an .isolated file.
Yields:
FileItem for every file found, plus one for the .isolated file itself.
"""
files = {}
for item, relpath, meta in _directory_to_metadata(dirpath, hash_algo,
denylist):
# item is None for a symlink.
files[relpath] = meta
if item:
yield item
  # TODO(maruel): If there is no file, don't yield an .isolated file.
data = {
'algo': hash_algo_name,
'files': files,
'version': isolated_format.ISOLATED_FILE_VERSION,
}
# Keep the file in memory. This is fine because .isolated files are relatively
# small.
yield BufferItem(
tools.format_json(data, True).encode(),
algo=hash_algo,
high_priority=True)
def _archive_files_to_storage_internal(storage,
files,
denylist,
verify_push=False):
"""Stores every entry into remote storage and returns stats.
Arguments:
storage: a Storage object that communicates with the remote object store.
files: iterable of files to upload. If a directory is specified (with a
trailing slash), a .isolated file is created and its hash is returned.
Duplicates are skipped.
denylist: function that returns True if a file should be omitted.
verify_push: verify files are uploaded correctly by fetching from server.
Returns:
tuple(OrderedDict(path: hash), list(FileItem cold), list(FileItem hot)).
The first file in the first item is always the .isolated file.
Raises:
Re-raises the exception in upload_items(), if there is any.
"""
# Dict of path to hash.
results = collections.OrderedDict()
hash_algo = storage.server_ref.hash_algo
hash_algo_name = storage.server_ref.hash_algo_name
# Generator of FileItem to pass to upload_items() concurrent operation.
channel = threading_utils.TaskChannel()
exc_channel = threading_utils.TaskChannel()
uploaded_digests = set()
def _upload_items():
try:
results = storage.upload_items(channel, verify_push)
uploaded_digests.update(f.digest for f in results)
except Exception:
exc_channel.send_exception()
t = threading.Thread(target=_upload_items)
t.start()
# Keep track locally of the items to determine cold and hot items.
items_found = []
try:
for f in files:
assert isinstance(f, six.text_type), repr(f)
if f in results:
# Duplicate
continue
try:
filepath = os.path.abspath(f)
if fs.isdir(filepath):
# Uploading a whole directory.
item = None
for item in _enqueue_dir(filepath, denylist, hash_algo,
hash_algo_name):
channel.send_result(item)
items_found.append(item)
# The very last item will be the .isolated file.
if not item:
# There was no file in the directory.
continue
elif fs.isfile(filepath):
item = FileItem(
path=filepath,
algo=hash_algo,
size=None,
high_priority=f.endswith('.isolated'))
channel.send_result(item)
items_found.append(item)
else:
          raise Error('%s is neither a file nor a directory.' % f)
results[f] = item.digest
except OSError:
raise Error('Failed to process %s.' % f)
finally:
# Stops the generator, so _upload_items() can exit.
channel.send_done()
t.join()
exc_channel.send_done()
try:
for _ in exc_channel:
pass
except Exception:
# log items when failed to upload files.
for item in items_found:
if isinstance(item, FileItem):
logging.error('FileItem path: %s, digest:%s, re-calculated digest:%s',
item.path, item.digest,
isolated_format.hash_file(item.path, item.algo))
continue
logging.error('Item digest:%s', item.digest)
raise
cold = []
hot = []
for i in items_found:
# Note that multiple FileItem may have the same .digest.
if i.digest in uploaded_digests:
cold.append(i)
else:
hot.append(i)
return results, cold, hot
# TODO(crbug.com/1073832):
# remove this if process leak in coverage build was fixed.
def archive_files_to_storage(storage, files, denylist, verify_push=False):
"""Calls _archive_files_to_storage_internal with retry.
Arguments:
See Arguments section in _archive_files_to_storage_internal
Returns:
See Returns section in _archive_files_to_storage_internal
Raises:
Re-raises the exception in _archive_files_to_storage_internal if all retry
failed.
"""
# Will do exponential backoff.
# e.g. 10, 20, 40, 80
backoff = 10
while True:
try:
return _archive_files_to_storage_internal(storage, files, denylist,
verify_push)
except Exception:
if backoff > 100:
raise
on_error.report('error before %d second backoff' % backoff)
logging.exception(
'failed to run _archive_files_to_storage_internal,'
' will retry after %d seconds', backoff)
time.sleep(backoff)
backoff *= 2
@subcommand.usage('<file1..fileN> or - to read from stdin')
def CMDarchive(parser, args):
"""Archives data to the server.
  If a directory is specified, a .isolated file is created and the whole
  directory is uploaded. Then this .isolated file can be included in another
  one to run commands.
  The command outputs each file that was processed with its content hash. For
directories, the .isolated generated for the directory is listed as the
directory entry itself.
"""
add_isolate_server_options(parser)
add_archive_options(parser)
options, files = parser.parse_args(args)
process_isolate_server_options(parser, options, True)
server_ref = isolate_storage.ServerRef(
options.isolate_server, options.namespace)
if files == ['-']:
files = (l.rstrip('\n\r') for l in sys.stdin)
if not files:
parser.error('Nothing to upload')
files = (six.ensure_text(f) for f in files)
denylist = tools.gen_denylist(options.blacklist)
try:
with get_storage(server_ref) as storage:
results, _cold, _hot = archive_files_to_storage(storage, files, denylist)
except (Error, local_caching.NoMoreSpace) as e:
parser.error(e.args[0])
print('\n'.join('%s %s' % (h, f) for f, h in results.items()))
return 0
def CMDdownload(parser, args):
"""Download data from the server.
It can either download individual files or a complete tree from a .isolated
file.
"""
add_isolate_server_options(parser)
parser.add_option(
'-s', '--isolated', metavar='HASH',
help='hash of an isolated file, .isolated file content is discarded, use '
'--file if you need it')
parser.add_option(
'-f',
'--file',
metavar='HASH DEST',
default=[],
action='append',
nargs=2,
help='hash and destination of a file, can be used multiple times')
parser.add_option(
'-t',
'--target',
metavar='DIR',
default='download',
help='destination directory')
parser.add_option(
'--use-symlinks',
action='store_true',
help='Use symlinks instead of hardlinks')
add_cache_options(parser)
options, args = parser.parse_args(args)
if args:
parser.error('Unsupported arguments: %s' % args)
if not file_path.enable_symlink():
logging.warning('Symlink support is not enabled')
process_isolate_server_options(parser, options, True)
if bool(options.isolated) == bool(options.file):
parser.error('Use one of --isolated or --file, and only one.')
if not options.cache and options.use_symlinks:
    parser.error('--use-symlinks requires the use of a cache with --cache')
cache = process_cache_options(options, trim=True)
cache.cleanup()
options.target = six.text_type(os.path.abspath(options.target))
if options.isolated:
if (fs.isfile(options.target) or
(fs.isdir(options.target) and fs.listdir(options.target))):
parser.error(
'--target \'%s\' exists, please use another target' % options.target)
server_ref = isolate_storage.ServerRef(
options.isolate_server, options.namespace)
with get_storage(server_ref) as storage:
# Fetching individual files.
if options.file:
# TODO(maruel): Enable cache in this case too.
channel = threading_utils.TaskChannel()
pending = {}
for digest, dest in options.file:
dest = six.text_type(dest)
pending[digest] = dest
storage.async_fetch(
channel, threading_utils.PRIORITY_MED, digest,
local_caching.UNKNOWN_FILE_SIZE,
functools.partial(local_caching.file_write,
os.path.join(options.target, dest)))
while pending:
fetched = channel.next()
dest = pending.pop(fetched)
logging.info('%s: %s', fetched, dest)
# Fetching whole isolated tree.
if options.isolated:
bundle = fetch_isolated(
isolated_hash=options.isolated,
storage=storage,
cache=cache,
outdir=options.target,
use_symlinks=options.use_symlinks)
cache.trim()
if bundle.command:
rel = os.path.join(options.target, bundle.relative_cwd)
print('To run this test please run from the directory %s:' %
os.path.join(options.target, rel))
print(' ' + ' '.join(bundle.command))
return 0
def add_archive_options(parser):
parser.add_option(
'--blacklist',
action='append',
default=list(DEFAULT_DENYLIST),
help='List of regexp to use as denylist filter when uploading '
'directories')
def add_isolate_server_options(parser):
"""Adds --isolate-server and --namespace options to parser."""
parser.add_option(
'-I', '--isolate-server',
metavar='URL', default=os.environ.get('ISOLATE_SERVER', ''),
help='URL of the Isolate Server to use. Defaults to the environment '
'variable ISOLATE_SERVER if set. No need to specify https://, this '
'is assumed.')
parser.add_option(
'--namespace',
default='default-gzip',
help='The namespace to use on the Isolate Server, default: %default')
def process_isolate_server_options(parser, options, required):
"""Processes the --isolate-server option.
Returns the identity as determined by the server.
"""
if not options.isolate_server:
if required:
parser.error('--isolate-server is required.')
return
try:
options.isolate_server = net.fix_url(options.isolate_server)
except ValueError as e:
parser.error('--isolate-server %s' % e)
try:
return auth.ensure_logged_in(options.isolate_server)
except ValueError as e:
parser.error(str(e))
return None
def add_cache_options(parser):
cache_group = optparse.OptionGroup(parser, 'Isolated cache management')
cache_group.add_option(
'--cache', metavar='DIR', default='cache',
help='Directory to keep a local cache of the files. Accelerates download '
'by reusing already downloaded files. Default=%default')
cache_group.add_option(
'--max-cache-size',
type='int',
metavar='NNN',
default=50*1024*1024*1024,
help='Trim if the cache gets larger than this value, default=%default')
cache_group.add_option(
'--min-free-space',
type='int',
metavar='NNN',
default=2*1024*1024*1024,
help='Trim if disk free space becomes lower than this value, '
'default=%default')
cache_group.add_option(
'--max-items',
type='int',
metavar='NNN',
default=100000,
help='Trim if more than this number of items are in the cache '
'default=%default')
parser.add_option_group(cache_group)
def process_cache_options(options, trim, **kwargs):
if options.cache:
policies = local_caching.CachePolicies(
options.max_cache_size,
options.min_free_space,
options.max_items,
# 3 weeks.
max_age_secs=21 * 24 * 60 * 60)
# |options.cache| path may not exist until DiskContentAddressedCache()
# instance is created.
return local_caching.DiskContentAddressedCache(
six.text_type(os.path.abspath(options.cache)), policies, trim, **kwargs)
return local_caching.MemoryContentAddressedCache()
class OptionParserIsolateServer(logging_utils.OptionParserWithLogging):
def __init__(self, **kwargs):
logging_utils.OptionParserWithLogging.__init__(
self,
version=__version__,
prog=os.path.basename(sys.modules[__name__].__file__),
**kwargs)
auth.add_auth_options(self)
def parse_args(self, *args, **kwargs):
options, args = logging_utils.OptionParserWithLogging.parse_args(
self, *args, **kwargs)
auth.process_auth_options(self, options)
return options, args
def main(args):
dispatcher = subcommand.CommandDispatcher(__name__)
return dispatcher.execute(OptionParserIsolateServer(), args)
if __name__ == '__main__':
subprocess42.inhibit_os_error_reporting()
fix_encoding.fix_encoding()
tools.disable_buffering()
colorama.init()
net.set_user_agent('isolateserver.py/' + __version__)
sys.exit(main(sys.argv[1:]))
| []
| []
| [
"ISOLATE_SERVER"
]
| [] | ["ISOLATE_SERVER"] | python | 1 | 0 | |
templates/pre-21.0.py | #!/usr/bin/env python
#
# Hi There!
#
# You may be wondering what this giant blob of binary data here is, you might
# even be worried that we're up to something nefarious (good for you for being
# paranoid!). This is a base85 encoding of a zip file, this zip file contains
# an entire copy of pip (version {installed_version}).
#
# Pip is a thing that installs packages, pip itself is a package that someone
# might want to install, especially if they're looking to run this get-pip.py
# script. Pip has a lot of code to deal with the security of installing
# packages, various edge cases on various platforms, and other such sort of
# "tribal knowledge" that has been encoded in its code base. Because of this
# we basically include an entire copy of pip inside this blob. We do this
# because the alternatives are to attempt to implement a "minipip" that probably
# doesn't do things correctly and has weird edge cases, or compress pip itself
# down into a single file.
#
# If you're wondering how this is created, it is generated using
# `scripts/generate.py` in https://github.com/pypa/get-pip.
import os.path
import pkgutil
import shutil
import sys
import struct
import tempfile
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
iterbytes = iter
else:
def iterbytes(buf):
return (ord(byte) for byte in buf)
try:
from base64 import b85decode
except ImportError:
_b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{{|}}~")
def b85decode(b):
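        # Fallback decoder: every 5 base85 characters encode one 4-byte
        # big-endian word. The input is padded with '~' and the padding bytes
        # are stripped from the decoded result at the end.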
_b85dec = [None] * 256
for i, c in enumerate(iterbytes(_b85alphabet)):
_b85dec[c] = i
padding = (-len(b)) % 5
b = b + b'~' * padding
out = []
packI = struct.Struct('!I').pack
for i in range(0, len(b), 5):
chunk = b[i:i + 5]
acc = 0
try:
for c in iterbytes(chunk):
acc = acc * 85 + _b85dec[c]
except TypeError:
for j, c in enumerate(iterbytes(chunk)):
if _b85dec[c] is None:
raise ValueError(
'bad base85 character at position %d' % (i + j)
)
raise
try:
out.append(packI(acc))
except struct.error:
raise ValueError('base85 overflow in hunk starting at byte %d'
% i)
result = b''.join(out)
if padding:
result = result[:-padding]
return result
def bootstrap(tmpdir=None):
# Import pip so we can use it to install pip and maybe setuptools too
from pip._internal.cli.main import main as pip_entry_point
from pip._internal.commands.install import InstallCommand
from pip._internal.req.constructors import install_req_from_line
# Wrapper to provide default certificate with the lowest priority
# Due to pip._internal.commands.commands_dict structure, a monkeypatch
# seems the simplest workaround.
install_parse_args = InstallCommand.parse_args
def cert_parse_args(self, args):
# If cert isn't specified in config or environment, we provide our
# own certificate through defaults.
# This allows user to specify custom cert anywhere one likes:
# config, environment variable or argv.
if not self.parser.get_default_values().cert:
self.parser.defaults["cert"] = cert_path # calculated below
return install_parse_args(self, args)
InstallCommand.parse_args = cert_parse_args
implicit_pip = True
implicit_setuptools = True
implicit_wheel = True
# Check if the user has requested us not to install setuptools
if "--no-setuptools" in sys.argv or os.environ.get("PIP_NO_SETUPTOOLS"):
args = [x for x in sys.argv[1:] if x != "--no-setuptools"]
implicit_setuptools = False
else:
args = sys.argv[1:]
# Check if the user has requested us not to install wheel
if "--no-wheel" in args or os.environ.get("PIP_NO_WHEEL"):
args = [x for x in args if x != "--no-wheel"]
implicit_wheel = False
# We only want to implicitly install setuptools and wheel if they don't
# already exist on the target platform.
if implicit_setuptools:
try:
import setuptools # noqa
implicit_setuptools = False
except ImportError:
pass
if implicit_wheel:
try:
import wheel # noqa
implicit_wheel = False
except ImportError:
pass
# We want to support people passing things like 'pip<8' to get-pip.py which
# will let them install a specific version. However because of the dreaded
# DoubleRequirement error if any of the args look like they might be a
# specific for one of our packages, then we'll turn off the implicit
# install of them.
for arg in args:
try:
req = install_req_from_line(arg)
except Exception:
continue
if implicit_pip and req.name == "pip":
implicit_pip = False
elif implicit_setuptools and req.name == "setuptools":
implicit_setuptools = False
elif implicit_wheel and req.name == "wheel":
implicit_wheel = False
# Add any implicit installations to the end of our args
if implicit_pip:
args += ["pip{pip_version}"]
if implicit_setuptools:
args += ["setuptools{setuptools_version}"]
if implicit_wheel:
args += ["wheel{wheel_version}"]
# Add our default arguments
args = ["install", "--upgrade", "--force-reinstall"] + args
delete_tmpdir = False
try:
# Create a temporary directory to act as a working directory if we were
# not given one.
if tmpdir is None:
tmpdir = tempfile.mkdtemp()
delete_tmpdir = True
# We need to extract the SSL certificates from requests so that they
# can be passed to --cert
cert_path = os.path.join(tmpdir, "cacert.pem")
with open(cert_path, "wb") as cert:
cert.write(pkgutil.get_data("pip._vendor.certifi", "cacert.pem"))
# Execute the included pip and use it to install the latest pip and
# setuptools from PyPI
sys.exit(pip_entry_point(args))
finally:
# Remove our temporary directory
if delete_tmpdir and tmpdir:
shutil.rmtree(tmpdir, ignore_errors=True)
def main():
tmpdir = None
try:
# Create a temporary working directory
tmpdir = tempfile.mkdtemp()
# Unpack the zipfile into the temporary directory
pip_zip = os.path.join(tmpdir, "pip.zip")
with open(pip_zip, "wb") as fp:
fp.write(b85decode(DATA.replace(b"\n", b"")))
# Add the zipfile to sys.path so that we can import it
sys.path.insert(0, pip_zip)
# Run the bootstrap
bootstrap(tmpdir=tmpdir)
finally:
# Clean up our temporary working directory
if tmpdir:
shutil.rmtree(tmpdir, ignore_errors=True)
DATA = b"""
{zipfile}
"""
if __name__ == "__main__":
main()
| []
| []
| [
"PIP_NO_WHEEL",
"PIP_NO_SETUPTOOLS"
]
| [] | ["PIP_NO_WHEEL", "PIP_NO_SETUPTOOLS"] | python | 2 | 0 | |
cmd/dolistasafado/main.go | package main
import (
"net/http"
"os"
"time"
"github.com/iatistas/dolista-safado/service"
"github.com/sirupsen/logrus"
)
const noToken = "no-token"
func main() {
telegramToken := os.Getenv("TELEGRAM_TOKEN")
if telegramToken == "" || telegramToken == noToken {
logrus.Error("telegram token not provided")
return
}
router := http.NewServeMux()
router.HandleFunc("/message", service.GetMessageHandler(telegramToken))
server := &http.Server{
Addr: ":80",
ReadTimeout: 10 * time.Second,
WriteTimeout: 60 * time.Second,
}
logrus.Info("starting server on port 80")
logrus.Error(server.ListenAndServe())
}
| [
"\"TELEGRAM_TOKEN\""
]
| []
| [
"TELEGRAM_TOKEN"
]
| [] | ["TELEGRAM_TOKEN"] | go | 1 | 0 | |
app/database/db_connection.go | package database
import (
"app/models"
"os"
_ "github.com/go-sql-driver/mysql"
"gorm.io/driver/mysql"
"gorm.io/gorm"
)
var (
DbConn *gorm.DB
)
func Connect() error {
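	// Assemble the MySQL DSN (user:password@tcp(host:port)/dbname?...) from
	// environment variables; parseTime=True is needed so DATETIME columns can
	// be scanned into time.Time values.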
dsn := os.Getenv("DB_USER") + ":" + os.Getenv("DB_PASSWORD") + "@tcp(" + os.Getenv("DB_HOST") + ":" + os.Getenv("DB_PORT") + ")/" + os.Getenv("DB_NAME") + "?charset=utf8mb4&parseTime=True&loc=Local"
db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})
	if err != nil {
		return err
	}
DbConn = db
// Creates the tables, missing foreign keys, constraints, columns and indexes for the specified models
db.AutoMigrate(&models.User{}, &models.Article{}, &models.Comment{})
return nil
}
| [
"\"DB_USER\"",
"\"DB_PASSWORD\"",
"\"DB_HOST\"",
"\"DB_PORT\"",
"\"DB_NAME\""
]
| []
| [
"DB_PASSWORD",
"DB_HOST",
"DB_PORT",
"DB_NAME",
"DB_USER"
]
| [] | ["DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_NAME", "DB_USER"] | go | 5 | 0 | |
internal/acceptance/acceptance.go | package acceptance
import (
"fmt"
"os"
"testing"
"github.com/databrickslabs/databricks-terraform/common"
"github.com/databrickslabs/databricks-terraform/internal"
"github.com/databrickslabs/databricks-terraform/provider"
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
)
func AccTest(t *testing.T, tc resource.TestCase) {
// each test - create new instance of provider.
tc.Providers = map[string]terraform.ResourceProvider{
"databricks": provider.DatabricksProvider(),
}
	// this allows debugging from VSCode when it's launched with the CLOUD_ENV var
cloudEnv := os.Getenv("CLOUD_ENV")
tc.IsUnitTest = cloudEnv != ""
if cloudEnv != "" {
// let's be more chatty in integration test logs
for i, s := range tc.Steps {
if s.Config != "" {
t.Logf("Test %s (%s) step %d config is:\n%s",
t.Name(), cloudEnv, i,
internal.TrimLeadingWhitespace(s.Config))
}
}
}
resource.Test(t, tc)
}
// ResourceCheck calls back a function with client and resource id
func ResourceCheck(name string,
cb func(client *common.DatabricksClient, id string) error) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("Not found: %s", name)
}
client := common.CommonEnvironmentClient()
return cb(client, rs.Primary.ID)
}
}
| [
"\"CLOUD_ENV\""
]
| []
| [
"CLOUD_ENV"
]
| [] | ["CLOUD_ENV"] | go | 1 | 0 | |
main.go | package main
import (
"context"
"fmt"
"io"
"log"
"os"
"os/signal"
"runtime"
"runtime/pprof"
"sort"
"time"
"github.com/gopasspw/gopass/internal/action/pwgen"
_ "github.com/gopasspw/gopass/internal/backend/crypto"
"github.com/gopasspw/gopass/internal/backend/crypto/gpg"
_ "github.com/gopasspw/gopass/internal/backend/storage"
"github.com/gopasspw/gopass/internal/queue"
"github.com/gopasspw/gopass/pkg/ctxutil"
"github.com/gopasspw/gopass/pkg/protect"
"github.com/blang/semver/v4"
"github.com/fatih/color"
colorable "github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
ap "github.com/gopasspw/gopass/internal/action"
"github.com/gopasspw/gopass/internal/config"
"github.com/gopasspw/gopass/internal/out"
"github.com/gopasspw/gopass/internal/store/leaf"
"github.com/gopasspw/gopass/pkg/termio"
)
const (
name = "gopass"
)
var (
// Version is the released version of gopass
version string
// BuildTime is the time the binary was built
date string
// Commit is the git hash the binary was built from
commit string
)
func main() {
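	// If GOPASS_CPU_PROFILE points to a writable path, record a pprof CPU
	// profile for the whole run and stop it when main returns.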
if cp := os.Getenv("GOPASS_CPU_PROFILE"); cp != "" {
f, err := os.Create(cp)
if err != nil {
log.Fatalf("could not create CPU profile at %s: %s", cp, err)
}
defer f.Close()
if err := pprof.StartCPUProfile(f); err != nil {
log.Fatalf("could not start CPU profile: %s", err)
}
defer pprof.StopCPUProfile()
}
if err := protect.Pledge("stdio rpath wpath cpath tty proc exec"); err != nil {
panic(err)
}
ctx := context.Background()
// trap Ctrl+C and call cancel on the context
ctx, cancel := context.WithCancel(ctx)
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, os.Interrupt)
defer func() {
signal.Stop(sigChan)
cancel()
}()
go func() {
select {
case <-sigChan:
cancel()
case <-ctx.Done():
}
}()
cli.ErrWriter = errorWriter{
out: colorable.NewColorableStderr(),
}
sv := getVersion()
cli.VersionPrinter = makeVersionPrinter(os.Stdout, sv)
q := queue.New(ctx)
ctx = queue.WithQueue(ctx, q)
ctx, app := setupApp(ctx, sv)
if err := app.RunContext(ctx, os.Args); err != nil {
log.Fatal(err)
}
q.Wait(ctx)
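// If GOPASS_MEM_PROFILE is set, write a heap profile to that path before exiting.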
if mp := os.Getenv("GOPASS_MEM_PROFILE"); mp != "" {
f, err := os.Create(mp)
if err != nil {
log.Fatalf("could not write mem profile to %s: %s", mp, err)
}
defer f.Close()
runtime.GC()
if err := pprof.WriteHeapProfile(f); err != nil {
log.Fatalf("could not write heap profile: %s", err)
}
}
}
func setupApp(ctx context.Context, sv semver.Version) (context.Context, *cli.App) {
// try to read config (if it exists)
cfg := config.LoadWithFallback()
// set config values
ctx = initContext(ctx, cfg)
// initialize action handlers
action, err := ap.New(cfg, sv)
if err != nil {
out.Errorf(ctx, "failed to initialize gopass: %s", err)
os.Exit(ap.ExitUnknown)
}
// set some action callbacks
if !cfg.AutoImport {
ctx = ctxutil.WithImportFunc(ctx, termio.AskForKeyImport)
}
ctx = leaf.WithFsckFunc(ctx, termio.AskForConfirmation)
app := cli.NewApp()
app.Name = name
app.Version = sv.String()
app.Usage = "The standard unix password manager - rewritten in Go"
app.EnableBashCompletion = true
app.BashComplete = func(c *cli.Context) {
cli.DefaultAppComplete(c)
action.Complete(c)
}
app.Flags = ap.ShowFlags()
app.Action = func(c *cli.Context) error {
if err := action.IsInitialized(c); err != nil {
return err
}
if c.Args().Present() {
return action.Show(c)
}
return action.REPL(c)
}
app.Commands = getCommands(action, app)
return ctx, app
}
func getCommands(action *ap.Action, app *cli.App) []*cli.Command {
cmds := []*cli.Command{
{
Name: "completion",
Usage: "Bash and ZSH completion",
Description: "" +
"Source the output of this command with bash or zsh to get auto completion",
Subcommands: []*cli.Command{{
Name: "bash",
Usage: "Source for auto completion in bash",
Action: action.CompletionBash,
}, {
Name: "zsh",
Usage: "Source for auto completion in zsh",
Action: func(c *cli.Context) error {
return action.CompletionZSH(app)
},
}, {
Name: "fish",
Usage: "Source for auto completion in fish",
Action: func(c *cli.Context) error {
return action.CompletionFish(app)
},
}, {
Name: "openbsdksh",
Usage: "Source for auto completion in OpenBSD's ksh",
Action: func(c *cli.Context) error {
return action.CompletionOpenBSDKsh(app)
},
}},
},
}
cmds = append(cmds, action.GetCommands()...)
cmds = append(cmds, pwgen.GetCommands()...)
sort.Slice(cmds, func(i, j int) bool { return cmds[i].Name < cmds[j].Name })
return cmds
}
func makeVersionPrinter(out io.Writer, sv semver.Version) func(c *cli.Context) {
return func(c *cli.Context) {
buildtime := ""
if bt, err := time.Parse("2006-01-02T15:04:05-0700", date); err == nil {
buildtime = bt.Format("2006-01-02 15:04:05")
}
buildInfo := ""
if commit != "" {
buildInfo = commit
}
if buildtime != "" {
if buildInfo != "" {
buildInfo += " "
}
buildInfo += buildtime
}
if buildInfo != "" {
buildInfo = "(" + buildInfo + ") "
}
fmt.Fprintf(
out,
"%s %s %s%s %s %s\n",
name,
sv.String(),
buildInfo,
runtime.Version(),
runtime.GOOS,
runtime.GOARCH,
)
}
}
type errorWriter struct {
out io.Writer
}
func (e errorWriter) Write(p []byte) (int, error) {
return e.out.Write([]byte("\n" + color.RedString("Error: %s", p)))
}
func initContext(ctx context.Context, cfg *config.Config) context.Context {
// initialize from config, may be overridden by env vars
ctx = cfg.WithContext(ctx)
// always trust
ctx = gpg.WithAlwaysTrust(ctx, true)
// Checking recipients conflicts with always-trust, so make sure it is not
// enabled when always-trust is on.
if gpg.IsAlwaysTrust(ctx) {
ctx = leaf.WithCheckRecipients(ctx, false)
}
// only emit color codes when stdout is a terminal
if !isatty.IsTerminal(os.Stdout.Fd()) {
color.NoColor = true
ctx = ctxutil.WithTerminal(ctx, false)
ctx = ctxutil.WithInteractive(ctx, false)
}
// reading from stdin?
if info, err := os.Stdin.Stat(); err == nil && info.Mode()&os.ModeCharDevice == 0 {
ctx = ctxutil.WithInteractive(ctx, false)
ctx = ctxutil.WithStdin(ctx, true)
}
// Disable colored output on Windows since cmd.exe doesn't support ANSI color
// codes. Other terminals may, but until we can detect that reliably, disable
// color for all terminals on this platform.
if runtime.GOOS == "windows" {
color.NoColor = true
}
return ctx
}
| [
"\"GOPASS_CPU_PROFILE\"",
"\"GOPASS_MEM_PROFILE\""
]
| []
| [
"GOPASS_CPU_PROFILE",
"GOPASS_MEM_PROFILE"
]
| [] | ["GOPASS_CPU_PROFILE", "GOPASS_MEM_PROFILE"] | go | 2 | 0 | |
cmd/store.go | package cmd
import (
"io/ioutil"
"os"
"strings"
"github.com/joemiller/vault-token-helper/pkg/store"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// storeCmd represents the store command
var storeCmd = &cobra.Command{
Use: "store",
Short: "(For use by vault) Store a token (from stdin) for the current $VAULT_ADDR",
SilenceUsage: true, // Don't show help on error, just print the error
RunE: func(cmd *cobra.Command, args []string) error {
if err := initBackend(); err != nil {
return err
}
vaultAddr := os.Getenv("VAULT_ADDR")
if vaultAddr == "" {
return errors.New("Missing VAULT_ADDR environment variable")
}
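// If VAULT_NAMESPACE is set, append it to the address so tokens are keyed
// per VAULT_ADDR + namespace combination.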
vaultNamespace := os.Getenv("VAULT_NAMESPACE")
if vaultNamespace != "" {
vaultAddr += "/"
vaultAddr += vaultNamespace
}
stdin, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return errors.Wrap(err, "Failed to read token from STDIN")
}
token := store.Token{
VaultAddr: vaultAddr,
Token: strings.TrimSuffix(string(stdin), "\n"),
}
if err := backend.Store(token); err != nil {
return err
}
return nil
},
}
func init() {
RootCmd.AddCommand(storeCmd)
}
| [
"\"VAULT_ADDR\"",
"\"VAULT_NAMESPACE\""
]
| []
| [
"VAULT_ADDR",
"VAULT_NAMESPACE"
]
| [] | ["VAULT_ADDR", "VAULT_NAMESPACE"] | go | 2 | 0 | |
session/speaker-change/finetune-vggvox-v2.py | import os
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '../gcs/mesolitica-storage.json'
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import tensorflow as tf
import malaya_speech.train as train
import malaya_speech.train.model.vggvox_v2 as vggvox_v2
import malaya_speech
from glob import glob
import librosa
import numpy as np
def lin_spectogram_from_wav(wav, hop_length, win_length, n_fft=1024):
linear = librosa.stft(
wav, n_fft=n_fft, win_length=win_length, hop_length=hop_length
) # linear spectrogram
return linear.T
def load_data(
wav,
win_length=400,
sr=16000,
hop_length=50,
n_fft=512,
spec_len=250,
mode='train',
):
linear_spect = lin_spectogram_from_wav(wav, hop_length, win_length, n_fft)
mag, _ = librosa.magphase(linear_spect) # magnitude
mag_T = mag.T
freq, time = mag_T.shape
if mode == 'train':
if time > spec_len:
randtime = np.random.randint(0, time - spec_len)
spec_mag = mag_T[:, randtime: randtime + spec_len]
else:
spec_mag = np.pad(mag_T, ((0, 0), (0, spec_len - time)), 'constant')
else:
spec_mag = mag_T
# preprocessing: subtract the mean and divide by the time-wise std
mu = np.mean(spec_mag, 0, keepdims=True)
std = np.std(spec_mag, 0, keepdims=True)
return (spec_mag - mu) / (std + 1e-5)
DIMENSION = 257
def calc(v):
r = load_data(v, mode='eval')
return r
def preprocess_inputs(example):
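# Run load_data eagerly via numpy_function inside the tf.data pipeline,
# then reshape the spectrogram to (freq_bins, time, 1).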
s = tf.compat.v1.numpy_function(calc, [example['inputs']], tf.float32)
s = tf.reshape(s, (DIMENSION, -1, 1))
example['inputs'] = s
return example
def parse(serialized_example):
data_fields = {
'inputs': tf.VarLenFeature(tf.float32),
'targets': tf.VarLenFeature(tf.int64),
}
features = tf.parse_single_example(
serialized_example, features=data_fields
)
for k in features.keys():
features[k] = features[k].values
features = preprocess_inputs(features)
keys = list(features.keys())
for k in keys:
if k not in ['inputs', 'targets']:
features.pop(k, None)
return features
def get_dataset(files, batch_size=32, shuffle_size=1024, thread_count=24):
def get():
dataset = tf.data.TFRecordDataset(files)
dataset = dataset.map(parse, num_parallel_calls=thread_count)
dataset = dataset.shuffle(shuffle_size)
dataset = dataset.padded_batch(
batch_size,
padded_shapes={
'inputs': tf.TensorShape([DIMENSION, None, 1]),
'targets': tf.TensorShape([None]),
},
padding_values={
'inputs': tf.constant(0, dtype=tf.float32),
'targets': tf.constant(0, dtype=tf.int64),
},
)
dataset = dataset.repeat()
return dataset
return get
learning_rate = 1e-5
init_checkpoint = '../vggvox-speaker-identification/v2/vggvox.ckpt'
def model_fn(features, labels, mode, params):
Y = tf.cast(features['targets'][:, 0], tf.int32)
model = vggvox_v2.Model(features['inputs'], num_class=2, mode='train')
logits = model.logits
loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=Y
)
)
tf.identity(loss, 'train_loss')
accuracy = tf.metrics.accuracy(
labels=Y, predictions=tf.argmax(logits, axis=1)
)
tf.identity(accuracy[1], name='train_accuracy')
tf.summary.scalar('train_accuracy', accuracy[1])
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
variables = [v for v in variables if 'prediction' not in v.name]
assignment_map, initialized_variable_names = train.get_assignment_map_from_checkpoint(
variables, init_checkpoint
)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
estimator_spec = tf.estimator.EstimatorSpec(
mode=mode, loss=loss, train_op=train_op
)
elif mode == tf.estimator.ModeKeys.EVAL:
estimator_spec = tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss,
eval_metric_ops={'accuracy': accuracy},
)
return estimator_spec
train_hooks = [
tf.train.LoggingTensorHook(
['train_accuracy', 'train_loss'], every_n_iter=1
)
]
files = tf.io.gfile.glob(
'gs://mesolitica-general/speaker-change/data/*.tfrecords'
)
train_dataset = get_dataset(files)
save_directory = 'output-vggvox-v2-speaker-change'
train.run_training(
train_fn=train_dataset,
model_fn=model_fn,
model_dir=save_directory,
num_gpus=1,
log_step=1,
save_checkpoint_step=25000,
max_steps=300000,
train_hooks=train_hooks,
)
| []
| []
| [
"CUDA_VISIBLE_DEVICES",
"GOOGLE_APPLICATION_CREDENTIALS"
]
| [] | ["CUDA_VISIBLE_DEVICES", "GOOGLE_APPLICATION_CREDENTIALS"] | python | 2 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'codedaddies_list.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/greenlet/tests/leakcheck.py | # Copyright (c) 2018 gevent community
# Copyright (c) 2021 greenlet community
#
# This was originally part of gevent's test suite. The main author
# (Jason Madden) vendored a copy of it into greenlet.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import os
import sys
import gc
from functools import wraps
import unittest
import objgraph
# graphviz 0.18 (Nov 7 2021), available only on Python 3.6 and newer,
# has added type hints (sigh). It wants to use ``typing.Literal`` for
# some stuff, but that's only available on Python 3.9+. If that's not
# found, it creates a ``unittest.mock.MagicMock`` object and annotates
# with that. These are GC'able objects, and doing almost *anything*
# with them results in an explosion of objects. For example, trying to
# compare them for equality creates new objects. This causes our
# leakchecks to fail, with reports like:
#
# greenlet.tests.leakcheck.LeakCheckError: refcount increased by [337, 1333, 343, 430, 530, 643, 769]
# _Call 1820 +546
# dict 4094 +76
# MagicProxy 585 +73
# tuple 2693 +66
# _CallList 24 +3
# weakref 1441 +1
# function 5996 +1
# type 736 +1
# cell 592 +1
# MagicMock 8 +1
#
# To avoid this, we *could* filter this type of object out early. In
# principle it could leak, but we don't use mocks in greenlet, so it
# doesn't leak from us. However, a further issue is that ``MagicMock``
# objects have subobjects that are also GC'able, like ``_Call``, and
# those create new mocks of their own too. So we'd have to filter them
# as well, and they're not public. That's OK, we can workaround the
# problem by being very careful to never compare by equality or other
# user-defined operators, only using object identity or other builtin
# functions.
RUNNING_ON_GITHUB_ACTIONS = os.environ.get('GITHUB_ACTIONS')
RUNNING_ON_TRAVIS = os.environ.get('TRAVIS') or RUNNING_ON_GITHUB_ACTIONS
RUNNING_ON_APPVEYOR = os.environ.get('APPVEYOR')
RUNNING_ON_CI = RUNNING_ON_TRAVIS or RUNNING_ON_APPVEYOR
RUNNING_ON_MANYLINUX = os.environ.get('GREENLET_MANYLINUX')
SKIP_LEAKCHECKS = RUNNING_ON_MANYLINUX or os.environ.get('GREENLET_SKIP_LEAKCHECKS')
SKIP_FAILING_LEAKCHECKS = os.environ.get('GREENLET_SKIP_FAILING_LEAKCHECKS')
ONLY_FAILING_LEAKCHECKS = os.environ.get('GREENLET_ONLY_FAILING_LEAKCHECKS')
def ignores_leakcheck(func):
"""
Ignore the given object during leakchecks.
Can be applied to a method, in which case the method will run, but
will not be subject to leak checks.
If applied to a class, the entire class will be skipped during leakchecks. This
is intended to be used for classes that are very slow and cause problems such as
test timeouts; typically it will be used for classes that are subclasses of a base
class and specify variants of behaviour (such as pool sizes).
"""
func.ignore_leakcheck = True
return func
def fails_leakcheck(func):
"""
Mark that the function is known to leak.
"""
func.fails_leakcheck = True
if SKIP_FAILING_LEAKCHECKS:
func = unittest.skip("Skipping known failures")(func)
return func
class LeakCheckError(AssertionError):
pass
if hasattr(sys, 'getobjects'):
# In a Python build with ``--with-trace-refs``, make objgraph
# trace *all* the objects, not just those that are tracked by the
# GC
class _MockGC(object):
def get_objects(self):
return sys.getobjects(0) # pylint:disable=no-member
def __getattr__(self, name):
return getattr(gc, name)
objgraph.gc = _MockGC()
fails_strict_leakcheck = fails_leakcheck
else:
def fails_strict_leakcheck(func):
"""
Decorator for a function that is known to fail when running
strict (``sys.getobjects()``) leakchecks.
This type of leakcheck finds all objects, even those, such as
strings, which are not tracked by the garbage collector.
"""
return func
class ignores_types_in_strict_leakcheck(object):
def __init__(self, types):
self.types = types
def __call__(self, func):
func.leakcheck_ignore_types = self.types
return func
class _RefCountChecker(object):
# Some builtin things that we ignore
# XXX: Those things were ignored by gevent, but they're important here,
# presumably.
IGNORED_TYPES = () #(tuple, dict, types.FrameType, types.TracebackType)
def __init__(self, testcase, function):
self.testcase = testcase
self.function = function
self.deltas = []
self.peak_stats = {}
self.ignored_types = ()
# The very first time we are called, we have already been
# self.setUp() by the test runner, so we don't need to do it again.
self.needs_setUp = False
def _include_object_p(self, obj):
# pylint:disable=too-many-return-statements
#
# See the comment block at the top. We must be careful to
# avoid invoking user-defined operations.
if obj is self:
return False
kind = type(obj)
# ``self._include_object_p == obj`` returns NotImplemented
# for non-function objects, which causes the interpreter
# to try to reverse the order of arguments...which leads
# to the explosion of mock objects. We don't want that, so we implement
# the check manually.
if kind == type(self._include_object_p):
try:
# pylint:disable=not-callable
exact_method_equals = self._include_object_p.__eq__(obj)
except AttributeError:
# Python 2.7 methods may only have __cmp__, and that raises a
# TypeError for non-method arguments
# pylint:disable=no-member
exact_method_equals = self._include_object_p.__cmp__(obj) == 0
if exact_method_equals is not NotImplemented and exact_method_equals:
return False
# Similarly, we need to check identity in our __dict__ to avoid mock explosions.
for x in self.__dict__.values():
if obj is x:
return False
if kind in self.ignored_types or kind in self.IGNORED_TYPES:
return False
return True
def _growth(self):
return objgraph.growth(limit=None, peak_stats=self.peak_stats,
filter=self._include_object_p)
def _report_diff(self, growth):
if not growth:
return "<Unable to calculate growth>"
lines = []
width = max(len(name) for name, _, _ in growth)
for name, count, delta in growth:
lines.append('%-*s%9d %+9d' % (width, name, count, delta))
diff = '\n'.join(lines)
return diff
def _run_test(self, args, kwargs):
gc_enabled = gc.isenabled()
gc.disable()
if self.needs_setUp:
self.testcase.setUp()
self.testcase.skipTearDown = False
try:
self.function(self.testcase, *args, **kwargs)
finally:
self.testcase.tearDown()
self.testcase.doCleanups()
self.testcase.skipTearDown = True
self.needs_setUp = True
if gc_enabled:
gc.enable()
def _growth_after(self):
# Grab post snapshot
if 'urlparse' in sys.modules:
sys.modules['urlparse'].clear_cache()
if 'urllib.parse' in sys.modules:
sys.modules['urllib.parse'].clear_cache()
return self._growth()
def _check_deltas(self, growth):
# Return false when we have decided there is no leak,
# true if we should keep looping, raises an assertion
# if we have decided there is a leak.
deltas = self.deltas
if not deltas:
# We haven't run yet, no data, keep looping
return True
if gc.garbage:
raise LeakCheckError("Generated uncollectable garbage %r" % (gc.garbage,))
# the following configurations are classified as "no leak"
# [0, 0]
# [x, 0, 0]
# [... a, b, c, d] where a+b+c+d = 0
#
# the following configurations are classified as "leak"
# [... z, z, z] where z > 0
if deltas[-2:] == [0, 0] and len(deltas) in (2, 3):
return False
if deltas[-3:] == [0, 0, 0]:
return False
if len(deltas) >= 4 and sum(deltas[-4:]) == 0:
return False
if len(deltas) >= 3 and deltas[-1] > 0 and deltas[-1] == deltas[-2] and deltas[-2] == deltas[-3]:
diff = self._report_diff(growth)
raise LeakCheckError('refcount increased by %r\n%s' % (deltas, diff))
# OK, we don't know for sure yet. Let's search for more
if sum(deltas[-3:]) <= 0 or sum(deltas[-4:]) <= 0 or deltas[-4:].count(0) >= 2:
# this is suspicious, so give a few more runs
limit = 11
else:
limit = 7
if len(deltas) >= limit:
raise LeakCheckError('refcount increased by %r\n%s'
% (deltas,
self._report_diff(growth)))
# We couldn't decide yet, keep going
return True
def __call__(self, args, kwargs):
for _ in range(3):
gc.collect()
expect_failure = getattr(self.function, 'fails_leakcheck', False)
if expect_failure:
self.testcase.expect_greenlet_leak = True
self.ignored_types = getattr(self.function, "leakcheck_ignore_types", ())
# Capture state before; the incremental will be
# updated by each call to _growth_after
growth = self._growth()
try:
while self._check_deltas(growth):
self._run_test(args, kwargs)
growth = self._growth_after()
self.deltas.append(sum((stat[2] for stat in growth)))
except LeakCheckError:
if not expect_failure:
raise
else:
if expect_failure:
raise LeakCheckError("Expected %s to leak but it did not." % (self.function,))
def wrap_refcount(method):
if getattr(method, 'ignore_leakcheck', False) or SKIP_LEAKCHECKS:
return method
@wraps(method)
def wrapper(self, *args, **kwargs): # pylint:disable=too-many-branches
if getattr(self, 'ignore_leakcheck', False):
raise unittest.SkipTest("This class ignored during leakchecks")
if ONLY_FAILING_LEAKCHECKS and not getattr(method, 'fails_leakcheck', False):
raise unittest.SkipTest("Only running tests that fail leakchecks.")
return _RefCountChecker(self, method)(args, kwargs)
return wrapper
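# Illustrative usage (an assumption, not part of this module): decorate a test
# method so each run is repeated until the refcount deltas stabilise.
#
#   class TestNoLeak(unittest.TestCase):
#       @wrap_refcount
#       def test_spawn(self):
#           ...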
| []
| []
| [
"APPVEYOR",
"GITHUB_ACTIONS",
"GREENLET_ONLY_FAILING_LEAKCHECKS",
"GREENLET_SKIP_FAILING_LEAKCHECKS",
"GREENLET_SKIP_LEAKCHECKS",
"TRAVIS",
"GREENLET_MANYLINUX"
]
| [] | ["APPVEYOR", "GITHUB_ACTIONS", "GREENLET_ONLY_FAILING_LEAKCHECKS", "GREENLET_SKIP_FAILING_LEAKCHECKS", "GREENLET_SKIP_LEAKCHECKS", "TRAVIS", "GREENLET_MANYLINUX"] | python | 7 | 0 | |
notifier.py | #!/usr/bin/env python3
import datetime
import json
import logging
import os
import sqlite3
import subprocess
import sys
import time
from sqlite3 import Error
import requests
import telegram
logging.basicConfig(level=logging.DEBUG,
format='[%(asctime)s] [%(levelname)s] (%(threadName)-10s) %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
# SYNO.API urls
synoApiLoginUrl = "{}/webapi/auth.cgi?api=SYNO.API.Auth&method=Login&version=2" \
"&session=SurveillanceStation&format=sid&account={}&passwd={}"
synoApiCamerasInfoUrl = "{}/webapi/entry.cgi?api=SYNO.SurveillanceStation.Camera&method=List&version=1&_sid={}"
synoApiEventQueryUrl = "{}/webapi/entry.cgi?api=SYNO.SurveillanceStation.Event&method=List" \
"&version=4&locked=0&reason=2&limit=1&cameraIds={}&_sid={}"
synoApiEventDownloadUrl = "{}/webapi/entry.cgi?api=SYNO.SurveillanceStation.Event" \
"&method=Download&version=4&analyevent=false&mountId=0" \
"&eventId={}&_sid={}"
sql_create_processed_events_table = """ CREATE TABLE IF NOT EXISTS processed_events (
id integer PRIMARY KEY,
camera_id text NOT NULL,
last_event_id int NOT NULL,
processed_date timestamp NOT NULL
); """
sql_create_processed_events_table_unique = """ CREATE UNIQUE INDEX IF NOT EXISTS idx_processed_events_camera \
ON processed_events (camera_id); """
def parse_config(config_path):
with open(config_path, 'r') as config_file:
config_data = json.load(config_file)
return config_data
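# Example config shape (inferred from the keys read below; values are placeholders):
# {
#   "telegram_bot_token": "...",
#   "chat_id": "...",
#   "message": "Motion detected on",
#   "silence": false,
#   "ffmpeg_working_folder": "/tmp",
#   "data_folder": "/data",
#   "synology_base_api_url": "https://nas:5001",
#   "synology_user": "...",
#   "synology_password": "...",
#   "synology_cameras": [
#     {"id": 1, "name": "front", "scale": 320, "skip_first_n_secs": 2, "max_length_secs": 8}
#   ]
# }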
def create_connection(data_folder):
try:
conn = sqlite3.connect(data_folder + '/processed_events.db')
print(sqlite3.version)
return conn
except Error as e:
logging.error("CANNOT CREATE DB", e)
return None
def create_processed_events_table(conn):
try:
c = conn.cursor()
c.execute(sql_create_processed_events_table)
c.execute(sql_create_processed_events_table_unique)
except Error as e:
logging.error("CANNOT CREATE TABLE", e)
def check_already_processed_event_by_camera(conn, camera_id, event_id):
cur = conn.cursor()
cur.execute("SELECT * FROM processed_events WHERE camera_id=? AND last_event_id >=?", (camera_id, event_id))
rows = cur.fetchall()
already_processed = False
for row in rows:
logging.error("Event %s already processed %s", event_id, row)
already_processed = True
return already_processed
def replace_processed_events(conn, processed_event):
sql = ''' REPLACE INTO processed_events(camera_id, last_event_id ,processed_date)
VALUES(?,?,?) '''
cur = conn.cursor()
cur.execute(sql, processed_event)
conn.commit()
return cur.lastrowid
def syno_login(base_url, user, password):
login_response = requests.get(synoApiLoginUrl.format(base_url, user, password), verify=False)
logging.info('login_response status_code %s', login_response.status_code)
if login_response.ok:
login_data = json.loads(login_response.content.decode('utf-8'))
if login_data["success"]:
logging.info('login_response got sinotoken %s', login_data["data"]["sid"])
return login_data["data"]["sid"]
else:
return ""
else:
login_response.raise_for_status()
def syno_info(base_url, sid):
info_response = requests.get(synoApiCamerasInfoUrl.format(base_url, sid), verify=False)
logging.info('info_response status_code %s', info_response.status_code)
if info_response.ok:
info_data = json.loads(info_response.content.decode('utf-8'))
return info_data
else:
info_response.raise_for_status()
def syno_last_event(base_url, camera_id, sid):
event_response = requests.get(synoApiEventQueryUrl.format(base_url, camera_id, sid),
verify=False)
logging.info('event_response status_code %s', event_response.status_code)
if event_response.ok:
event_data = json.loads(event_response.content.decode('utf-8'))
if len(event_data["data"]["events"]) > 0 and event_data["data"]["events"][0]["cameraId"] == camera_id:
logging.info('found event for camera %s', event_data["data"]["events"][0]["camera_name"])
return event_data["data"]["events"][0]["eventId"]
else:
return -1
else:
event_response.raise_for_status()
def syno_download_video(download_dir, base_url, event_id, sid):
outfile_gif = '{}/{}.mp4'.format(download_dir, event_id)
with open(outfile_gif, "wb") as f:
logging.info('Downloading video for event id %i to %s .....', event_id, outfile_gif)
download_response = requests.get(synoApiEventDownloadUrl.format(base_url, event_id, sid),
verify=False, stream=True)
logging.info('download_response status_code %s', download_response.status_code)
if download_response.ok:
total_length = download_response.headers.get('content-length')
if total_length is None: # no content length header
f.write(download_response.content)
else:
dl = 0
total_length = int(total_length)
for data in download_response.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
done = int(50 * dl / total_length)
sys.stdout.write("\r[%s%s]" % ('=' * done, ' ' * (50 - done)))
sys.stdout.flush()
logging.info('Downloading video for event id %i to %s .....DONE', event_id, outfile_gif)
return outfile_gif
else:
download_response.raise_for_status()
def convert_video_gif(scale, skip_first_n_secs, max_length_secs, input_video, output_gif):
logging.info('convert_video_gif scale %i skip_first_n_secs %i max_length_secs %i input_video %s output_gif %s',
scale, skip_first_n_secs, max_length_secs, input_video, output_gif)
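    # ffmpeg: resample to 30 fps, scale to the requested width (lanczos, height
    # keeps the aspect ratio), skip the first N seconds and cap the clip length.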
retcode = subprocess.call([
"ffmpeg", "-stats", "-i", input_video, "-vf",
"fps=30,scale={}:-1:flags=lanczos".format(scale),
"-ss", "00:00:" + "{}".format(skip_first_n_secs).zfill(2), "-t", "{}".format(max_length_secs), "-y",
str(output_gif)
])
os.remove(input_video)
return retcode
class CameraMotionEventHandler:
def __init__(self, processed_events_conn, base_url, camera, config, sid):
self.base_url = base_url
self.camera = camera
self.config = config
self.ffmpeg_folder = self.config["ffmpeg_working_folder"]
self.sid = sid
self.bot = telegram.Bot(self.config["telegram_bot_token"])
self.message = self.config["message"]
# Keep a FIFO of files processed so we can guard against duplicate
# events
self.processed_events_conn = processed_events_conn
self.silence = self.config["silence"]
def publish_telegram_document(self, event_id):
logging.info('publish_telegram_document %s %s', event_id, self.camera["id"])
retcode = False
try:
self.bot.send_document(chat_id=self.config["chat_id"],
document=open('{}/{}'.format(self.ffmpeg_folder, event_id), 'rb'),
caption='{} {}'.format(self.message, self.camera['name']),
disable_notification=self.silence)
os.remove('{}/{}'.format(self.ffmpeg_folder, event_id))
retcode = True
except Exception as e:
logging.error("CANNOT SEND DOCUMENT: %s", e)
return retcode
def poll_event(self):
logging.info('Start getting last camera event for camera %s %s', self.camera["id"], self.camera["name"])
event_id = syno_last_event(self.base_url, self.camera["id"], self.sid)
if event_id > -1:
if check_already_processed_event_by_camera(self.processed_events_conn, self.camera["id"], event_id):
logging.info('Event %s already processed', event_id)
return None, None
logging.info('Start downloading event video for event_id %s', event_id)
mp4_file = syno_download_video(self.config["ffmpeg_working_folder"], self.base_url, event_id, self.sid)
outfile_gif = '{}/{}.gif'.format(self.config["ffmpeg_working_folder"], event_id)
convert_retcode = convert_video_gif(self.camera["scale"],
self.camera["skip_first_n_secs"],
self.camera["max_length_secs"],
mp4_file, outfile_gif)
if convert_retcode == 0:
public_retcode = self.publish_telegram_document('{}.gif'.format(event_id))
if public_retcode:
processed_event = (self.camera["id"], event_id, datetime.datetime.now())
replace_processed_events(self.processed_events_conn, processed_event)
logging.info('Done processing event_id %i', event_id)
else:
logging.error('Invalid return code from telegram publish for event id %i camera %s', event_id,
self.camera["name"])
else:
logging.error('Invalid return code from ffmpeg subprocess call for event id %i', event_id)
else:
logging.info('No event found for camera %s %s', self.camera["id"], self.camera["name"])
def main():
config_file = os.environ['config_file']
config = parse_config(config_file)
config_data_folder = ''
if 'data_folder' in config:
config_data_folder = config["data_folder"]
if config_data_folder == '':
config_data_folder = "/data"
logging.info('Creating/Opening processed_events database on file %s', config_data_folder)
processed_events_conn = create_connection(config_data_folder)
if processed_events_conn is not None:
# create processed_events table
create_processed_events_table(processed_events_conn)
else:
logging.error('Error! cannot create the database connection.')
return
logged_in = False
try:
while True:
time.sleep(10)
if not logged_in:
sid = syno_login(config["synology_base_api_url"], config["synology_user"], config["synology_password"])
if sid == "":
logging.error('Synology credentials not valid')
continue
else:
logged_in = True
logging.info('Synology Auth ok %s', sid)
info_data = syno_info(config["synology_base_api_url"], sid)
for camera_info in info_data["data"]["cameras"]:
logging.info('Synology Info Camera Id %s Name %s IP %s', camera_info["id"], camera_info["name"],
camera_info["host"])
for camera in config["synology_cameras"]:
logging.info('CameraMotionEventHandler poll_event %s %s', camera["id"], camera["name"])
camera_handler = CameraMotionEventHandler(processed_events_conn, config["synology_base_api_url"],
camera,
config, sid)
camera_handler.poll_event()
except KeyboardInterrupt:
logging.info('KeyboardInterrupt')
logging.info('Ending')
if __name__ == "__main__":
main()
| []
| []
| [
"config_file"
]
| [] | ["config_file"] | python | 1 | 0 | |
app.py | from flask import Flask, request, render_template, make_response
from flask_sqlalchemy import SQLAlchemy
import os
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
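# APP_SETTINGS is expected to name an importable config object,
# e.g. "config.DevelopmentConfig" (illustrative name, not defined here).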
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
from models import Listing
@app.route("/", methods=['GET'])
def index():
new_listings = db.session.query(Listing) \
.filter(Listing.status == "NEW") \
.order_by(Listing.scraped_dt)
int_listings = db.session.query(Listing) \
.filter(Listing.status == "INTERESTED") \
.order_by(Listing.scraped_dt)
app_listings = db.session.query(Listing) \
.filter(Listing.status == "APPLIED") \
.order_by(Listing.scraped_dt)
intv_listings = db.session.query(Listing) \
.filter(Listing.status == "INTERVIEW") \
.order_by(Listing.scraped_dt)
off_listings = db.session.query(Listing) \
.filter(Listing.status == "OFFER") \
.order_by(Listing.scraped_dt)
mbl_listings = db.session.query(Listing) \
.filter(Listing.status == "LATER") \
.order_by(Listing.scraped_dt)
return render_template('index.html', listings={
"NEW": new_listings,
"INTERESTED": int_listings,
"APPLIED": app_listings,
"INTERVIEW": intv_listings,
"OFFER": off_listings,
"LATER": mbl_listings})
@app.route("/update-status", methods=['POST'])
def update_status():
"""
A view that receives a listing to change the status of and
return the datetime of the Listing to insertBefore
"""
# Grab listing and new status
listing = db.session.query(Listing).get(request.form['id_num'])
status = request.form['status']
# Grab new Listing list
listings = db.session.query(Listing) \
.filter(Listing.status == status) \
.order_by(Listing.scraped_dt)
# Set new status and commit change
listing.status = status
db.session.commit()
# Find next listing in order by scraped datetime
# Return as a string if it exists
for l in listings:
if(l.scraped_dt > listing.scraped_dt):
return str(l.scraped_dt)
return make_response("")
if __name__ == "__main__":
app.run()
| []
| []
| [
"APP_SETTINGS"
]
| [] | ["APP_SETTINGS"] | python | 1 | 0 | |
couchbase_v2/tests/cases/cluster_t.py | #
# Copyright 2017, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import SkipTest
from couchbase_tests.base import CouchbaseTestCase
from couchbase_core.connstr import ConnectionString
from couchbase_core.cluster import _Cluster as Cluster
from couchbase.auth import MixedAuthException, PasswordAuthenticator, ClassicAuthenticator, CertAuthenticator
import os
import warnings
from couchbase.exceptions import NetworkException, CouchbaseFatalException, CouchbaseInputException, CouchbaseException
CERT_PATH = os.getenv("PYCBC_CERT_PATH")
class ClusterTest(CouchbaseTestCase):
def _create_cluster(self):
connargs = self.make_connargs()
connstr = ConnectionString.parse(
str(connargs.pop('connection_string')))
connstr.clear_option('username')
bucket = connstr.bucket
connstr.bucket = None
password = connargs.get('password', '')
# Can I open a new bucket via open_bucket?
cluster = Cluster(connstr, bucket_factory=self.factory)
cluster.authenticate(
ClassicAuthenticator(
buckets={
bucket: password},
cluster_password=self.cluster_info.admin_password,
cluster_username=self.cluster_info.admin_username))
return cluster, bucket
def test_cluster(self):
cluster, bucket_name = self._create_cluster()
cb = cluster.open_bucket(bucket_name)
key = self.gen_key('cluster_test')
cb.upsert(key, 'cluster test')
def test_no_mixed_auth(self):
cluster, bucket_name = self._create_cluster()
auther = PasswordAuthenticator(bucket_name,
self.cluster_info.bucket_password)
cluster.authenticate(auther)
cb1 = cluster.open_bucket(bucket_name)
self.assertRaises(MixedAuthException, cluster.open_bucket, bucket_name,
password=self.cluster_info.bucket_password)
cluster2, bucket_name = self._create_cluster()
cb2 = cluster2.open_bucket(bucket_name,
password=self.cluster_info.bucket_password)
def test_PYCBC_488(self):
cluster = Cluster(
'couchbases://10.142.175.101?certpath=/Users/daschl/tmp/ks/chain.pem&keypath=/Users/daschl/tmp/ks/pkey.key')
with self.assertRaises(MixedAuthException) as maerr:
cluster.open_bucket("pixels",
password=self.cluster_info.bucket_password)
exception = maerr.exception
self.assertIsInstance(exception, MixedAuthException)
self.assertRegex(exception.message, r'.*CertAuthenticator.*password.*')
def test_PYCBC_489(self):
from couchbase_v2.cluster import Cluster
with self.assertRaises(MixedAuthException) as maerr:
cluster = Cluster(
'couchbases://10.142.175.101?certpath=/Users/daschl/tmp/ks/chain.pem&keypath=/Users/daschl/tmp/ks/pkey.key')
cb = cluster.open_bucket('pixels', password='foo')
cb.upsert('u:king_arthur',
{'name': 'Arthur',
'email': '[email protected]',
'interests': ['Holy Grail',
'African Swallows']})
exception = maerr.exception
self.assertIsInstance(exception, MixedAuthException)
self.assertRegex(
exception.message,
r'.*CertAuthenticator-style.*password.*')
def test_no_mixed_cert_auth(self):
cluster3, bucket_name = self._create_cluster()
auther_cert = CertAuthenticator(cert_path="dummy", key_path="dummy2")
cluster3.authenticate(auther_cert)
with self.assertRaises(MixedAuthException) as maerr:
cluster3.open_bucket(bucket_name,
password=self.cluster_info.bucket_password)
exception = maerr.exception
self.assertIsInstance(exception, MixedAuthException)
self.assertRegex(exception.message, r'.*CertAuthenticator.*password.*')
def _create_cluster_clean(self, authenticator):
connargs = self.make_connargs()
connstr = ConnectionString.parse(
str(connargs.pop('connection_string')))
connstr.clear_option('username')
bucket = connstr.bucket
connstr.bucket = None
password = connargs.get('password', None)
keys_to_skip = authenticator.get_credentials(bucket)['options'].keys()
for entry in keys_to_skip:
connstr.clear_option(entry)
cluster = Cluster(connstr, bucket_factory=self.factory)
cluster.authenticate(ClassicAuthenticator(buckets={bucket: password}))
return cluster, bucket
def test_cert_auth(self):
certpath = getattr(self.cluster_info, 'certpath', None)
keypath = getattr(self.cluster_info, 'keypath', None)
auther_cert = CertAuthenticator(
cert_path=certpath or "dummy",
key_path=keypath or "dummy")
cluster3, bucket_name = self._create_cluster_clean(auther_cert)
cluster3.authenticate(auther_cert)
try:
cluster3.open_bucket(bucket_name)
except CouchbaseException as e:
self.assertRegex(str(e), r'.*LCB_ERR_SSL_ERROR.*')
if self.is_realserver and certpath and keypath:
raise e
else:
raise SkipTest("SSL error but expected so skipping")
def test_pathless_connstr(self):
# Not strictly a cluster test, but relevant
connstr = ConnectionString.parse(
'couchbase://localhost?opt1=val1&opt2=val2')
self.assertTrue('opt1' in connstr.options)
self.assertTrue('opt2' in connstr.options)
def test_validate_authenticate(self):
cluster, bucket_name = self._create_cluster()
self.assertRaises(
ValueError,
cluster.authenticate,
username=None,
password=None)
self.assertRaises(
ValueError,
cluster.authenticate,
username='',
password='')
self.assertRaises(
ValueError,
cluster.authenticate,
username='username',
password=None)
self.assertRaises(
ValueError,
cluster.authenticate,
username='username',
password='')
self.assertRaises(
ValueError,
cluster.authenticate,
username=None,
password='password')
self.assertRaises(
ValueError,
cluster.authenticate,
username='',
password='password')
def test_can_authenticate_with_username_password(self):
cluster, bucket_name = self._create_cluster()
cluster.authenticate(username='Administrator', password='password')
bucket = cluster.open_bucket(bucket_name)
self.assertIsNotNone(bucket)
def _test_allow_cert_path_with_SSL_mock_errors(
self, func, *args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
if self.is_realserver and CERT_PATH:
raise
try:
raise e
except NetworkException as f:
self.assertRegex(str(e), r'.*(refused the connection).*')
except CouchbaseFatalException as f:
self.assertRegex(str(e), r'.*(SSL subsystem).*')
except CouchbaseInputException as f:
self.assertRegex(str(e), r'.*(not supported).*')
except CouchbaseException as f:
self.assertRegex(
str(e), r'.*(LCB_ERR_SSL_ERROR|LCB_ERR_SDK_FEATURE_UNAVAILABLE).*')
warnings.warn(
"Got exception {} but acceptable error for Mock with SSL+cert_path tests".format(str(e)))
def test_can_authenticate_with_cert_path_and_username_password_via_PasswordAuthenticator(
self):
cluster = Cluster(
'couchbases://{host}?certpath={certpath}'.format(host=self.cluster_info.host, certpath=CERT_PATH))
authenticator = PasswordAuthenticator(
self.cluster_info.admin_username,
self.cluster_info.admin_password)
cluster.authenticate(authenticator)
self._test_allow_cert_path_with_SSL_mock_errors(
cluster.open_bucket, self.cluster_info.bucket_name)
def test_can_authenticate_with_cert_path_and_username_password_via_ClassicAuthenticator(
self):
cluster = Cluster(
'couchbases://{host}?certpath={certpath}'.format(host=self.cluster_info.host, certpath=CERT_PATH))
authenticator = ClassicAuthenticator(buckets={self.cluster_info.bucket_name: self.cluster_info.bucket_password},
cluster_username=self.cluster_info.admin_username,
cluster_password=self.cluster_info.admin_password)
cluster.authenticate(authenticator)
self._test_allow_cert_path_with_SSL_mock_errors(
cluster.open_bucket, self.cluster_info.bucket_name)
def test_can_authenticate_with_cert_path_and_username_password_via_kwargs(
self):
cluster = Cluster(
'couchbases://{host}?certpath={certpath}'.format(host=self.cluster_info.host, certpath=CERT_PATH))
self._test_allow_cert_path_with_SSL_mock_errors(cluster.open_bucket, self.cluster_info.bucket_name,
username=self.cluster_info.admin_username,
password=self.cluster_info.admin_password)
| []
| []
| [
"PYCBC_CERT_PATH"
]
| [] | ["PYCBC_CERT_PATH"] | python | 1 | 0 | |
api/server/cmd/migrate.go | package cmd
import (
"database/sql"
"fmt"
"log"
"os"
_ "github.com/lib/pq"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
"github.com/urfave/cli/v2"
)
var (
// Number of migrations to apply: zero applies all pending migrations,
// a positive value migrates up n steps and a negative value rolls back n steps
migrationsNum int
// The path from which to pick up the migrations files
migrationsPath string
migrateCommand = &cli.Command{
Name: "migrate",
Usage: "Run migrations",
Action: migrateCmd,
Flags: []cli.Flag{
&cli.IntFlag{
Name: "num",
Usage: "number of migrations to apply of the format +n" +
"or -n. Where n is an integer. Without a value all" +
"migrations will be applied.",
Required: false,
Destination: &migrationsNum,
},
&cli.StringFlag{
Name: "migrationsPath",
Usage: "number of migrations to apply of the format +n" +
"or -n. Where n is an integer. Without a value all" +
"migrations will be applied.",
Required: true,
Destination: &migrationsPath,
},
},
}
)
func migrateCmd(c *cli.Context) error {
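// Build the Postgres connection string from the DB_* environment variables
// (sslmode disabled).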
db, err := sql.Open("postgres",
fmt.Sprintf("postgres://%s:%s@%s:%s/%s?sslmode=disable",
os.Getenv("DB_USER"),
os.Getenv("DB_PASS"),
os.Getenv("DB_HOST"),
os.Getenv("DB_PORT"),
os.Getenv("DB_NAME"),
))
if err != nil {
log.Fatalf("failed to connect db:%s", err.Error())
}
path := fmt.Sprintf("file://%s", migrationsPath)
driver, err := postgres.WithInstance(db, &postgres.Config{})
m, err := migrate.NewWithDatabaseInstance(
path,
"postgres",
driver)
if err != nil {
log.Fatalf("failed db connect: %s", err.Error())
}
if migrationsNum == 0 {
if err := m.Up(); err != nil {
if err == migrate.ErrNoChange {
log.Println("no change")
} else {
log.Fatalf("failed migration: %s", err.Error())
}
}
} else {
if err := m.Steps(migrationsNum); err != nil {
log.Fatalf("failed migration steps: %s", err.Error())
}
}
return nil
}
func init() {
register(migrateCommand)
}
| [
"\"DB_USER\"",
"\"DB_PASS\"",
"\"DB_HOST\"",
"\"DB_PORT\"",
"\"DB_NAME\""
]
| []
| [
"DB_HOST",
"DB_PORT",
"DB_NAME",
"DB_PASS",
"DB_USER"
]
| [] | ["DB_HOST", "DB_PORT", "DB_NAME", "DB_PASS", "DB_USER"] | go | 5 | 0 | |
tests/ssg_test_suite/oscap.py | #!/usr/bin/env python2
from __future__ import print_function
import logging
import os.path
import re
import collections
import xml.etree.ElementTree
import json
from ssg_test_suite.log import LogHelper
from ssg_test_suite import test_env
from ssg_test_suite import common
logging.getLogger(__name__).addHandler(logging.NullHandler())
_CONTEXT_RETURN_CODES = {'pass': 0,
'fail': 2,
'error': 1,
'notapplicable': 0,
'fixed': 0}
_ANSIBLE_TEMPLATE = 'urn:xccdf:fix:script:ansible'
_BASH_TEMPLATE = 'urn:xccdf:fix:script:sh'
_XCCDF_NS = 'http://checklists.nist.gov/xccdf/1.2'
def analysis_to_serializable(analysis):
result = dict(analysis)
for key, value in analysis.items():
if type(value) == set:
result[key] = tuple(value)
return result
def save_analysis_to_json(analysis, output_fname):
analysis2 = analysis_to_serializable(analysis)
with open(output_fname, "w") as f:
json.dump(analysis2, f)
def triage_xml_results(fname):
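# Parse the XCCDF/ARF results file and group rule IDs by their result
# status (pass, fail, error, notapplicable, ...).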
tree = xml.etree.ElementTree.parse(fname)
all_xml_results = tree.findall(".//{%s}rule-result" % _XCCDF_NS)
triaged = collections.defaultdict(set)
for result in list(all_xml_results):
idref = result.get("idref")
status = result.find("{%s}result" % _XCCDF_NS).text
triaged[status].add(idref)
return triaged
def send_files_remote(verbose_path, remote_dir, domain_ip, *files):
"""Upload files to VM."""
# files is a list of absolute paths on the host
success = True
destination = 'root@{0}:{1}'.format(domain_ip, remote_dir)
files_string = ' '.join(files)
logging.debug('Uploading files {0} to {1}'.format(files_string,
destination))
command = ['scp'] + common.IGNORE_KNOWN_HOSTS_OPTIONS + list(files) + [destination]
if common.run_cmd_local(command, verbose_path)[0] != 0:
logging.error('Failed to upload files {0}'.format(files_string))
success = False
return success
def get_file_remote(verbose_path, local_dir, domain_ip, remote_path):
"""Download a file from VM."""
# remote_path is an absolute path of a file on remote machine
success = True
source = 'root@{0}:{1}'.format(domain_ip, remote_path)
logging.debug('Downloading file {0} to {1}'
.format(source, local_dir))
command = ['scp'] + common.IGNORE_KNOWN_HOSTS_OPTIONS + [source, local_dir]
if common.run_cmd_local(command, verbose_path)[0] != 0:
logging.error('Failed to download file {0}'.format(remote_path))
success = False
return success
def find_result_id_in_output(output):
match = re.search('result id.*$', output, re.IGNORECASE | re.MULTILINE)
if match is None:
return None
# Return the right most word of the match which is the result id.
return match.group(0).split()[-1]
def ansible_playbook_set_hosts(playbook):
"""Updates ansible playbok to apply to all hosts."""
with open(playbook, 'r') as f:
lines = f.readlines()
lines.insert(1, ' - hosts: all\n')
with open(playbook, 'w') as f:
for line in lines:
f.write(line)
def get_result_id_from_arf(arf_path, verbose_path):
command = ['oscap', 'info', arf_path]
command_string = ' '.join(command)
returncode, output = common.run_cmd_local(command, verbose_path)
if returncode != 0:
raise RuntimeError('{0} returned {1} exit code'.
format(command_string, returncode))
res_id = find_result_id_in_output(output)
if res_id is None:
raise RuntimeError('Failed to find result ID in {0}'
.format(arf_path))
return res_id
def generate_fixes_remotely(formatting, verbose_path):
command_base = ['oscap', 'xccdf', 'generate', 'fix']
command_options = [
'--benchmark-id', formatting['benchmark_id'],
'--profile', formatting['profile'],
'--template', formatting['output_template'],
'--output', '/{output_file}'.format(** formatting),
]
command_operands = ['/{arf_file}'.format(** formatting)]
if 'result_id' in formatting:
command_options.extend(['--result-id', formatting['result_id']])
command_string = ' '.join(command_base + command_options + command_operands)
rc, stdout = common.run_cmd_remote(
command_string, formatting['domain_ip'], verbose_path)
if rc != 0:
msg = ('Command {0} ended with return code {1} (expected 0).'
.format(command_string, rc))
raise RuntimeError(msg)
def run_stage_remediation_ansible(run_type, formatting, verbose_path):
"""
Returns False on error, or True in case of a successful Ansible playbook
run."""
formatting['output_template'] = _ANSIBLE_TEMPLATE
send_arf_to_remote_machine_and_generate_remediations_there(
run_type, formatting, verbose_path)
if not get_file_remote(verbose_path, LogHelper.LOG_DIR,
formatting['domain_ip'],
'/' + formatting['output_file']):
return False
ansible_playbook_set_hosts(formatting['playbook'])
command = (
'ansible-playbook', '-i', '{0},'.format(formatting['domain_ip']),
'-u' 'root', formatting['playbook'])
command_string = ' '.join(command)
returncode, output = common.run_cmd_local(command, verbose_path)
# Appends output of ansible-playbook to the verbose_path file.
with open(verbose_path, 'a') as f:
f.write('Stdout of "{}":'.format(command_string))
f.write(output)
if returncode != 0:
msg = (
'Ansible playbook remediation run has '
'exited with return code {} instead of expected 0'
.format(returncode))
LogHelper.preload_log(logging.ERROR, msg, 'fail')
return False
return True
def run_stage_remediation_bash(run_type, formatting, verbose_path):
"""
Returns False on error, or True in case of a successful bash script
run."""
formatting['output_template'] = _BASH_TEMPLATE
send_arf_to_remote_machine_and_generate_remediations_there(
run_type, formatting, verbose_path)
if not get_file_remote(verbose_path, LogHelper.LOG_DIR,
formatting['domain_ip'],
'/' + formatting['output_file']):
return False
command_string = '/bin/bash /{output_file}'.format(** formatting)
returncode, output = common.run_cmd_remote(
command_string, formatting['domain_ip'], verbose_path)
# Appends output of script execution to the verbose_path file.
with open(verbose_path, 'a') as f:
f.write('Stdout of "{}":'.format(command_string))
f.write(output)
if returncode != 0:
msg = (
'Bash script remediation run has exited with return code {} '
'instead of expected 0'.format(returncode))
LogHelper.preload_log(logging.ERROR, msg, 'fail')
return False
return True
def send_arf_to_remote_machine_and_generate_remediations_there(
run_type, formatting, verbose_path):
if run_type == 'rule':
try:
res_id = get_result_id_from_arf(formatting['arf'], verbose_path)
except Exception as exc:
logging.error(str(exc))
return False
formatting['result_id'] = res_id
if not send_files_remote(
verbose_path, '/', formatting['domain_ip'], formatting['arf']):
return False
try:
generate_fixes_remotely(formatting, verbose_path)
except Exception as exc:
logging.error(str(exc))
return False
class GenericRunner(object):
def __init__(self, environment, profile, datastream, benchmark_id):
self.environment = environment
self.profile = profile
self.datastream = datastream
self.benchmark_id = benchmark_id
self.arf_file = ''
self.arf_path = ''
self.verbose_path = ''
self.report_path = ''
self.results_path = ''
self.stage = 'undefined'
self.clean_files = False
self._filenames_to_clean_afterwards = set()
self.command_base = []
self.command_options = []
self.command_operands = []
def _make_arf_path(self):
self.arf_file = self._get_arf_file()
self.arf_path = os.path.join(LogHelper.LOG_DIR, self.arf_file)
def _get_arf_file(self):
raise NotImplementedError()
def _make_verbose_path(self):
verbose_file = self._get_verbose_file()
verbose_path = os.path.join(LogHelper.LOG_DIR, verbose_file)
self.verbose_path = LogHelper.find_name(verbose_path, '.verbose.log')
def _get_verbose_file(self):
raise NotImplementedError()
def _make_report_path(self):
report_file = self._get_report_file()
report_path = os.path.join(LogHelper.LOG_DIR, report_file)
self.report_path = LogHelper.find_name(report_path, '.html')
def _get_report_file(self):
raise NotImplementedError()
def _make_results_path(self):
results_file = self._get_results_file()
results_path = os.path.join(LogHelper.LOG_DIR, results_file)
self.results_path = LogHelper.find_name(results_path, '.xml')
def _get_results_file(self):
raise NotImplementedError()
def _generate_report_file(self):
self.command_options.extend([
'--report', self.report_path,
])
self._filenames_to_clean_afterwards.add(self.report_path)
def prepare_online_scanning_arguments(self):
self.command_options.extend([
'--benchmark-id', self.benchmark_id,
'--profile', self.profile,
'--verbose', 'DEVEL',
'--progress', '--oval-results',
])
self.command_operands.append(self.datastream)
def run_stage(self, stage):
self.stage = stage
self._make_verbose_path()
self._make_report_path()
self._make_arf_path()
self._make_results_path()
self.command_base = []
self.command_options = []
self.command_operands = []
result = None
if stage == 'initial':
result = self.initial()
elif stage == 'remediation':
result = self.remediation()
elif stage == 'final':
result = self.final()
else:
raise RuntimeError('Unknown stage: {}.'.format(stage))
if self.clean_files:
for fname in tuple(self._filenames_to_clean_afterwards):
try:
os.remove(fname)
except OSError as exc:
logging.error(
"Failed to cleanup file '{0}'"
.format(fname))
finally:
self._filenames_to_clean_afterwards.remove(fname)
if result:
LogHelper.log_preloaded('pass')
else:
LogHelper.log_preloaded('fail')
return result
@property
def get_command(self):
return self.command_base + self.command_options + self.command_operands
def make_oscap_call(self):
raise NotImplementedError()
def initial(self):
self.command_options += ['--results', self.results_path]
result = self.make_oscap_call()
return result
def remediation(self):
raise NotImplementedError()
def final(self):
self.command_options += ['--results', self.results_path]
result = self.make_oscap_call()
return result
def analyze(self, stage):
triaged_results = triage_xml_results(self.results_path)
triaged_results["stage"] = stage
triaged_results["runner"] = self.__class__.__name__
return triaged_results
def _get_formatting_dict_for_remediation(self):
formatting = {
'domain_ip': self.environment.domain_ip,
'profile': self.profile,
'datastream': self.datastream,
'benchmark_id': self.benchmark_id
}
formatting['arf'] = self.arf_path
formatting['arf_file'] = self.arf_file
return formatting
class ProfileRunner(GenericRunner):
def _get_arf_file(self):
return '{0}-initial-arf.xml'.format(self.profile)
def _get_verbose_file(self):
return '{0}-{1}'.format(self.profile, self.stage)
def _get_report_file(self):
return '{0}-{1}'.format(self.profile, self.stage)
def _get_results_file(self):
return '{0}-{1}-results'.format(self.profile, self.stage)
def make_oscap_call(self):
self.prepare_online_scanning_arguments()
self._generate_report_file()
env = dict(SSH_ADDITIONAL_OPTIONS=" ".join(common.IGNORE_KNOWN_HOSTS_OPTIONS))
env.update(os.environ)
returncode = common.run_cmd_local(self.get_command, self.verbose_path, env=env)[0]
if returncode not in [0, 2]:
logging.error(('Profile run should end with return code 0 or 2 '
'not "{0}" as it did!').format(returncode))
return False
return True
class RuleRunner(GenericRunner):
def __init__(
self, environment, profile, datastream, benchmark_id,
rule_id, script_name, dont_clean):
super(RuleRunner, self).__init__(
environment, profile, datastream, benchmark_id,
)
self.rule_id = rule_id
self.context = None
self.script_name = script_name
self.clean_files = not dont_clean
self._oscap_output = ''
def _get_arf_file(self):
return '{0}-initial-arf.xml'.format(self.rule_id)
def _get_verbose_file(self):
return '{0}-{1}-{2}'.format(self.rule_id, self.script_name, self.stage)
def _get_report_file(self):
return '{0}-{1}-{2}'.format(self.rule_id, self.script_name, self.stage)
def _get_results_file(self):
return '{0}-{1}-{2}-results-{3}'.format(
self.rule_id, self.script_name, self.profile, self.stage)
def make_oscap_call(self):
self.prepare_online_scanning_arguments()
self._generate_report_file()
self.command_options.extend(
['--rule', self.rule_id])
returncode, self._oscap_output = self.environment.scan(
self.command_options + self.command_operands, self.verbose_path)
expected_return_code = _CONTEXT_RETURN_CODES[self.context]
if returncode != expected_return_code:
msg = (
'Scan has exited with return code {0}, '
'instead of expected {1} during stage {2}'
.format(returncode, expected_return_code, self.stage)
)
LogHelper.preload_log(logging.ERROR, msg, 'fail')
return False
return True
def final(self):
success = super(RuleRunner, self).final()
success = success and self._analyze_output_of_oscap_call()
return success
def _analyze_output_of_oscap_call(self):
local_success = True
# check expected result
actual_results = re.findall('{0}:(.*)$'.format(self.rule_id),
self._oscap_output,
re.MULTILINE)
if actual_results:
if self.context not in actual_results:
LogHelper.preload_log(logging.ERROR,
('Rule result should have been '
'"{0}", but is "{1}"!'
).format(self.context,
', '.join(actual_results)),
'fail')
local_success = False
else:
msg = (
'Rule {0} has not been evaluated! Wrong profile selected?'
.format(self.rule_id))
LogHelper.preload_log(logging.ERROR, msg, 'fail')
local_success = False
return local_success
def _get_formatting_dict_for_remediation(self):
fmt = super(RuleRunner, self)._get_formatting_dict_for_remediation()
fmt['rule_id'] = self.rule_id
return fmt
def run_stage_with_context(self, stage, context):
self.context = context
return self.run_stage(stage)
class OscapProfileRunner(ProfileRunner):
def remediation(self):
self.command_options += ['--remediate']
return self.make_oscap_call()
class AnsibleProfileRunner(ProfileRunner):
def initial(self):
self.command_options += ['--results-arf', self.arf_path]
return super(AnsibleProfileRunner, self).initial()
def remediation(self):
formatting = self._get_formatting_dict_for_remediation()
formatting['output_file'] = '{0}.yml'.format(self.profile)
formatting['playbook'] = os.path.join(LogHelper.LOG_DIR,
formatting['output_file'])
return run_stage_remediation_ansible('profile', formatting, self.verbose_path)
class BashProfileRunner(ProfileRunner):
def initial(self):
self.command_options += ['--results-arf', self.arf_path]
return super(BashProfileRunner, self).initial()
def remediation(self):
formatting = self._get_formatting_dict_for_remediation()
formatting['output_file'] = '{0}.sh'.format(self.profile)
return run_stage_remediation_bash('profile', formatting, self.verbose_path)
class OscapRuleRunner(RuleRunner):
def remediation(self):
self.command_options += ['--remediate']
return self.make_oscap_call()
class BashRuleRunner(RuleRunner):
def initial(self):
self.command_options += ['--results-arf', self.arf_path]
return super(BashRuleRunner, self).initial()
def remediation(self):
formatting = self._get_formatting_dict_for_remediation()
formatting['output_file'] = '{0}.sh'.format(self.rule_id)
success = run_stage_remediation_bash('rule', formatting, self.verbose_path)
return success
class AnsibleRuleRunner(RuleRunner):
def initial(self):
self.command_options += ['--results-arf', self.arf_path]
return super(AnsibleRuleRunner, self).initial()
def remediation(self):
formatting = self._get_formatting_dict_for_remediation()
formatting['output_file'] = '{0}.yml'.format(self.rule_id)
formatting['playbook'] = os.path.join(LogHelper.LOG_DIR,
formatting['output_file'])
success = run_stage_remediation_ansible('rule', formatting, self.verbose_path)
return success
class Checker(object):
def __init__(self, test_env):
self.test_env = test_env
self.executed_tests = 0
self.datastream = ""
self.benchmark_id = ""
self.remediate_using = ""
def test_target(self, target):
self.start()
try:
self._test_target(target)
except KeyboardInterrupt:
logging.info("Terminating the test run due to keyboard interrupt.")
except RuntimeError as exc:
logging.error("Terminating due to error: {msg}.".format(msg=str(exc)))
finally:
self.finalize()
def run_test_for_all_profiles(self, profiles, test_data=None):
if len(profiles) > 1:
with test_env.SavedState.create_from_environment(self.test_env, "prepared") as state:
args_list = [(p, test_data) for p in profiles]
state.map_on_top(self._run_test, args_list)
elif profiles:
self._run_test(profiles[0], test_data)
def _test_target(self, target):
raise NotImplementedError()
def start(self):
self.executed_tests = 0
try:
self.test_env.start()
except Exception as exc:
msg = ("Failed to start test environment '{0}': {1}"
.format(self.test_env.name, str(exc)))
raise RuntimeError(msg)
def finalize(self):
if not self.executed_tests:
logging.error("Nothing has been tested!")
try:
self.test_env.finalize()
except Exception as exc:
msg = ("Failed to finalize test environment '{0}': {1}"
.format(self.test_env.name, str(exc)))
raise RuntimeError(msg)
REMEDIATION_PROFILE_RUNNERS = {
'oscap': OscapProfileRunner,
'bash': BashProfileRunner,
'ansible': AnsibleProfileRunner,
}
REMEDIATION_RULE_RUNNERS = {
'oscap': OscapRuleRunner,
'bash': BashRuleRunner,
'ansible': AnsibleRuleRunner,
}
REMEDIATION_RUNNER_TO_REMEDIATION_MEANS = {
'oscap': 'bash',
'bash': 'bash',
'ansible': 'ansible',
}
| []
| []
| []
| [] | [] | python | 0 | 0 | |
main_test.go | package main
import (
"context"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
"github.com/jackc/pgx/v4/pgxpool"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
)
var db *pgxpool.Pool
var dbsetup = false
func TestMain(m *testing.M) {
viper.Set("DbConnection", os.Getenv("TEST_DATABASE_URL"))
db, err := DbConnect()
if err != nil {
os.Exit(1)
}
sql := "CREATE EXTENSION IF NOT EXISTS postgis"
_, err = db.Exec(context.Background(), sql)
if err != nil {
os.Exit(1)
}
dbsetup = true
os.Exit(m.Run())
}
func TestDBNoTables(t *testing.T) {
if !dbsetup {
t.Skip("DB integration test suite setup failed, skipping")
}
r := TileRouter()
request, _ := http.NewRequest("GET", "/index.json", nil)
response := httptest.NewRecorder()
r.ServeHTTP(response, request)
assert.Equal(t, 200, response.Code, "OK response is expected")
json_result := strings.TrimSpace(response.Body.String())
json_expect := "{}"
assert.Equal(t, json_expect, json_result, "empty json response is expected")
}
| [
"\"TEST_DATABASE_URL\""
]
| []
| [
"TEST_DATABASE_URL"
]
| [] | ["TEST_DATABASE_URL"] | go | 1 | 0 | |
docusign_test.go | // Copyright 2015 James Cote and Liberty Fund, Inc.
// All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// docusign implements a service to use the version 2 Docusign
// rest api. Api documentation may be found at:
// https://www.docusign.com/p/RESTAPIGuide/RESTAPIGuide.htm
// You must define environment variables for the test to run properly.
// The necessary variables are:
// DOCUSIGN_HOST=XXXXXX (set to 'demo.docusign.net' for non-prod testing)
// DOCUSIGN_USERNAME=XXXXXXXXXX
// DOCUSIGN_PASSWORD=XXXXXXXXXXx
// DOCUSIGN_ACCTID=XXXXXX
// DOCUSIGN_APIKEY=XXXX-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX (the integrator key)
// DOCUSIGN_TESTENVID=XXXXXXXXX
// DOCUSIGN_TEMPLATEID=XxxXXXXXX
//
//
// If you wish to skip generating an oauth2 token, you may define an environment
// variable named DOCUSIGN_TOKEN which contains an existing token.
//
// A draft envelope will be created in the Docusign demo environment with the subject "Created by Go Test".
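//
// As a minimal, illustrative setup (placeholder values only, not real
// credentials; the variable names are the ones actually read via os.Getenv in
// TestCalls below), the suite can be run with something like:
//
//	export DOCUSIGN_HOST=demo.docusign.net
//	export DOCUSIGN_USERNAME=XXXXXXXXXX
//	export DOCUSIGN_PASSWORD=XXXXXXXXXX
//	export DOCUSIGN_APIKEY=XXXX-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
//	export DOCUSIGN_ACCTID=XXXXXX
//	export DOCUSIGN_TESTENVID=XXXXXXXXX
//	export DOCUSIGN_TEMPLATEID=XXXXXXXXX
//	go test ./...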
package docusign
import (
"bytes"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"mime/multipart"
"net/http"
"os"
"testing"
"time"
"golang.org/x/net/context"
)
var testEnvId string
var testTemplateId string
var testCtx context.Context
func TestCalls(t *testing.T) {
ctx := context.WithValue(context.Background(), HTTPClient, http.DefaultClient)
testEnvId = os.Getenv("DOCUSIGN_TESTENVID")
testTemplateId = os.Getenv("DOCUSIGN_TEMPLATEID")
testToken := os.Getenv("DOCUSIGN_TOKEN")
testTemplateId = "321d2832-1244-48f7-a6db-949c2cd319c0"
cfg := &Config{
UserName: os.Getenv("DOCUSIGN_USERNAME"),
Password: os.Getenv("DOCUSIGN_PASSWORD"),
IntegratorKey: os.Getenv("DOCUSIGN_APIKEY"),
AccountId: os.Getenv("DOCUSIGN_ACCTID"),
Host: os.Getenv("DOCUSIGN_HOST"),
}
cfg.UserName = "0ba0d798-49ca-43c3-88dc-840d6bcb37af"
cfg.Password = "1DLfrdLa/68U4uzty+pAhM3TUTg="
if cfg.UserName == "" || cfg.Password == "" || cfg.IntegratorKey == "" || cfg.AccountId == "" {
t.Errorf("Invalid Config")
return
}
var err error
var c *OauthCredential
if testToken > "" {
c = &OauthCredential{AccessToken: testToken, AccountId: cfg.AccountId, Host: cfg.Host}
} else {
c, err = cfg.OauthCredential(ctx)
if err != nil {
t.Errorf("Ouauth2 token fail: %v", err)
return
}
t.Logf("Token: %#v\n", c)
defer func() {
if err := c.Revoke(ctx); err != nil {
t.Errorf("Revoke token failed: %v", err)
}
}()
}
sv := New(c, "")
_, err = sv.GetTemplate(ctx, testTemplateId)
if err != nil {
t.Errorf("GetTemplate: %v", err)
return
}
r, err := sv.TemplateSearch(ctx)
if err != nil {
t.Errorf("TemplateSearch: %v", err)
return
}
for _, et := range r.EnvelopeTemplates {
t.Logf("%s %s", et.TemplateId, et.Name)
}
// Get Draft Folder
var draftFolder string
fl, err := sv.FolderList(ctx, FolderTemplatesInclude)
if err != nil {
t.Errorf("GetFolderList: %v", err)
return
}
for _, fd := range fl.Folders {
if fd.Name == "Draft" {
draftFolder = fd.FolderId
}
}
if draftFolder == "" {
t.Errorf("Unable to find Draft folder")
return
}
_, err = sv.AccountCustomFields(ctx)
if err != nil {
t.Errorf("AccountCustomFields error: %v", err)
return
}
_, err = sv.EnvelopeStatusChanges(ctx, StatusChangeToDate(time.Now()), StatusChangeFromDate(time.Now().AddDate(0, 0, -1)),
StatusChangeStatusCode("created"), StatusChangeFromToStatus("created"), StatusChangeCustomField("PID", "123456"))
//(time.Now().Add(time.Hour*24*-30)), StatusChangeToDate(time.Now()))
if err != nil {
t.Errorf("EnvelopeStatusChanges error: %v", err)
return
}
_, err = sv.EnvelopeSearch(ctx, SearchFolderDrafts, EnvelopeSearchCount(3), EnvelopeSearchFromDate(time.Now().AddDate(0, -1, 0)),
EnvelopeSearchToDate(time.Now()), EnvelopeSearchIncludeRecipients)
if err != nil {
t.Errorf("EnvelopeSearch error: %v", err)
return
}
testEnv := testEnvelopePayload(cfg.UserName)
file, err := os.Open("testdata/TestDocument.pdf")
if err != nil {
t.Errorf("Unable to open TestDocument.pdf: %v", err)
}
defer file.Close()
u := &UploadFile{
ContentType: "application/pdf",
FileName: "TestData.pdf",
Id: "1",
Data: file,
}
ex, err := sv.EnvelopeCreate(ctx, testEnv, u)
if err != nil {
t.Errorf("CreateEnvelope: %v", err)
return
}
testEnvId = ex.EnvelopeId
t.Logf("Envelope: %s", testEnvId)
return
aTab := &Tabs{
SignerAttachmentTabs: []SignerAttachmentTab{
SignerAttachmentTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "attTab",
},
BasePosTab: BasePosTab{
AnchorString: "SignatureA:",
AnchorXOffset: "240",
AnchorYOffset: "10",
AnchorUnits: "pixels",
PageNumber: "1",
TabId: "9985fd9a-a660-4ff3-983d-eb43706d496d",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "1",
},
Optional: true,
},
},
TextTabs: []TextTab{
TextTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "deleteThisTab",
},
BasePosTab: BasePosTab{
PageNumber: "1",
XPosition: "300",
YPosition: "350",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "1",
},
},
},
}
aTab, err = sv.RecipientTabsAdd(ctx, testEnvId, "1", aTab)
if err != nil {
t.Errorf("Add Tabs error: %v", err)
return
}
var deleteTabId string
if len(aTab.TextTabs) == 1 {
deleteTabId = aTab.TextTabs[0].TabId
}
recList, err := sv.Recipients(ctx, testEnvId, RecipientsIncludeTabs)
if err != nil {
t.Errorf("GetRecipients error: %v\n", err)
return
}
if recList == nil || len(recList.Signers) != 2 {
t.Errorf("Invalid recipients returned.")
return
}
mTabs := &Tabs{
RadioGroupTabs: recList.Signers[1].Tabs.RadioGroupTabs,
ListTabs: recList.Signers[1].Tabs.ListTabs,
TextTabs: []TextTab{
TextTab{Value: "ASFDAFD", BasePosTab: BasePosTab{TabId: "e611bf5f-339c-4ed0-8c71-87ec7f77fdc5"}},
},
}
for i, rd := range mTabs.RadioGroupTabs[0].Radios {
if rd.Value == "val2" {
mTabs.RadioGroupTabs[0].Radios[i].Selected = true
} else {
mTabs.RadioGroupTabs[0].Radios[i].Selected = false
}
}
for i, li := range mTabs.ListTabs[0].ListItems {
xval := DSBool(false)
if li.Value == "Y" {
xval = true
}
mTabs.ListTabs[0].ListItems[i].Selected = xval
}
mTabs.ListTabs[0].Value = "Y Val"
mTabs, err = sv.RecipientTabsModify(ctx, testEnvId, "2", mTabs)
if err != nil {
t.Errorf("Modify Tabs Error: %v", err)
return
}
if len(mTabs.TextTabs) != 1 || mTabs.TextTabs[0].ErrorDetails == nil {
t.Errorf("Wanted INVALID_TAB_OPERATION on TextTab[0]; got nil")
return
}
rTabs := &Tabs{
TextTabs: []TextTab{
TextTab{
BasePosTab: BasePosTab{
TabId: deleteTabId,
},
},
},
}
rTabs, err = sv.RecipientTabsRemove(ctx, testEnvId, "1", rTabs)
if err != nil {
t.Errorf("Error Deleting Tab: %v", err)
return
}
newRecipients := &RecipientList{
Signers: []Signer{
Signer{
EmailRecipient: EmailRecipient{
Email: "[email protected]",
Recipient: Recipient{
Name: "Extra Name",
Note: "This is the ,Note for Extra Name",
EmailNotification: &EmailNotification{EmailBody: "This is the recipient 3 email blurb", EmailSubject: "This is the Subject for recipient 3"},
RecipientId: "3",
RoleName: "Role3",
RoutingOrder: "6",
},
},
},
},
CarbonCopies: []CarbonCopy{
CarbonCopy{
EmailRecipient: EmailRecipient{
Email: "[email protected]",
Recipient: Recipient{
Name: "CC Name",
Note: "This is the ,Note for CCName",
EmailNotification: &EmailNotification{EmailBody: "This is the recipient 4 email blurb", EmailSubject: "This is the Subject for recipient 4"},
RecipientId: "4",
RoleName: "Role4",
RoutingOrder: "5",
},
},
},
},
}
newRecipients, err = sv.RecipientsAdd(ctx, testEnvId, newRecipients)
if err != nil {
t.Errorf("Recipients Add Error: %v", err)
return
}
for i := range newRecipients.Signers {
if newRecipients.Signers[i].RecipientId == "3" {
newRecipients.Signers[i].Name = "Modified Name"
}
}
modRec, err := sv.RecipientsModify(ctx, testEnvId, newRecipients)
if err != nil {
t.Errorf("Recipients Modify Error: %v", err)
return
}
for _, rur := range modRec.recipientUpdateResults {
if rur.ErrorDetails != nil && rur.ErrorDetails.Err == "SUCCESS" {
continue
}
t.Errorf("RecipientsModify error: %v", rur.ErrorDetails)
return
}
return
}
func testEnvelopePayload(userName string) *Envelope {
return &Envelope{
Status: "created",
CustomFields: &CustomFieldList{
TextCustomFields: []CustomField{
CustomField{Name: "PID", Value: "123456"},
CustomField{Name: "Project", Value: "P1"},
},
},
Documents: []Document{
Document{
DocumentFields: []NmVal{
NmVal{Name: "Pid", Value: "122312"},
NmVal{Name: "DocType", Value: "TestDoc"},
},
DocumentId: "1",
Name: "TestDoc.pdf",
Order: "1",
},
},
EmailSubject: "Created by Go Test",
EmailBlurb: "Dear Person: Please read <strong>this</strong>.",
Recipients: &RecipientList{
Signers: []Signer{
Signer{
EmailRecipient: EmailRecipient{
Email: userName,
Recipient: Recipient{
Name: "My Name",
Note: "This is the ,Note for My Name",
EmailNotification: &EmailNotification{EmailBody: "This is the recipient 1 email blurb", EmailSubject: "This is the Subject for recipient 1"},
RecipientId: "1",
RoleName: "Role1",
RoutingOrder: "1",
},
},
BaseSigner: BaseSigner{
Tabs: &Tabs{
TextTabs: []TextTab{
TextTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "txtTextFieldA",
},
BasePosTab: BasePosTab{
AnchorString: "TextFieldA:",
AnchorXOffset: "40",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "1",
},
Value: "Value 1",
},
},
SignHereTabs: []SignHereTab{
SignHereTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "signHereA",
},
BasePosTab: BasePosTab{
AnchorString: "SignatureA:",
AnchorXOffset: "40",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "1",
},
},
},
DateSignedTabs: []DateSignedTab{
DateSignedTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "dtSignedA",
},
BasePosTab: BasePosTab{
AnchorString: "DateSignedA:",
PageNumber: "1",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "1",
},
},
},
},
},
},
Signer{
EmailRecipient: EmailRecipient{
Email: "[email protected]",
Recipient: Recipient{
Name: "XXX YYYY",
Note: "Note for Recipient 2",
EmailNotification: &EmailNotification{EmailBody: "This is the recipient 2 email blurb", EmailSubject: "This is the Subject for recipient 2"},
RecipientId: "2",
RoleName: "Role2",
RoutingOrder: "2",
},
},
BaseSigner: BaseSigner{
Tabs: &Tabs{
TextTabs: []TextTab{
TextTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "txtTextFieldB",
},
BasePosTab: BasePosTab{
AnchorString: "TextFieldB:",
AnchorXOffset: "40",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "2",
},
Value: "Value 2",
},
},
SignHereTabs: []SignHereTab{
SignHereTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "signHereA",
},
BasePosTab: BasePosTab{
AnchorString: "SignatureB:",
AnchorXOffset: "40",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "2",
},
},
},
DateSignedTabs: []DateSignedTab{
DateSignedTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "dtSignedB",
},
BasePosTab: BasePosTab{
AnchorString: "DateSignedB:",
AnchorXOffset: "40",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "2",
},
},
},
CheckboxTabs: []CheckboxTab{
CheckboxTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "cbTest",
},
BasePosTab: BasePosTab{
AnchorString: "Checkbox:",
AnchorXOffset: "40",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "2",
},
Selected: true,
},
},
RadioGroupTabs: []RadioGroupTab{
RadioGroupTab{
GroupName: "rbGrp",
RecipientID: "2",
DocumentID: "1",
Radios: []Radio{
Radio{
BasePosTab: BasePosTab{
AnchorString: "rbA",
AnchorXOffset: "28",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
Selected: false,
Value: "val1",
},
Radio{
BasePosTab: BasePosTab{
AnchorString: "rbB",
AnchorXOffset: "28",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
Selected: true,
Value: "val2",
},
Radio{
BasePosTab: BasePosTab{
AnchorString: "rbC",
AnchorXOffset: "28",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
Selected: false,
Value: "val3",
},
},
},
},
ListTabs: []ListTab{
ListTab{
BaseTab: BaseTab{
DocumentID: "1",
TabLabel: "dlDrop",
},
BasePosTab: BasePosTab{
AnchorString: "DropdownList:",
AnchorXOffset: "40",
AnchorYOffset: "-7",
AnchorUnits: "pixels",
PageNumber: "1",
},
BaseTemplateTab: BaseTemplateTab{
RecipientID: "2",
},
//Value: "X",
ListItems: []ListItem{
ListItem{
Selected: true,
Text: "X Val",
Value: "X",
},
ListItem{
Selected: false,
Text: "Y Val",
Value: "Y",
},
ListItem{
Selected: false,
Text: "Z Val",
Value: "Z",
},
},
},
},
},
},
},
},
},
}
}
func TestXML(t *testing.T) {
_ = bytes.NewBufferString("")
f, err := os.Open("testdata/connect.xml")
if err != nil {
t.Fatalf("Open Connect.xml: %v", err)
return
}
v := &ConnectData{}
decoder := xml.NewDecoder(f)
err = decoder.Decode(v)
if err != nil {
t.Fatalf("XML Decode: %v", err)
return
}
if v.EnvelopeStatus.DocumentStatuses[0].Name != "Docusign1.pdf" {
t.Errorf("Invalid Document Name in Connect XML: %s", v.EnvelopeStatus.DocumentStatuses[0].Name)
}
return
}
func TestMultiBody(t *testing.T) {
var payload struct {
A string `json:"a,omitempty"`
B int `json:"b,omitempty"`
}
payload.A = "A"
payload.B = 999
files := []*UploadFile{
&UploadFile{Data: newReadCloser("XXXX"), ContentType: "text/plain", FileName: "fn1", Id: "1"},
&UploadFile{Data: newReadCloser("XXXX"), ContentType: "text/plain", FileName: "fn2", Id: "2"},
&UploadFile{Data: newReadCloser("XXXX"), ContentType: "text/plain", FileName: "fn3", Id: "3"},
}
r, ct := multiBody(payload, files)
defer r.(io.ReadCloser).Close()
mpr := multipart.NewReader(r, ct[30:])
pt, err := mpr.NextPart()
if err != nil {
t.Errorf("Unable to parse part from multireader: %v", err)
return
}
payload.A = ""
payload.B = 0
if err := json.NewDecoder(pt).Decode(&payload); err != nil {
t.Errorf("JSON Unmarshal: %v", err)
return
} else {
if payload.A != "A" || payload.B != 999 {
t.Errorf("Expect A=A and B=999; got %s %d", payload.A, payload.B)
return
}
}
for cnt := 0; cnt < len(files); cnt++ {
if pt, err = mpr.NextPart(); err != nil {
t.Errorf("Unable to parse multipart reader: %v", err)
return
}
if pt.Header.Get("content-disposition") != fmt.Sprintf("file; filename=\"%s\";documentid=%s", files[cnt].FileName, files[cnt].Id) {
t.Errorf("Invalid content-dispostion: %s", pt.Header.Get("content-dispostion"))
}
bx := make([]byte, 4)
if _, err = pt.Read(bx); err != nil {
t.Errorf("Expected EOF: got %v", err)
} else if string(bx) != "XXXX" {
t.Errorf("expectd XXXX; got %s", string(bx))
}
}
}
func newReadCloser(s string) io.ReadCloser {
return byteReadCloser{Buffer: bytes.NewBufferString(s)}
}
type byteReadCloser struct {
*bytes.Buffer
}
func (b byteReadCloser) Close() error {
return nil
}
| [
"\"DOCUSIGN_TESTENVID\"",
"\"DOCUSIGN_TEMPLATEID\"",
"\"DOCUSIGN_TOKEN\"",
"\"DOCUSIGN_USERNAME\"",
"\"DOCUSIGN_PASSWORD\"",
"\"DOCUSIGN_APIKEY\"",
"\"DOCUSIGN_ACCTID\"",
"\"DOCUSIGN_HOST\""
]
| []
| [
"DOCUSIGN_HOST",
"DOCUSIGN_USERNAME",
"DOCUSIGN_TOKEN",
"DOCUSIGN_TEMPLATEID",
"DOCUSIGN_ACCTID",
"DOCUSIGN_PASSWORD",
"DOCUSIGN_APIKEY",
"DOCUSIGN_TESTENVID"
]
| [] | ["DOCUSIGN_HOST", "DOCUSIGN_USERNAME", "DOCUSIGN_TOKEN", "DOCUSIGN_TEMPLATEID", "DOCUSIGN_ACCTID", "DOCUSIGN_PASSWORD", "DOCUSIGN_APIKEY", "DOCUSIGN_TESTENVID"] | go | 8 | 0 | |
mysite/mysite/wsgi.py | """
WSGI config for mysite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.production")
application = get_wsgi_application()
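# Illustrative only (the WSGI server choice is an assumption, not part of this
# project): the ``application`` callable above can be served with, for example,
#
#   gunicorn mysite.wsgi:application --bind 0.0.0.0:8000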
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cmd/gomfweb/main.go | // Copyright (c) 2015 Andy Leap, Google
// SPDX-License-Identifier: MIT
// The gomfweb command runs a simple web server that demonstrates the use of
// the go microformats library. It can parse the microformats found at a URL
// or in a provided snippet of HTML.
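//
// A rough usage sketch (the -addr flag, form parameters and content type come
// from the code in this file; the example URL is purely illustrative):
//
//	gomfweb -addr :4001
//	curl 'http://localhost:4001/?url=https://indieweb.org'
//
// A GET with a ?url= parameter fetches and parses that page and answers with
// application/mf2+json, while posting the form with an html value parses the
// snippet and renders the resulting JSON back into the page.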
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"html/template"
"io"
"log"
"net/http"
"net/url"
"os"
"strings"
"willnorris.com/go/microformats"
)
var addr = flag.String("addr", ":4001", "Address and port to listen on")
func main() {
flag.Parse()
http.Handle("/", http.HandlerFunc(index))
if port := os.Getenv("PORT"); port != "" {
*addr = ":" + port
}
fmt.Printf("gomfweb listening on %s\n", *addr)
log.Fatal(http.ListenAndServe(*addr, nil))
}
func index(w http.ResponseWriter, r *http.Request) {
var parsedURL *url.URL
var err error
u := strings.TrimSpace(r.FormValue("url"))
if u != "" {
parsedURL, err = url.Parse(u)
if err != nil {
http.Error(w, fmt.Sprintf("error parsing url: %v", err), http.StatusBadRequest)
}
}
buf := new(bytes.Buffer)
enc := json.NewEncoder(buf)
enc.SetEscapeHTML(false)
enc.SetIndent("", " ")
if r.Method == "GET" && parsedURL != nil {
resp, err := http.Get(parsedURL.String())
if err != nil {
http.Error(w, fmt.Sprintf("error fetching url content: %v", err), http.StatusInternalServerError)
}
defer resp.Body.Close()
mf := microformats.Parse(resp.Body, parsedURL)
if err := enc.Encode(mf); err != nil {
http.Error(w, fmt.Sprintf("error marshaling json: %v", err), http.StatusInternalServerError)
}
if callback := r.FormValue("callback"); callback != "" {
fmt.Fprintf(w, "%s(%s)", callback, buf.String())
} else {
w.Header().Set("Content-Type", "application/mf2+json")
if _, err := io.Copy(w, buf); err != nil {
log.Print(err)
}
}
return
}
html := r.FormValue("html")
if html != "" {
mf := microformats.Parse(strings.NewReader(html), parsedURL)
if err := enc.Encode(mf); err != nil {
http.Error(w, fmt.Sprintf("error marshaling json: %v", err), http.StatusInternalServerError)
}
}
data := struct {
HTML string
URL string
JSON string
}{
html,
u,
buf.String(),
}
if err := tpl.Execute(w, data); err != nil {
log.Print(err)
}
}
var tpl = template.Must(template.New("").Parse(`<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<title>Go Microformats Parser</title>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css" integrity="sha384-rwoIResjU2yc3z8GV/NPeZWAv56rSmLldC3R/AZzGRnGxQQKnKkoFVhFQhNUwEyJ" crossorigin="anonymous">
<style>
form label { font-weight: bold; }
form textarea, form input[type=url] { font-family: "SF Mono", Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; }
form .form-control:disabled { cursor: default; background: #efefef; color: black; }
</style>
</head>
<body>
<main class="container">
<h1 class="mt-5 mb-3">Microformats Parser (Go)</h1>
<form method="get">
<div class="form-group">
<label for="url">Enter a URL</label>
<input name="url" type="url" placeholder="https://indieweb.org" class="form-control form-control-lg" />
</div>
<button type="submit" class="btn btn-lg btn-success">Parse</button>
</form>
<h2 class="h4 my-5">OR parse just a snippet of HTML</h2>
<form method="post" class="mb-5">
<div class="form-group">
<label for="html">HTML</label>
<textarea id="html" name="html" rows="6" class="form-control form-control-lg">{{ .HTML }}</textarea>
</div>
<div class="form-group">
<label for="base-url">Base URL</label>
<input id="base-url" name="base-url" type="url" value="{{ .URL }}" placeholder="https://indieweb.org" class="form-control form-control-lg" />
</div>
<button type="submit" class="btn btn-lg btn-success">Parse</button>
</form>
{{ with .JSON }}
<div class="form-group mb-5">
<label for="json">JSON</label>
<textarea id="json" name="json" rows="10" class="form-control form-control-lg" disabled="disabled">{{ . }}</textarea>
</div>
{{ end }}
<footer class="mb-5">
<ul>
<li><a href="https://microformats.io">About Microformats</a></li>
<li><a href="https://github.com/willnorris/microformats/tree/master/cmd/gomfweb">Source code for this site</a></li>
<li><a href="https://github.com/willnorris/microformats">Source code for the Microformats Go Parser</a></li>
<li>
Other Microformats Parser websites:
<a href="https://node.microformats.io">Node</a>,
<a href="https://php.microformats.io">PHP</a>,
<a href="https://python.microformats.io">Python</a>, and
<a href="https://ruby.microformats.io">Ruby</a>.
</li>
<li><a href="https://microformats.org/wiki/microformats2#Parsers">More Microformats parsers</a></li>
</ul>
</footer>
</main>
</body>
</html>`))
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
kubetest/main.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"os/exec"
"os/signal"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/spf13/pflag"
"k8s.io/test-infra/boskos/client"
"k8s.io/test-infra/kubetest/conformance"
"k8s.io/test-infra/kubetest/kind"
"k8s.io/test-infra/kubetest/kubeadmdind"
"k8s.io/test-infra/kubetest/process"
"k8s.io/test-infra/kubetest/util"
)
// Hardcoded in ginkgo-e2e.sh
const defaultGinkgoParallel = 25
var (
artifacts = filepath.Join(os.Getenv("WORKSPACE"), "_artifacts")
interrupt = time.NewTimer(time.Duration(0)) // interrupt testing at this time.
terminate = time.NewTimer(time.Duration(0)) // terminate testing at this time.
verbose = false
timeout = time.Duration(0)
boskos, _ = client.NewClient(os.Getenv("JOB_NAME"), "http://boskos.test-pods.svc.cluster.local.", "", "")
control = process.NewControl(timeout, interrupt, terminate, verbose)
)
type options struct {
build buildStrategy
charts bool
checkLeaks bool
checkSkew bool
cluster string
clusterIPRange string
deployment string
down bool
dump string
dumpPreTestLogs string
extract extractStrategies
extractSource bool
flushMemAfterBuild bool
focusRegex string
gcpCloudSdk string
gcpMasterImage string
gcpMasterSize string
gcpNetwork string
gcpNodeImage string
gcpImageFamily string
gcpImageProject string
gcpNodes string
gcpNodeSize string
gcpProject string
gcpProjectType string
gcpServiceAccount string
// gcpSSHProxyInstanceName is the name of the vm instance whose ip address will be used to set the
// KUBE_SSH_BASTION env. If set, it will result in proxying ssh connections in tests through the
// "bastion". It's useful for clusters with nodes without public ssh access, e.g. nodes without
// public ip addresses. Works only for gcp providers (gce, gke).
gcpSSHProxyInstanceName string
gcpRegion string
gcpZone string
ginkgoParallel ginkgoParallelValue
kubecfg string
kubemark bool
kubemarkMasterSize string
kubemarkNodes string // TODO(fejta): switch to int after migration
logexporterGCSPath string
metadataSources string
noAllowDup bool
nodeArgs string
nodeTestArgs string
nodeTests bool
provider string
publish string
runtimeConfig string
save string
skew bool
skipRegex string
soak bool
soakDuration time.Duration
sshUser string
stage stageStrategy
test bool
testArgs string
testCmd string
testCmdName string
testCmdArgs []string
up bool
upgradeArgs string
boskosWaitDuration time.Duration
}
func defineFlags() *options {
o := options{}
flag.Var(&o.build, "build", "Rebuild k8s binaries, optionally forcing (release|quick|bazel) strategy")
flag.BoolVar(&o.charts, "charts", false, "If true, run charts tests")
flag.BoolVar(&o.checkSkew, "check-version-skew", true, "Verify client and server versions match")
flag.BoolVar(&o.checkLeaks, "check-leaked-resources", false, "Ensure project ends with the same resources")
flag.StringVar(&o.cluster, "cluster", "", "Cluster name. Must be set for --deployment=gke (TODO: other deployments).")
flag.StringVar(&o.clusterIPRange, "cluster-ip-range", "", "Specifies CLUSTER_IP_RANGE value during --up and --test (only relevant for --deployment=bash). Auto-calculated if empty.")
flag.StringVar(&o.deployment, "deployment", "bash", "Choices: none/bash/conformance/gke/eks/kind/kops/kubernetes-anywhere/node/local")
flag.BoolVar(&o.down, "down", false, "If true, tear down the cluster before exiting.")
flag.StringVar(&o.dump, "dump", "", "If set, dump bring-up and cluster logs to this location on test or cluster-up failure")
flag.StringVar(&o.dumpPreTestLogs, "dump-pre-test-logs", "", "If set, dump cluster logs to this location before running tests")
flag.Var(&o.extract, "extract", "Extract k8s binaries from the specified release location")
flag.BoolVar(&o.extractSource, "extract-source", false, "Extract k8s src together with other tarballs")
flag.BoolVar(&o.flushMemAfterBuild, "flush-mem-after-build", false, "If true, try to flush container memory after building")
flag.Var(&o.ginkgoParallel, "ginkgo-parallel", fmt.Sprintf("Run Ginkgo tests in parallel, default %d runners. Use --ginkgo-parallel=N to specify an exact count.", defaultGinkgoParallel))
flag.StringVar(&o.gcpCloudSdk, "gcp-cloud-sdk", "", "Install/upgrade google-cloud-sdk to the gs:// path if set")
flag.StringVar(&o.gcpProject, "gcp-project", "", "For use with gcloud commands")
flag.StringVar(&o.gcpProjectType, "gcp-project-type", "", "Explicitly indicate which project type to select from boskos")
flag.StringVar(&o.gcpServiceAccount, "gcp-service-account", "", "Service account to activate before using gcloud")
flag.StringVar(&o.gcpZone, "gcp-zone", "", "For use with gcloud commands")
flag.StringVar(&o.gcpRegion, "gcp-region", "", "For use with gcloud commands")
flag.StringVar(&o.gcpNetwork, "gcp-network", "", "Cluster network. Must be set for --deployment=gke (TODO: other deployments).")
flag.StringVar(&o.gcpMasterImage, "gcp-master-image", "", "Master image type (cos|debian on GCE, n/a on GKE)")
flag.StringVar(&o.gcpMasterSize, "gcp-master-size", "", "(--provider=gce only) Size of master to create (e.g n1-standard-1). Auto-calculated if left empty.")
flag.StringVar(&o.gcpNodeImage, "gcp-node-image", "", "Node image type (cos|container_vm on GKE, cos|debian on GCE)")
flag.StringVar(&o.gcpImageFamily, "image-family", "", "Node image family from which to use the latest image, required when --gcp-node-image=CUSTOM")
flag.StringVar(&o.gcpImageProject, "image-project", "", "Project containing node image family, required when --gcp-node-image=CUSTOM")
flag.StringVar(&o.gcpNodes, "gcp-nodes", "", "(--provider=gce only) Number of nodes to create.")
flag.StringVar(&o.gcpNodeSize, "gcp-node-size", "", "(--provider=gce only) Size of nodes to create (e.g n1-standard-1).")
flag.StringVar(&o.gcpSSHProxyInstanceName, "gcp-ssh-proxy-instance-name", "", "(--provider=gce|gke only) If set, will result in proxying the ssh connections via the provided instance name while running tests")
flag.StringVar(&o.kubecfg, "kubeconfig", "", "The location of a kubeconfig file.")
flag.StringVar(&o.focusRegex, "ginkgo-focus", "", "The ginkgo regex to focus. Currently only respected for (dind).")
flag.StringVar(&o.skipRegex, "ginkgo-skip", "", "The ginkgo regex to skip. Currently only respected for (dind).")
flag.BoolVar(&o.kubemark, "kubemark", false, "If true, run kubemark tests.")
flag.StringVar(&o.kubemarkMasterSize, "kubemark-master-size", "", "Kubemark master size (only relevant if --kubemark=true). Auto-calculated based on '--kubemark-nodes' if left empty.")
flag.StringVar(&o.kubemarkNodes, "kubemark-nodes", "5", "Number of kubemark nodes to start (only relevant if --kubemark=true).")
flag.StringVar(&o.logexporterGCSPath, "logexporter-gcs-path", "", "Path to the GCS artifacts directory to dump logs from nodes. Logexporter gets enabled if this is non-empty")
flag.StringVar(&o.metadataSources, "metadata-sources", "images.json", "Comma-separated list of files inside ./artifacts to merge into metadata.json")
flag.StringVar(&o.nodeArgs, "node-args", "", "Args for node e2e tests.")
flag.StringVar(&o.nodeTestArgs, "node-test-args", "", "Test args specifically for node e2e tests.")
flag.BoolVar(&o.noAllowDup, "no-allow-dup", false, "if set --allow-dup will not be passed to push-build and --stage will error if the build already exists on the gcs path")
flag.BoolVar(&o.nodeTests, "node-tests", false, "If true, run node-e2e tests.")
flag.StringVar(&o.provider, "provider", "", "Kubernetes provider such as gce, gke, aws, eks, etc")
flag.StringVar(&o.publish, "publish", "", "Publish version to the specified gs:// path on success")
flag.StringVar(&o.runtimeConfig, "runtime-config", "batch/v2alpha1=true", "If set, API versions can be turned on or off while bringing up the API server.")
flag.StringVar(&o.stage.dockerRegistry, "registry", "", "Push images to the specified docker registry (e.g. gcr.io/a-test-project)")
flag.StringVar(&o.save, "save", "", "Save credentials to gs:// path on --up if set (or load from there if not --up)")
flag.BoolVar(&o.skew, "skew", false, "If true, run tests in another version at ../kubernetes/hack/e2e.go")
flag.BoolVar(&o.soak, "soak", false, "If true, job runs in soak mode")
flag.DurationVar(&o.soakDuration, "soak-duration", 7*24*time.Hour, "Maximum age of a soak cluster before it gets recycled")
flag.Var(&o.stage, "stage", "Upload binaries to gs://bucket/devel/job-suffix if set")
flag.StringVar(&o.stage.versionSuffix, "stage-suffix", "", "Append suffix to staged version when set")
flag.BoolVar(&o.test, "test", false, "Run Ginkgo tests.")
flag.StringVar(&o.testArgs, "test_args", "", "Space-separated list of arguments to pass to Ginkgo test runner.")
flag.StringVar(&o.testCmd, "test-cmd", "", "command to run against the cluster instead of Ginkgo e2e tests")
flag.StringVar(&o.testCmdName, "test-cmd-name", "", "name to log the test command as in xml results")
flag.DurationVar(&timeout, "timeout", time.Duration(0), "Terminate testing after the timeout duration (s/m/h)")
flag.BoolVar(&o.up, "up", false, "If true, start the e2e cluster. If cluster is already up, recreate it.")
flag.StringVar(&o.upgradeArgs, "upgrade_args", "", "If set, run upgrade tests before other tests")
flag.DurationVar(&o.boskosWaitDuration, "boskos-wait-duration", 5*time.Minute, "Defines how long to wait for a Boskos resource before giving up, default 5 minutes")
// The "-v" flag was also used by glog, which is used by k8s.io/client-go. Duplicate flags cause panics.
// 1. Even if we could convince glog to change, they have too many consumers to ever do so.
// 2. The glog lib parses flags during init. It is impossible to dynamically rewrite the args before they're parsed by glog.
// 3. The glog lib takes an int value, so "-v false" is an error.
// 4. It's possible, but unlikely, we could convince k8s.io/client-go to use a logging shim, because a library shouldn't force a logging implementation. This would take a major version release for the lib.
//
// The most reasonable solution is to accept that we shouldn't have made a single-letter global, and rename all references to this variable.
flag.BoolVar(&verbose, "verbose-commands", true, "If true, print all command output.")
// go flag does not support StringArrayVar
pflag.StringArrayVar(&o.testCmdArgs, "test-cmd-args", []string{}, "args for test-cmd")
return &o
}
var suite util.TestSuite
func validWorkingDirectory() error {
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("could not get pwd: %v", err)
}
acwd, err := filepath.Abs(cwd)
if err != nil {
return fmt.Errorf("failed to convert %s to an absolute path: %v", cwd, err)
}
// This also matches "kubernetes_skew" for upgrades.
if !strings.Contains(filepath.Base(acwd), "kubernetes") {
return fmt.Errorf("must run from kubernetes directory root. current: %v", acwd)
}
return nil
}
type deployer interface {
Up() error
IsUp() error
DumpClusterLogs(localPath, gcsPath string) error
TestSetup() error
Down() error
GetClusterCreated(gcpProject string) (time.Time, error)
KubectlCommand() (*exec.Cmd, error)
}
// publisher is implemented by deployers that want to publish status on success
type publisher interface {
// Publish is called when the tests were successful; the deployer should publish a success file
Publish() error
}
func getDeployer(o *options) (deployer, error) {
switch o.deployment {
case "bash":
return newBash(&o.clusterIPRange, o.gcpProject, o.gcpZone, o.gcpSSHProxyInstanceName, o.provider), nil
case "conformance":
return conformance.NewDeployer(o.kubecfg)
case "gke":
return newGKE(o.provider, o.gcpProject, o.gcpZone, o.gcpRegion, o.gcpNetwork, o.gcpNodeImage, o.gcpImageFamily, o.gcpImageProject, o.cluster, o.gcpSSHProxyInstanceName, &o.testArgs, &o.upgradeArgs)
case "eks":
return newEKS(timeout, verbose)
case "kind":
return kind.NewDeployer(control, string(o.build))
case "kops":
return newKops(o.provider, o.gcpProject, o.cluster)
case "kubeadm-dind":
return kubeadmdind.NewDeployer(control)
case "kubernetes-anywhere":
return newKubernetesAnywhere(o.gcpProject, o.gcpZone)
case "node":
return nodeDeploy{}, nil
case "none":
return noneDeploy{}, nil
case "local":
return newLocalCluster(), nil
case "aksengine":
return newAKSEngine()
//TODO: Remove acs related lines after baking period
case "acsengine":
return newAKSEngine()
default:
return nil, fmt.Errorf("unknown deployment strategy %q", o.deployment)
}
}
func validateFlags(o *options) error {
if !o.extract.Enabled() && o.extractSource {
return errors.New("--extract-source flag cannot be passed without --extract")
}
return nil
}
func main() {
log.SetFlags(log.LstdFlags | log.Lshortfile)
// Initialize global pseudo random generator. Initializing it to select random AWS Zones.
rand.Seed(time.Now().UnixNano())
pflag.CommandLine = pflag.NewFlagSet(os.Args[0], pflag.ContinueOnError)
o := defineFlags()
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
if err := pflag.CommandLine.Parse(os.Args[1:]); err != nil {
log.Fatalf("Flag parse failed: %v", err)
}
if err := validateFlags(o); err != nil {
log.Fatalf("Flags validation failed. err: %v", err)
}
control = process.NewControl(timeout, interrupt, terminate, verbose)
// do things when we know we are running in the kubetest image
if os.Getenv("KUBETEST_IN_DOCKER") == "true" {
o.flushMemAfterBuild = true
}
// sanity fix for kind deployer, not set for other deployers to avoid
// breaking changes...
if o.deployment == "kind" {
// always default --dump for kind, in CI use $ARTIFACTS
artifacts := os.Getenv("ARTIFACTS")
if artifacts == "" {
artifacts = "./_artifacts"
}
o.dump = artifacts
}
err := complete(o)
if boskos.HasResource() {
if berr := boskos.ReleaseAll("dirty"); berr != nil {
log.Fatalf("[Boskos] Fail To Release: %v, kubetest err: %v", berr, err)
}
}
if err != nil {
log.Fatalf("Something went wrong: %v", err)
}
}
func complete(o *options) error {
if !terminate.Stop() {
<-terminate.C // Drain the value if necessary.
}
if !interrupt.Stop() {
<-interrupt.C // Drain value
}
if timeout > 0 {
log.Printf("Limiting testing to %s", timeout)
interrupt.Reset(timeout)
}
if o.dump != "" {
defer writeMetadata(o.dump, o.metadataSources)
defer control.WriteXML(&suite, o.dump, time.Now())
}
if o.logexporterGCSPath != "" {
o.testArgs += fmt.Sprintf(" --logexporter-gcs-path=%s", o.logexporterGCSPath)
}
if err := prepare(o); err != nil {
return fmt.Errorf("failed to prepare test environment: %v", err)
}
// Get the deployer before we acquire k8s so any additional flag
// verifications happen early.
deploy, err := getDeployer(o)
if err != nil {
return fmt.Errorf("error creating deployer: %v", err)
}
// Check soaking before run tests
if o.soak {
if created, err := deploy.GetClusterCreated(o.gcpProject); err != nil {
// continue, but log the error
log.Printf("deploy %v, GetClusterCreated failed: %v", o.deployment, err)
} else {
if time.Now().After(created.Add(o.soakDuration)) {
// flip up on - which will tear down previous cluster and start a new one
log.Printf("Previous soak cluster created at %v, will recreate the cluster", created)
o.up = true
}
}
}
if err := acquireKubernetes(o, deploy); err != nil {
return fmt.Errorf("failed to acquire k8s binaries: %v", err)
}
if o.extract.Enabled() {
// If we specified `--extract-source` we will already be in the correct directory
if !o.extractSource {
if err := os.Chdir("kubernetes"); err != nil {
return fmt.Errorf("failed to chdir to kubernetes dir: %v", err)
}
}
}
if err := validWorkingDirectory(); err != nil {
return fmt.Errorf("called from invalid working directory: %v", err)
}
if o.down {
// listen for signals such as ^C and gracefully attempt to clean up
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
for range c {
log.Print("Captured ^C, gracefully attempting to cleanup resources..")
if err = deploy.Down(); err != nil {
log.Printf("Tearing down deployment failed: %v", err)
}
if err != nil {
os.Exit(1)
}
os.Exit(2)
}
}()
}
if err := run(deploy, *o); err != nil {
return err
}
// Publish the successfully tested version when requested
if o.publish != "" {
if err := publish(o.publish); err != nil {
return err
}
}
return nil
}
func acquireKubernetes(o *options, d deployer) error {
// Potentially build kubernetes
if o.build.Enabled() {
var err error
// kind deployer manages build
if k, ok := d.(*kind.Deployer); ok {
err = control.XMLWrap(&suite, "Build", k.Build)
} else if c, ok := d.(*Cluster); ok { // Azure deployer
err = control.XMLWrap(&suite, "Build", func() error {
return c.BuildK8s(o.build)
})
} else {
err = control.XMLWrap(&suite, "Build", o.build.Build)
}
if o.flushMemAfterBuild {
util.FlushMem()
}
if err != nil {
return err
}
}
// Potentially stage build binaries somewhere on GCS
if o.stage.Enabled() {
if err := control.XMLWrap(&suite, "Stage", func() error {
return o.stage.Stage(o.noAllowDup)
}); err != nil {
return err
}
}
// Potentially download existing binaries and extract them.
if o.extract.Enabled() {
err := control.XMLWrap(&suite, "Extract", func() error {
// Should we restore a previous state?
// Restore if we are not upping the cluster
if o.save != "" {
if !o.up {
// Restore version and .kube/config from --up
log.Printf("Overwriting extract strategy to load kubeconfig and version from %s", o.save)
o.extract = extractStrategies{
extractStrategy{
mode: load,
option: o.save,
},
}
}
}
// New deployment, extract new version
return o.extract.Extract(o.gcpProject, o.gcpZone, o.gcpRegion, o.extractSource)
})
if err != nil {
return err
}
}
return nil
}
// Returns the k8s version name
func findVersion() string {
// The version may be in a version file
if _, err := os.Stat("version"); err == nil {
b, err := ioutil.ReadFile("version")
if err == nil {
return strings.TrimSpace(string(b))
}
log.Printf("Failed to read version: %v", err)
}
// We can also get it from the git repo.
if _, err := os.Stat("hack/lib/version.sh"); err == nil {
// TODO(fejta): do this in go. At least we removed the upload-to-gcs.sh dep.
gross := `. hack/lib/version.sh && KUBE_ROOT=. kube::version::get_version_vars && echo "${KUBE_GIT_VERSION-}"`
b, err := control.Output(exec.Command("bash", "-c", gross))
if err == nil {
return strings.TrimSpace(string(b))
}
log.Printf("Failed to get_version_vars: %v", err)
}
return "unknown" // Sad trombone
}
// maybeMergeJSON will add new keyvals into the map; quietly eats errors.
func maybeMergeJSON(meta map[string]string, path string) {
if data, err := ioutil.ReadFile(path); err == nil {
json.Unmarshal(data, &meta)
}
}
// Write metadata.json, including version and env arg data.
func writeMetadata(path, metadataSources string) error {
m := make(map[string]string)
// Look for any sources of metadata and load 'em
for _, f := range strings.Split(metadataSources, ",") {
maybeMergeJSON(m, filepath.Join(path, f))
}
ver := findVersion()
m["job-version"] = ver // TODO(krzyzacy): retire
m["revision"] = ver
re := regexp.MustCompile(`^BUILD_METADATA_(.+)$`)
for _, e := range os.Environ() {
p := strings.SplitN(e, "=", 2)
r := re.FindStringSubmatch(p[0])
if r == nil {
continue
}
k, v := strings.ToLower(r[1]), p[1]
m[k] = v
}
f, err := os.Create(filepath.Join(path, "metadata.json"))
if err != nil {
return err
}
defer f.Close()
e := json.NewEncoder(f)
return e.Encode(m)
}
// Install cloudsdk tarball to location, updating PATH
func installGcloud(tarball string, location string) error {
if err := os.MkdirAll(location, 0775); err != nil {
return err
}
if err := control.FinishRunning(exec.Command("tar", "xzf", tarball, "-C", location)); err != nil {
return err
}
if err := control.FinishRunning(exec.Command(filepath.Join(location, "google-cloud-sdk", "install.sh"), "--disable-installation-options", "--bash-completion=false", "--path-update=false", "--usage-reporting=false")); err != nil {
return err
}
if err := util.InsertPath(filepath.Join(location, "google-cloud-sdk", "bin")); err != nil {
return err
}
if err := control.FinishRunning(exec.Command("gcloud", "components", "install", "alpha")); err != nil {
return err
}
if err := control.FinishRunning(exec.Command("gcloud", "components", "install", "beta")); err != nil {
return err
}
if err := control.FinishRunning(exec.Command("gcloud", "info")); err != nil {
return err
}
return nil
}
func migrateGcpEnvAndOptions(o *options) error {
var network string
var zone string
switch o.provider {
case "gke":
network = "KUBE_GKE_NETWORK"
zone = "ZONE"
default:
network = "KUBE_GCE_NETWORK"
zone = "KUBE_GCE_ZONE"
}
return util.MigrateOptions([]util.MigratedOption{
{
Env: "PROJECT",
Option: &o.gcpProject,
Name: "--gcp-project",
},
{
Env: zone,
Option: &o.gcpZone,
Name: "--gcp-zone",
},
{
Env: "REGION",
Option: &o.gcpRegion,
Name: "--gcp-region",
},
{
Env: "GOOGLE_APPLICATION_CREDENTIALS",
Option: &o.gcpServiceAccount,
Name: "--gcp-service-account",
},
{
Env: network,
Option: &o.gcpNetwork,
Name: "--gcp-network",
},
{
Env: "KUBE_NODE_OS_DISTRIBUTION",
Option: &o.gcpNodeImage,
Name: "--gcp-node-image",
},
{
Env: "KUBE_MASTER_OS_DISTRIBUTION",
Option: &o.gcpMasterImage,
Name: "--gcp-master-image",
},
{
Env: "NUM_NODES",
Option: &o.gcpNodes,
Name: "--gcp-nodes",
},
{
Env: "NODE_SIZE",
Option: &o.gcpNodeSize,
Name: "--gcp-node-size",
},
{
Env: "MASTER_SIZE",
Option: &o.gcpMasterSize,
Name: "--gcp-master-size",
},
{
Env: "CLOUDSDK_BUCKET",
Option: &o.gcpCloudSdk,
Name: "--gcp-cloud-sdk",
SkipPush: true,
},
})
}
func prepareGcp(o *options) error {
if err := migrateGcpEnvAndOptions(o); err != nil {
return err
}
if o.provider == "gce" {
if distro := os.Getenv("KUBE_OS_DISTRIBUTION"); distro != "" {
log.Printf("Please use --gcp-master-image=%s --gcp-node-image=%s (instead of deprecated KUBE_OS_DISTRIBUTION)",
distro, distro)
// Note: KUBE_OS_DISTRIBUTION takes precedence over
// KUBE_{MASTER,NODE}_OS_DISTRIBUTION, so override here
// after the migration above.
o.gcpNodeImage = distro
o.gcpMasterImage = distro
if err := os.Setenv("KUBE_NODE_OS_DISTRIBUTION", distro); err != nil {
return fmt.Errorf("could not set KUBE_NODE_OS_DISTRIBUTION=%s: %v", distro, err)
}
if err := os.Setenv("KUBE_MASTER_OS_DISTRIBUTION", distro); err != nil {
return fmt.Errorf("could not set KUBE_MASTER_OS_DISTRIBUTION=%s: %v", distro, err)
}
}
hasGCPImageFamily, hasGCPImageProject := len(o.gcpImageFamily) != 0, len(o.gcpImageProject) != 0
if hasGCPImageFamily != hasGCPImageProject {
return fmt.Errorf("--image-family and --image-project must be both set or unset")
}
if hasGCPImageFamily && hasGCPImageProject {
out, err := control.Output(exec.Command("gcloud", "compute", "images", "describe-from-family", o.gcpImageFamily, "--project", o.gcpImageProject))
if err != nil {
return fmt.Errorf("failed to get latest image from family %q in project %q: %s", o.gcpImageFamily, o.gcpImageProject, err)
}
latestImage := ""
latestImageRegexp := regexp.MustCompile("^name: *(\\S+)")
for _, line := range strings.Split(string(out), "\n") {
matches := latestImageRegexp.FindStringSubmatch(line)
if len(matches) == 2 {
latestImage = matches[1]
break
}
}
if len(latestImage) == 0 {
return fmt.Errorf("failed to get latest image from family %q in project %q", o.gcpImageFamily, o.gcpImageProject)
}
if o.deployment == "node" {
o.nodeArgs += fmt.Sprintf(" --images=%s --image-project=%s", latestImage, o.gcpImageProject)
} else {
os.Setenv("KUBE_GCE_NODE_IMAGE", latestImage)
os.Setenv("KUBE_GCE_NODE_PROJECT", o.gcpImageProject)
}
}
} else if o.provider == "gke" {
if o.deployment == "" {
o.deployment = "gke"
}
if o.deployment != "gke" {
return fmt.Errorf("expected --deployment=gke for --provider=gke, found --deployment=%s", o.deployment)
}
if o.gcpNodeImage == "" {
return fmt.Errorf("--gcp-node-image must be set for GKE")
}
if o.gcpMasterImage != "" {
return fmt.Errorf("expected --gcp-master-image to be empty for --provider=gke, found --gcp-master-image=%s", o.gcpMasterImage)
}
if o.gcpNodes != "" {
return fmt.Errorf("--gcp-nodes cannot be set on GKE, use --gke-shape instead")
}
if o.gcpNodeSize != "" {
return fmt.Errorf("--gcp-node-size cannot be set on GKE, use --gke-shape instead")
}
if o.gcpMasterSize != "" {
return fmt.Errorf("--gcp-master-size cannot be set on GKE, where it's auto-computed")
}
// TODO(kubernetes/test-infra#3536): This is used by the
// ginkgo-e2e.sh wrapper.
nod := o.gcpNodeImage
if nod == "container_vm" {
// gcloud container clusters create understands
// "container_vm", e2es understand "debian".
nod = "debian"
}
if nod == "cos_containerd" {
// gcloud container clusters create understands
// "cos_containerd", e2es only understand
// "gci"/"cos",
nod = "gci"
}
os.Setenv("NODE_OS_DISTRIBUTION", nod)
}
if o.gcpProject == "" {
log.Print("--gcp-project is missing, trying to fetch a project from boskos.\n" +
"(for local runs please set --gcp-project to your dev project)")
var resType string
if o.gcpProjectType != "" {
resType = o.gcpProjectType
} else if o.provider == "gke" {
resType = "gke-project"
} else {
resType = "gce-project"
}
log.Printf("provider %v, will acquire project type %v from boskos", o.provider, resType)
// let's retry 5min to get next available resource
ctx, cancel := context.WithTimeout(context.Background(), o.boskosWaitDuration)
defer cancel()
p, err := boskos.AcquireWait(ctx, resType, "free", "busy")
if err != nil {
return fmt.Errorf("--provider=%s boskos failed to acquire project: %v", o.provider, err)
}
if p == nil {
return fmt.Errorf("boskos does not have a free %s at the moment", resType)
}
go func(c *client.Client, proj string) {
for range time.Tick(time.Minute * 5) {
if err := c.UpdateOne(p.Name, "busy", nil); err != nil {
log.Printf("[Boskos] Update of %s failed with %v", p.Name, err)
}
}
}(boskos, p.Name)
o.gcpProject = p.Name
}
if err := os.Setenv("CLOUDSDK_CORE_PRINT_UNHANDLED_TRACEBACKS", "1"); err != nil {
return fmt.Errorf("could not set CLOUDSDK_CORE_PRINT_UNHANDLED_TRACEBACKS=1: %v", err)
}
if err := control.FinishRunning(exec.Command("gcloud", "config", "set", "project", o.gcpProject)); err != nil {
return fmt.Errorf("fail to set project %s : err %v", o.gcpProject, err)
}
// TODO(krzyzacy):Remove this when we retire migrateGcpEnvAndOptions
// Note that a lot of scripts are still depend on this env in k/k repo.
if err := os.Setenv("PROJECT", o.gcpProject); err != nil {
return fmt.Errorf("fail to set env var PROJECT %s : err %v", o.gcpProject, err)
}
// gcloud creds may have changed
if err := activateServiceAccount(o.gcpServiceAccount); err != nil {
return err
}
// Ensure ssh keys exist
log.Print("Checking existing of GCP ssh keys...")
k := filepath.Join(util.Home(".ssh"), "google_compute_engine")
if _, err := os.Stat(k); err != nil {
return err
}
pk := k + ".pub"
if _, err := os.Stat(pk); err != nil {
return err
}
log.Printf("Checking presence of public key in %s", o.gcpProject)
if out, err := control.Output(exec.Command("gcloud", "compute", "--project="+o.gcpProject, "project-info", "describe")); err != nil {
return err
} else if b, err := ioutil.ReadFile(pk); err != nil {
return err
} else if !strings.Contains(string(out), string(b)) {
log.Print("Uploading public ssh key to project metadata...")
if err = control.FinishRunning(exec.Command("gcloud", "compute", "--project="+o.gcpProject, "config-ssh")); err != nil {
return err
}
}
// Install custom gcloud version if necessary
if o.gcpCloudSdk != "" {
for i := 0; i < 3; i++ {
if err := control.FinishRunning(exec.Command("gsutil", "-mq", "cp", "-r", o.gcpCloudSdk, util.Home())); err == nil {
break // Success!
}
time.Sleep(1 << uint(i) * time.Second)
}
for _, f := range []string{util.Home(".gsutil"), util.Home("repo"), util.Home("cloudsdk")} {
if _, err := os.Stat(f); err == nil || !os.IsNotExist(err) {
if err = os.RemoveAll(f); err != nil {
return err
}
}
}
install := util.Home("repo", "google-cloud-sdk.tar.gz")
if strings.HasSuffix(o.gcpCloudSdk, ".tar.gz") {
install = util.Home(filepath.Base(o.gcpCloudSdk))
} else {
if err := os.Rename(util.Home(filepath.Base(o.gcpCloudSdk)), util.Home("repo")); err != nil {
return err
}
// Controls which gcloud components to install.
pop, err := util.PushEnv("CLOUDSDK_COMPONENT_MANAGER_SNAPSHOT_URL", "file://"+util.Home("repo", "components-2.json"))
if err != nil {
return err
}
defer pop()
}
if err := installGcloud(install, util.Home("cloudsdk")); err != nil {
return err
}
// gcloud creds may have changed
if err := activateServiceAccount(o.gcpServiceAccount); err != nil {
return err
}
}
if o.kubemark {
if p := os.Getenv("KUBEMARK_BAZEL_BUILD"); strings.ToLower(p) == "y" {
// we need docker-credential-gcr to get authed properly
// https://github.com/bazelbuild/rules_docker#authorization
if err := control.FinishRunning(exec.Command("gcloud", "components", "install", "docker-credential-gcr")); err != nil {
return err
}
if err := control.FinishRunning(exec.Command("docker-credential-gcr", "configure-docker")); err != nil {
return err
}
}
}
return nil
}
func prepareAws(o *options) error {
// gcloud creds may have changed
if err := activateServiceAccount(o.gcpServiceAccount); err != nil {
return err
}
return control.FinishRunning(exec.Command("pip", "install", "awscli"))
}
// Activate GOOGLE_APPLICATION_CREDENTIALS if set or do nothing.
func activateServiceAccount(path string) error {
if path == "" {
return nil
}
return control.FinishRunning(exec.Command("gcloud", "auth", "activate-service-account", "--key-file="+path))
}
// Make all artifacts world readable.
// The root user winds up owning the files when the container exits.
// Ensure that other users can read these files at that time.
func chmodArtifacts() error {
return control.FinishRunning(exec.Command("chmod", "-R", "o+r", artifacts))
}
func prepare(o *options) error {
if err := util.MigrateOptions([]util.MigratedOption{
{
Env: "KUBERNETES_PROVIDER",
Option: &o.provider,
Name: "--provider",
},
{
Env: "CLUSTER_NAME",
Option: &o.cluster,
Name: "--cluster",
},
}); err != nil {
return err
}
if err := prepareGinkgoParallel(&o.ginkgoParallel); err != nil {
return err
}
switch o.provider {
case "gce", "gke", "node":
if err := prepareGcp(o); err != nil {
return err
}
case "aws":
if err := prepareAws(o); err != nil {
return err
}
}
// For kubernetes-anywhere as the deployer, call prepareGcp()
// independent of the specified provider.
if o.deployment == "kubernetes-anywhere" {
if err := prepareGcp(o); err != nil {
return err
}
}
if o.kubemark {
if err := util.MigrateOptions([]util.MigratedOption{
{
Env: "KUBEMARK_NUM_NODES",
Option: &o.kubemarkNodes,
Name: "--kubemark-nodes",
},
{
Env: "KUBEMARK_MASTER_SIZE",
Option: &o.kubemarkMasterSize,
Name: "--kubemark-master-size",
},
}); err != nil {
return err
}
}
if err := os.MkdirAll(artifacts, 0777); err != nil { // Create artifacts
return err
}
return nil
}
type ginkgoParallelValue struct {
v int // 0 == not set (defaults to 1)
}
func (v *ginkgoParallelValue) IsBoolFlag() bool {
return true
}
func (v *ginkgoParallelValue) String() string {
if v.v == 0 {
return "1"
}
return strconv.Itoa(v.v)
}
func (v *ginkgoParallelValue) Set(s string) error {
if s == "" {
v.v = 0
return nil
}
if s == "true" {
v.v = defaultGinkgoParallel
return nil
}
p, err := strconv.Atoi(s)
if err != nil {
return fmt.Errorf("--ginkgo-parallel must be an integer, found %q", s)
}
if p < 1 {
return fmt.Errorf("--ginkgo-parallel must be >= 1, found %d", p)
}
v.v = p
return nil
}
func (v *ginkgoParallelValue) Type() string {
return "ginkgoParallelValue"
}
func (v *ginkgoParallelValue) Get() int {
if v.v == 0 {
return 1
}
return v.v
}
var _ flag.Value = &ginkgoParallelValue{}
// Hand migrate this option. GINKGO_PARALLEL => GINKGO_PARALLEL_NODES=25
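// For illustration, mirroring the logic in the function below:
//
//	GINKGO_PARALLEL=y       -> equivalent to --ginkgo-parallel (25 runners)
//	GINKGO_PARALLEL_NODES=8 -> equivalent to --ginkgo-parallel=8
//
// and GINKGO_PARALLEL_NODES is then re-exported with the resolved value.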
func prepareGinkgoParallel(v *ginkgoParallelValue) error {
if p := os.Getenv("GINKGO_PARALLEL"); strings.ToLower(p) == "y" {
log.Printf("Please use kubetest --ginkgo-parallel (instead of deprecated GINKGO_PARALLEL=y)")
if err := v.Set("true"); err != nil {
return err
}
os.Unsetenv("GINKGO_PARALLEL")
}
if p := os.Getenv("GINKGO_PARALLEL_NODES"); p != "" {
log.Printf("Please use kubetest --ginkgo-parallel=%s (instead of deprecated GINKGO_PARALLEL_NODES=%s)", p, p)
if err := v.Set(p); err != nil {
return err
}
}
os.Setenv("GINKGO_PARALLEL_NODES", v.String())
return nil
}
func publish(pub string) error {
v, err := ioutil.ReadFile("version")
if err != nil {
return err
}
log.Printf("Set %s version to %s", pub, string(v))
return gcsWrite(pub, v)
}
| [
"\"WORKSPACE\"",
"\"JOB_NAME\"",
"\"KUBETEST_IN_DOCKER\"",
"\"ARTIFACTS\"",
"\"KUBE_OS_DISTRIBUTION\"",
"\"KUBEMARK_BAZEL_BUILD\"",
"\"GINKGO_PARALLEL\"",
"\"GINKGO_PARALLEL_NODES\""
]
| []
| [
"KUBEMARK_BAZEL_BUILD",
"ARTIFACTS",
"JOB_NAME",
"KUBETEST_IN_DOCKER",
"GINKGO_PARALLEL",
"GINKGO_PARALLEL_NODES",
"WORKSPACE",
"KUBE_OS_DISTRIBUTION"
]
| [] | ["KUBEMARK_BAZEL_BUILD", "ARTIFACTS", "JOB_NAME", "KUBETEST_IN_DOCKER", "GINKGO_PARALLEL", "GINKGO_PARALLEL_NODES", "WORKSPACE", "KUBE_OS_DISTRIBUTION"] | go | 8 | 0 | |
getitfixed/alembic/env.py | import os
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool, text
from alembic import context
from getitfixed.scripts import wait_for_db
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(context.config.config_file_name)
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def get_config():
conf = context.config.get_section(context.config.config_ini_section)
# Load config from c2cgeoportal if available
app_cfg = context.config.get_main_option("app.cfg")
if app_cfg:
from c2c.template.config import config
config.init(context.config.get_main_option("app.cfg"))
conf.update(config.get_config())
if "sqlalchemy.url" not in conf:
conf[
"sqlalchemy.url"
] = "postgresql://{PGUSER}:{PGPASSWORD}@{PGHOST}:{PGPORT}/{PGDATABASE}".format(
**os.environ
)
conf.update(
{"version_table_schema": conf.get("getitfixed", {}).get("schema", "getitfixed")}
)
# for 'autogenerate' support
from getitfixed import models # noqa
conf["target_metadata"] = models.meta.Base.metadata
return conf
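# Illustrative example (all values hypothetical): when "sqlalchemy.url" is not
# present in the ini/c2c config, get_config() assembles it from the PG*
# environment variables, e.g. PGUSER=www-data, PGPASSWORD=secret, PGHOST=db,
# PGPORT=5432, PGDATABASE=getitfixed would yield
#   postgresql://www-data:secret@db:5432/getitfixed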
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
conf = get_config()
context.configure(url=conf["sqlalchemy.url"], literal_binds=True, **conf)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
conf = get_config()
schema = conf.get("getitfixed", {}).get("schema", "getitfixed")
connectable = engine_from_config(
conf, prefix="sqlalchemy.", poolclass=pool.NullPool
)
def include_object(
obj, name, type_, reflected, compare_to
): # pylint: disable=unused-argument
if type_ == "table":
return obj.schema == schema
else:
return obj.table.schema == schema
wait_for_db(connectable)
with connectable.connect() as connection:
context.configure(
connection=connection,
include_schemas=True,
include_object=include_object,
**conf
)
connection.execute(text('CREATE SCHEMA IF NOT EXISTS "{}";'.format(schema)))
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
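# Typical invocation, for illustration only: this env.py is run by the alembic
# CLI, e.g. `alembic -c alembic.ini upgrade head` for online migrations, or the
# same command with `--sql` to use the offline code path above.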
| []
| []
| []
| [] | [] | python | 0 | 0 | |
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/plugin/plugin.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/util/i18n"
"k8s.io/kubernetes/pkg/kubectl/util/templates"
)
var (
plugin_long = templates.LongDesc(`
Provides utilities for interacting with plugins.
Plugins provide extended functionality that is not part of the major command-line distribution.
Please refer to the documentation and examples for more information about how to write your own plugins.`)
plugin_list_long = templates.LongDesc(`
List all available plugin files on a user's PATH.
Available plugin files are those that are:
- executable
- anywhere on the user's PATH
- begin with "kubectl-"
`)
ValidPluginFilenamePrefixes = []string{"kubectl"}
)
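// For illustration (the plugin name is hypothetical): an executable file named
// "kubectl-hello" anywhere on the user's PATH satisfies hasValidPrefix below
// ("kubectl" + "-"), so the list subcommand would print its path, while a file
// named "hello" is skipped and a non-executable "kubectl-hello" is listed with
// a warning from the verifier.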
func NewCmdPlugin(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
cmd := &cobra.Command{
Use: "plugin [flags]",
DisableFlagsInUseLine: true,
Short: i18n.T("Provides utilities for interacting with plugins."),
Long: plugin_long,
Run: func(cmd *cobra.Command, args []string) {
cmdutil.DefaultSubCommandRun(streams.ErrOut)(cmd, args)
},
}
cmd.AddCommand(NewCmdPluginList(f, streams))
return cmd
}
type PluginListOptions struct {
Verifier PathVerifier
NameOnly bool
PluginPaths []string
genericclioptions.IOStreams
}
// NewCmdPluginList provides a way to list all plugin executables visible to kubectl
func NewCmdPluginList(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
o := &PluginListOptions{
IOStreams: streams,
}
cmd := &cobra.Command{
Use: "list",
Short: "list all visible plugin executables on a user's PATH",
Long: plugin_list_long,
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(o.Complete(cmd))
cmdutil.CheckErr(o.Run())
},
}
cmd.Flags().BoolVar(&o.NameOnly, "name-only", o.NameOnly, "If true, display only the binary name of each plugin, rather than its full path")
return cmd
}
func (o *PluginListOptions) Complete(cmd *cobra.Command) error {
o.Verifier = &CommandOverrideVerifier{
root: cmd.Root(),
seenPlugins: make(map[string]string, 0),
}
o.PluginPaths = filepath.SplitList(os.Getenv("PATH"))
return nil
}
func (o *PluginListOptions) Run() error {
pluginsFound := false
isFirstFile := true
pluginErrors := []error{}
pluginWarnings := 0
for _, dir := range uniquePathsList(o.PluginPaths) {
files, err := ioutil.ReadDir(dir)
if err != nil {
if _, ok := err.(*os.PathError); ok && strings.Contains(err.Error(), "no such file") {
klog.V(3).Infof("unable to find directory %q in your PATH. Skipping...", dir)
continue
}
pluginErrors = append(pluginErrors, fmt.Errorf("error: unable to read directory %q in your PATH: %v", dir, err))
continue
}
for _, f := range files {
if f.IsDir() {
continue
}
if !hasValidPrefix(f.Name(), ValidPluginFilenamePrefixes) {
continue
}
if isFirstFile {
fmt.Fprintf(o.ErrOut, "The following compatible plugins are available:\n\n")
pluginsFound = true
isFirstFile = false
}
pluginPath := f.Name()
if !o.NameOnly {
pluginPath = filepath.Join(dir, pluginPath)
}
fmt.Fprintf(o.Out, "%s\n", pluginPath)
if errs := o.Verifier.Verify(filepath.Join(dir, f.Name())); len(errs) != 0 {
for _, err := range errs {
fmt.Fprintf(o.ErrOut, " - %s\n", err)
pluginWarnings++
}
}
}
}
if !pluginsFound {
pluginErrors = append(pluginErrors, fmt.Errorf("error: unable to find any kubectl plugins in your PATH"))
}
if pluginWarnings > 0 {
if pluginWarnings == 1 {
pluginErrors = append(pluginErrors, fmt.Errorf("error: one plugin warning was found"))
} else {
pluginErrors = append(pluginErrors, fmt.Errorf("error: %v plugin warnings were found", pluginWarnings))
}
}
if len(pluginErrors) > 0 {
fmt.Fprintln(o.ErrOut)
errs := bytes.NewBuffer(nil)
for _, e := range pluginErrors {
fmt.Fprintln(errs, e)
}
return fmt.Errorf("%s", errs.String())
}
return nil
}
// PathVerifier receives a path and determines if it is valid or not
type PathVerifier interface {
// Verify determines if a given path is valid
Verify(path string) []error
}
type CommandOverrideVerifier struct {
root *cobra.Command
seenPlugins map[string]string
}
// Verify implements PathVerifier and determines if a given path
// is valid depending on whether or not it overwrites an existing
// kubectl command path, or a previously seen plugin.
func (v *CommandOverrideVerifier) Verify(path string) []error {
if v.root == nil {
return []error{fmt.Errorf("unable to verify path with nil root")}
}
// extract the plugin binary name
segs := strings.Split(path, "/")
binName := segs[len(segs)-1]
cmdPath := strings.Split(binName, "-")
if len(cmdPath) > 1 {
// the first argument is always "kubectl" for a plugin binary
cmdPath = cmdPath[1:]
}
errors := []error{}
if isExec, err := isExecutable(path); err == nil && !isExec {
errors = append(errors, fmt.Errorf("warning: %s identified as a kubectl plugin, but it is not executable", path))
} else if err != nil {
errors = append(errors, fmt.Errorf("error: unable to identify %s as an executable file: %v", path, err))
}
if existingPath, ok := v.seenPlugins[binName]; ok {
errors = append(errors, fmt.Errorf("warning: %s is overshadowed by a similarly named plugin: %s", path, existingPath))
} else {
v.seenPlugins[binName] = path
}
if cmd, _, err := v.root.Find(cmdPath); err == nil {
errors = append(errors, fmt.Errorf("warning: %s overwrites existing command: %q", binName, cmd.CommandPath()))
}
return errors
}
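// Example of the override check above (plugin name hypothetical): a binary
// named "kubectl-version" yields cmdPath ["version"], and assuming the root
// command tree has a "version" subcommand (as kubectl does), Verify appends a
// warning that the plugin overwrites the existing "kubectl version" command.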
func isExecutable(fullPath string) (bool, error) {
info, err := os.Stat(fullPath)
if err != nil {
return false, err
}
if runtime.GOOS == "windows" {
fileExt := strings.ToLower(filepath.Ext(fullPath))
switch fileExt {
case ".bat", ".cmd", ".com", ".exe", ".ps1":
return true, nil
}
return false, nil
}
if m := info.Mode(); !m.IsDir() && m&0111 != 0 {
return true, nil
}
return false, nil
}
// uniquePathsList deduplicates a given slice of strings without
// sorting or otherwise altering its order in any way.
func uniquePathsList(paths []string) []string {
seen := map[string]bool{}
newPaths := []string{}
for _, p := range paths {
if seen[p] {
continue
}
seen[p] = true
newPaths = append(newPaths, p)
}
return newPaths
}
func hasValidPrefix(filepath string, validPrefixes []string) bool {
for _, prefix := range validPrefixes {
if !strings.HasPrefix(filepath, prefix+"-") {
continue
}
return true
}
return false
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
staidelta/wallet/puzzles/load_clvm.py | import importlib
import inspect
import os
import pathlib
import pkg_resources
from clvm_tools.clvmc import compile_clvm as compile_clvm_py
from staidelta.types.blockchain_format.program import Program, SerializedProgram
compile_clvm = compile_clvm_py
# Handle optional use of clvm_tools_rs if available and requested
if "CLVM_TOOLS_RS" in os.environ:
try:
def sha256file(f):
import hashlib
m = hashlib.sha256()
m.update(open(f).read().encode("utf8"))
return m.hexdigest()
from clvm_tools_rs import compile_clvm as compile_clvm_rs
def translate_path(p_):
p = str(p_)
if os.path.isdir(p):
return p
else:
module_object = importlib.import_module(p)
return os.path.dirname(inspect.getfile(module_object))
def rust_compile_clvm(full_path, output, search_paths=[]):
treated_include_paths = list(map(translate_path, search_paths))
print("compile_clvm_rs", full_path, output, treated_include_paths)
compile_clvm_rs(str(full_path), str(output), treated_include_paths)
if os.environ["CLVM_TOOLS_RS"] == "check":
orig = str(output) + ".orig"
compile_clvm_py(full_path, orig, search_paths=search_paths)
orig256 = sha256file(orig)
rs256 = sha256file(output)
if orig256 != rs256:
print("Compiled %s: %s vs %s\n" % (full_path, orig256, rs256))
print("Aborting compilation due to mismatch with rust")
assert orig256 == rs256
compile_clvm = rust_compile_clvm
finally:
pass
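# Behavior summary of the block above, for illustration: setting CLVM_TOOLS_RS
# to any value switches compile_clvm to the Rust compiler; setting it to
# "check" additionally compiles with the Python compiler into "<output>.orig"
# and asserts that both outputs have the same sha256, aborting on a mismatch.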
def load_serialized_clvm(clvm_filename, package_or_requirement=__name__) -> SerializedProgram:
"""
This function takes a .clvm file in the given package and compiles it to a
.clvm.hex file if the .hex file is missing or older than the .clvm file, then
returns the contents of the .hex file as a `Program`.
clvm_filename: file name
package_or_requirement: usually `__name__` if the clvm file is in the same package
"""
hex_filename = f"{clvm_filename}.hex"
try:
if pkg_resources.resource_exists(package_or_requirement, clvm_filename):
full_path = pathlib.Path(pkg_resources.resource_filename(package_or_requirement, clvm_filename))
output = full_path.parent / hex_filename
compile_clvm(full_path, output, search_paths=[full_path.parent])
except NotImplementedError:
# pyinstaller doesn't support `pkg_resources.resource_exists`
# so we just fall through to loading the hex clvm
pass
clvm_hex = pkg_resources.resource_string(package_or_requirement, hex_filename).decode("utf8")
clvm_blob = bytes.fromhex(clvm_hex)
return SerializedProgram.from_bytes(clvm_blob)
def load_clvm(clvm_filename, package_or_requirement=__name__) -> Program:
return Program.from_bytes(bytes(load_serialized_clvm(clvm_filename, package_or_requirement=package_or_requirement)))
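# Hypothetical usage example (the .clvm filename is illustrative only):
#   puzzle = load_clvm("p2_delegated_puzzle.clvm", package_or_requirement=__name__)
# compiles the source to a .hex file if needed and returns it as a Program.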
| []
| []
| [
"CLVM_TOOLS_RS"
]
| [] | ["CLVM_TOOLS_RS"] | python | 1 | 0 | |
router.go | package main
import (
"net/http"
"os"
"github.com/ahub-tech/hub-profile-api/db"
"github.com/ahub-tech/hub-profile-api/profile"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
)
func Route(router *echo.Echo, port string) {
router.Use(middleware.Logger())
router.Use(middleware.Recover())
router.POST("/register", func(c echo.Context) error {
rProfile := profile.NewProfile(c.QueryParam("fullname"), c.QueryParam("age"), c.QueryParam("corp"), c.QueryParam("exp"), c.QueryParam("langs"), c.QueryParam("lkin"), c.QueryParam("tw"), c.QueryParam("fb"), c.QueryParam("ig"), c.QueryParam("aut"))
err := db.AddProfile(rProfile)
if err != nil {
c.Error(err)
}
return c.String(http.StatusOK, "Registered successfully")
})
router.GET("/search/:name", func(c echo.Context) error {
name := c.Param("name")
profile, _ := db.SearchProfile(name)
return c.JSON(http.StatusOK, profile)
})
router.GET("/all", func(c echo.Context) error {
return c.JSON(http.StatusOK, db.AllProfiles())
})
router.Start(":" + os.Getenv("PORT"))
} | [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
runsc/container/container_test.go | // Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package container_test
import (
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
"syscall"
"testing"
"time"
specs "github.com/opencontainers/runtime-spec/specs-go"
"golang.org/x/sys/unix"
"gvisor.googlesource.com/gvisor/pkg/abi/linux"
"gvisor.googlesource.com/gvisor/pkg/log"
"gvisor.googlesource.com/gvisor/pkg/sentry/control"
"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth"
"gvisor.googlesource.com/gvisor/pkg/unet"
"gvisor.googlesource.com/gvisor/runsc/boot"
"gvisor.googlesource.com/gvisor/runsc/container"
"gvisor.googlesource.com/gvisor/runsc/specutils"
"gvisor.googlesource.com/gvisor/runsc/test/testutil"
)
func init() {
log.SetLevel(log.Debug)
if err := testutil.ConfigureExePath(); err != nil {
panic(err.Error())
}
}
// waitForProcessList waits for the given process list to show up in the container.
func waitForProcessList(s *container.Container, expected []*control.Process) error {
var got []*control.Process
for start := time.Now(); time.Now().Sub(start) < 10*time.Second; {
var err error
got, err = s.Processes()
if err != nil {
return fmt.Errorf("error getting process data from container: %v", err)
}
if procListsEqual(got, expected) {
return nil
}
// Process might not have started, try again...
time.Sleep(10 * time.Millisecond)
}
return fmt.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expected))
}
// procListsEqual is used to check whether 2 Process lists are equal for all
// implemented fields.
func procListsEqual(got, want []*control.Process) bool {
if len(got) != len(want) {
return false
}
for i := range got {
pd1 := got[i]
pd2 := want[i]
// Zero out unimplemented and timing dependent fields.
pd1.Time = ""
pd1.STime = ""
pd1.C = 0
if *pd1 != *pd2 {
return false
}
}
return true
}
// getAndCheckProcLists is similar to waitForProcessList, but does not wait and retry the
// test for equality. This is because we already confirmed that exec occurred.
func getAndCheckProcLists(cont *container.Container, want []*control.Process) error {
got, err := cont.Processes()
if err != nil {
return fmt.Errorf("error getting process data from container: %v", err)
}
if procListsEqual(got, want) {
return nil
}
return fmt.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(want))
}
func procListToString(pl []*control.Process) string {
strs := make([]string, 0, len(pl))
for _, p := range pl {
strs = append(strs, fmt.Sprintf("%+v", p))
}
return fmt.Sprintf("[%s]", strings.Join(strs, ","))
}
// createWriteableOutputFile creates an output file that can be read and written to in the sandbox.
func createWriteableOutputFile(path string) (*os.File, error) {
outputFile, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
if err != nil {
return nil, fmt.Errorf("error creating file: %q, %v", path, err)
}
// Chmod to allow writing after umask.
if err := outputFile.Chmod(0666); err != nil {
return nil, fmt.Errorf("error chmoding file: %q, %v", path, err)
}
return outputFile, nil
}
func waitForFile(f *os.File) error {
op := func() error {
fi, err := f.Stat()
if err != nil {
return err
}
if fi.Size() == 0 {
return fmt.Errorf("file %q is empty", f.Name())
}
return nil
}
return testutil.Poll(op, 5*time.Second)
}
func readOutputNum(f *os.File, first bool) (int, error) {
// Wait until file has contents.
if err := waitForFile(f); err != nil {
return 0, err
}
// Read the first number in the new file
b, err := ioutil.ReadAll(f)
if err != nil {
return 0, fmt.Errorf("error reading file: %v", err)
}
if len(b) == 0 {
return 0, fmt.Errorf("error no content was read")
}
nums := strings.Split(string(b), "\n")
var num int
if first {
num, err = strconv.Atoi(nums[0])
} else {
num, err = strconv.Atoi(nums[len(nums)-2])
}
if err != nil {
return 0, fmt.Errorf("error getting number from file: %v", err)
}
return num, nil
}
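// Worked example, for illustration: if the output file contains "0\n1\n2\n",
// splitting on "\n" yields ["0", "1", "2", ""], so readOutputNum returns 0 when
// first is true and 2 (nums[len(nums)-2]) when first is false.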
// run starts the sandbox and waits for it to exit, checking that the
// application succeeded.
func run(spec *specs.Spec, conf *boot.Config) error {
rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
if err != nil {
return fmt.Errorf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
// Create, start and wait for the container.
s, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
if err != nil {
return fmt.Errorf("error creating container: %v", err)
}
defer s.Destroy()
if err := s.Start(conf); err != nil {
return fmt.Errorf("error starting container: %v", err)
}
ws, err := s.Wait()
if err != nil {
return fmt.Errorf("error waiting on container: %v", err)
}
if !ws.Exited() || ws.ExitStatus() != 0 {
return fmt.Errorf("container failed, waitStatus: %v", ws)
}
return nil
}
type configOptions int
const (
overlay configOptions = 1 << iota
kvm
)
const all = overlay | kvm
// configs generates different configurations to run tests.
func configs(opts configOptions) []*boot.Config {
cs := []*boot.Config{testutil.TestConfig()}
if opts&overlay != 0 {
c := testutil.TestConfig()
c.Overlay = true
cs = append(cs, c)
}
// TODO: KVM doesn't work with --race.
if !testutil.RaceEnabled && opts&kvm != 0 {
c := testutil.TestConfig()
c.Platform = boot.PlatformKVM
cs = append(cs, c)
}
return cs
}
// TestLifecycle tests the basic Create/Start/Signal/Destroy container lifecycle.
// It verifies after each step that the container can be loaded from disk, and
// has the correct status.
func TestLifecycle(t *testing.T) {
for _, conf := range configs(all) {
t.Logf("Running test with conf: %+v", conf)
// The container will just sleep for a long time. We will kill it before
// it finishes sleeping.
spec := testutil.NewSpecWithArgs("sleep", "100")
rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
// expectedPL lists the expected process state of the container.
expectedPL := []*control.Process{
{
UID: 0,
PID: 1,
PPID: 0,
C: 0,
Cmd: "sleep",
},
}
// Create the container.
id := testutil.UniqueContainerID()
if _, err := container.Create(id, spec, conf, bundleDir, "", ""); err != nil {
t.Fatalf("error creating container: %v", err)
}
// Load the container from disk and check the status.
s, err := container.Load(rootDir, id)
if err != nil {
t.Fatalf("error loading container: %v", err)
}
if got, want := s.Status, container.Created; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
// List should return the container id.
ids, err := container.List(rootDir)
if err != nil {
t.Fatalf("error listing containers: %v", err)
}
if got, want := ids, []string{id}; !reflect.DeepEqual(got, want) {
t.Errorf("container list got %v, want %v", got, want)
}
// Start the container.
if err := s.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Load the container from disk and check the status.
s, err = container.Load(rootDir, id)
if err != nil {
t.Fatalf("error loading container: %v", err)
}
if got, want := s.Status, container.Running; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
// Verify that "sleep 100" is running.
if err := waitForProcessList(s, expectedPL); err != nil {
t.Error(err)
}
// Wait on the container.
var wg sync.WaitGroup
wg.Add(1)
ch := make(chan struct{})
go func() {
ch <- struct{}{}
ws, err := s.Wait()
if err != nil {
t.Fatalf("error waiting on container: %v", err)
}
if got, want := ws.Signal(), syscall.SIGTERM; got != want {
t.Fatalf("got signal %v, want %v", got, want)
}
wg.Done()
}()
// Wait a bit to ensure that we've started waiting on the container
// before we signal.
<-ch
time.Sleep(100 * time.Millisecond)
// Send the container a SIGTERM which will cause it to stop.
if err := s.Signal(syscall.SIGTERM); err != nil {
t.Fatalf("error sending signal %v to container: %v", syscall.SIGTERM, err)
}
// Wait for it to die.
wg.Wait()
// The sandbox process should have exited by now, but it is a zombie.
// In normal runsc usage, it will be parented to init, and init will
// reap the sandbox. However, in this case the test runner is the
// parent and will not reap the sandbox process, so we must do it
// ourselves.
p, _ := os.FindProcess(s.Sandbox.Pid)
p.Wait()
g, _ := os.FindProcess(s.Sandbox.GoferPid)
g.Wait()
// Load the container from disk and check the status.
s, err = container.Load(rootDir, id)
if err != nil {
t.Fatalf("error loading container: %v", err)
}
if got, want := s.Status, container.Stopped; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
// Destroy the container.
if err := s.Destroy(); err != nil {
t.Fatalf("error destroying container: %v", err)
}
// List should not return the container id.
ids, err = container.List(rootDir)
if err != nil {
t.Fatalf("error listing containers: %v", err)
}
if len(ids) != 0 {
t.Errorf("expected container list to be empty, but got %v", ids)
}
// Loading the container by id should fail.
if _, err = container.Load(rootDir, id); err == nil {
t.Errorf("expected loading destroyed container to fail, but it did not")
}
}
}
// Test that we can execute the application with different path formats.
func TestExePath(t *testing.T) {
for _, conf := range configs(overlay) {
t.Logf("Running test with conf: %+v", conf)
for _, test := range []struct {
path string
success bool
}{
{path: "true", success: true},
{path: "bin/true", success: true},
{path: "/bin/true", success: true},
{path: "thisfiledoesntexit", success: false},
{path: "bin/thisfiledoesntexit", success: false},
{path: "/bin/thisfiledoesntexit", success: false},
} {
spec := testutil.NewSpecWithArgs(test.path)
rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("exec: %s, error setting up container: %v", test.path, err)
}
ws, err := container.Run(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
os.RemoveAll(rootDir)
os.RemoveAll(bundleDir)
if test.success {
if err != nil {
t.Errorf("exec: %s, error running container: %v", test.path, err)
}
if ws.ExitStatus() != 0 {
t.Errorf("exec: %s, got exit status %v want %v", test.path, ws.ExitStatus(), 0)
}
} else {
if err == nil {
t.Errorf("exec: %s, got: no error, want: error", test.path)
}
}
}
}
}
// Test that we can retrieve the application exit status from the container.
func TestAppExitStatus(t *testing.T) {
// First container will succeed.
succSpec := testutil.NewSpecWithArgs("true")
conf := testutil.TestConfig()
rootDir, bundleDir, err := testutil.SetupContainer(succSpec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
ws, err := container.Run(testutil.UniqueContainerID(), succSpec, conf, bundleDir, "", "")
if err != nil {
t.Fatalf("error running container: %v", err)
}
if ws.ExitStatus() != 0 {
t.Errorf("got exit status %v want %v", ws.ExitStatus(), 0)
}
// Second container exits with non-zero status.
wantStatus := 123
errSpec := testutil.NewSpecWithArgs("bash", "-c", fmt.Sprintf("exit %d", wantStatus))
rootDir2, bundleDir2, err := testutil.SetupContainer(errSpec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir2)
defer os.RemoveAll(bundleDir2)
ws, err = container.Run(testutil.UniqueContainerID(), succSpec, conf, bundleDir2, "", "")
if err != nil {
t.Fatalf("error running container: %v", err)
}
if ws.ExitStatus() != wantStatus {
t.Errorf("got exit status %v want %v", ws.ExitStatus(), wantStatus)
}
}
// TestExec verifies that a container can exec a new program.
func TestExec(t *testing.T) {
for _, conf := range configs(overlay) {
t.Logf("Running test with conf: %+v", conf)
const uid = 343
spec := testutil.NewSpecWithArgs("sleep", "100")
rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
// Create and start the container.
s, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer s.Destroy()
if err := s.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// expectedPL lists the expected process state of the container.
expectedPL := []*control.Process{
{
UID: 0,
PID: 1,
PPID: 0,
C: 0,
Cmd: "sleep",
},
{
UID: uid,
PID: 2,
PPID: 0,
C: 0,
Cmd: "sleep",
},
}
// Verify that "sleep 100" is running.
if err := waitForProcessList(s, expectedPL[:1]); err != nil {
t.Error(err)
}
execArgs := control.ExecArgs{
Filename: "/bin/sleep",
Argv: []string{"sleep", "5"},
Envv: []string{"PATH=" + os.Getenv("PATH")},
WorkingDirectory: "/",
KUID: uid,
}
// Verify that "sleep 100" and "sleep 5" are running after exec.
// First, start running exec (which blocks).
status := make(chan error, 1)
go func() {
exitStatus, err := s.Execute(&execArgs)
if err != nil {
status <- err
} else if exitStatus != 0 {
status <- fmt.Errorf("failed with exit status: %v", exitStatus)
} else {
status <- nil
}
}()
if err := waitForProcessList(s, expectedPL); err != nil {
t.Fatal(err)
}
// Ensure that exec finished without error.
select {
case <-time.After(10 * time.Second):
t.Fatalf("container timed out waiting for exec to finish.")
case st := <-status:
if st != nil {
t.Errorf("container failed to exec %v: %v", execArgs, err)
}
}
}
}
// TestCheckpointRestore creates a container that continuously writes successive integers
// to a file. To test checkpoint and restore functionality, the container is
// checkpointed and the last number printed to the file is recorded. Then, it is restored in two
// new containers and the first number printed from these containers is checked. Both should
// be the next consecutive number after the last number from the checkpointed container.
func TestCheckpointRestore(t *testing.T) {
// Skip overlay because test requires writing to host file.
for _, conf := range configs(kvm) {
t.Logf("Running test with conf: %+v", conf)
dir, err := ioutil.TempDir("", "checkpoint-test")
if err != nil {
t.Fatalf("ioutil.TempDir failed: %v", err)
}
if err := os.Chmod(dir, 0777); err != nil {
t.Fatalf("error chmoding file: %q, %v", dir, err)
}
outputPath := filepath.Join(dir, "output")
outputFile, err := createWriteableOutputFile(outputPath)
if err != nil {
t.Fatalf("error creating output file: %v", err)
}
defer outputFile.Close()
script := "for ((i=0; ;i++)); do echo $i >> /tmp2/output; sleep 1; done"
spec := testutil.NewSpecWithArgs("bash", "-c", script)
spec.Mounts = append(spec.Mounts, specs.Mount{
Type: "bind",
Destination: "/tmp2",
Source: dir,
})
rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
// Create and start the container.
cont, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Set the image path, which is where the checkpoint image will be saved.
imagePath := filepath.Join(dir, "test-image-file")
// Create the image file and open for writing.
file, err := os.OpenFile(imagePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644)
if err != nil {
t.Fatalf("error opening new file at imagePath: %v", err)
}
defer file.Close()
// Wait until application has run.
if err := waitForFile(outputFile); err != nil {
t.Fatalf("Failed to wait for output file: %v", err)
}
// Checkpoint running container; save state into new file.
if err := cont.Checkpoint(file); err != nil {
t.Fatalf("error checkpointing container to empty file: %v", err)
}
defer os.RemoveAll(imagePath)
lastNum, err := readOutputNum(outputFile, false)
if err != nil {
t.Fatalf("error with outputFile: %v", err)
}
// Delete and recreate file before restoring.
if err := os.Remove(outputPath); err != nil {
t.Fatalf("error removing file")
}
outputFile2, err := createWriteableOutputFile(outputPath)
if err != nil {
t.Fatalf("error creating output file: %v", err)
}
defer outputFile2.Close()
// Restore into a new container.
cont2, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont2.Destroy()
if err := cont2.Restore(spec, conf, imagePath); err != nil {
t.Fatalf("error restoring container: %v", err)
}
firstNum, err := readOutputNum(outputFile2, true)
if err != nil {
t.Fatalf("error with outputFile: %v", err)
}
// Check that lastNum is one less than firstNum and that the container picks up from where it left off.
if lastNum+1 != firstNum {
t.Errorf("error numbers not in order, previous: %d, next: %d", lastNum, firstNum)
}
// Restore into another container!
// Delete and recreate file before restoring.
if err := os.Remove(outputPath); err != nil {
t.Fatalf("error removing file")
}
outputFile3, err := createWriteableOutputFile(outputPath)
if err != nil {
t.Fatalf("error creating output file: %v", err)
}
defer outputFile3.Close()
// Restore into a new container.
cont3, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont3.Destroy()
if err := cont3.Restore(spec, conf, imagePath); err != nil {
t.Fatalf("error restoring container: %v", err)
}
firstNum2, err := readOutputNum(outputFile3, true)
if err != nil {
t.Fatalf("error with outputFile: %v", err)
}
// Check that lastNum is one less than firstNum and that the container picks up from where it left off.
if lastNum+1 != firstNum2 {
t.Errorf("error numbers not in order, previous: %d, next: %d", lastNum, firstNum2)
}
}
}
// TestPauseResume tests that we can successfully pause and resume a container.
// It starts running sleep and execs another process. It pauses and checks
// that both processes are still present: the paused processes continue to exist.
// It then unpauses and confirms that both processes are running. Then it
// waits until one process completes and checks that the other is still running.
func TestPauseResume(t *testing.T) {
for _, conf := range configs(kvm) {
t.Logf("Running test with conf: %+v", conf)
const uid = 343
spec := testutil.NewSpecWithArgs("sleep", "20")
dir, err := ioutil.TempDir("", "pause-test")
if err != nil {
t.Fatalf("ioutil.TempDir failed: %v", err)
}
lock, err := ioutil.TempFile(dir, "lock")
if err != nil {
t.Fatalf("error creating output file: %v", err)
}
defer lock.Close()
spec.Mounts = append(spec.Mounts, specs.Mount{
Type: "bind",
Destination: "/tmp2",
Source: dir,
})
rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
// Create and start the container.
cont, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// expectedPL lists the expected process state of the container.
expectedPL := []*control.Process{
{
UID: 0,
PID: 1,
PPID: 0,
C: 0,
Cmd: "sleep",
},
{
UID: uid,
PID: 2,
PPID: 0,
C: 0,
Cmd: "bash",
},
}
script := fmt.Sprintf("while [[ -f /tmp2/%s ]]; do sleep 0.1; done", filepath.Base(lock.Name()))
execArgs := control.ExecArgs{
Filename: "/bin/bash",
Argv: []string{"bash", "-c", script},
Envv: []string{"PATH=" + os.Getenv("PATH")},
WorkingDirectory: "/",
KUID: uid,
}
// First, start running exec (which blocks).
go cont.Execute(&execArgs)
// Verify that "sleep 5" is running.
if err := waitForProcessList(cont, expectedPL); err != nil {
t.Fatal(err)
}
// Pause the running container.
if err := cont.Pause(); err != nil {
t.Errorf("error pausing container: %v", err)
}
if got, want := cont.Status, container.Paused; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
if err := os.Remove(lock.Name()); err != nil {
t.Fatalf("os.Remove(lock) failed: %v", err)
}
// Script loops and sleeps for 100ms. Give a bit a time for it to exit in
// case pause didn't work.
time.Sleep(200 * time.Millisecond)
// Verify that the two processes still exist.
if err := getAndCheckProcLists(cont, expectedPL); err != nil {
t.Fatal(err)
}
// Resume the running container.
if err := cont.Resume(); err != nil {
t.Errorf("error pausing container: %v", err)
}
if got, want := cont.Status, container.Running; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
expectedPL2 := []*control.Process{
{
UID: 0,
PID: 1,
PPID: 0,
C: 0,
Cmd: "sleep",
},
}
// Verify that deleting the file triggered the process to exit.
if err := waitForProcessList(cont, expectedPL2); err != nil {
t.Fatal(err)
}
}
}
// TestPauseResumeStatus makes sure that the statuses are set correctly
// with calls to pause and resume and that pausing and resuming only
// occur given the correct state.
func TestPauseResumeStatus(t *testing.T) {
spec := testutil.NewSpecWithArgs("sleep", "20")
conf := testutil.TestConfig()
rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
// Create and start the container.
cont, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Pause the running container.
if err := cont.Pause(); err != nil {
t.Errorf("error pausing container: %v", err)
}
if got, want := cont.Status, container.Paused; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
// Try to Pause again. Should cause error.
if err := cont.Pause(); err == nil {
t.Errorf("error pausing container that was already paused: %v", err)
}
if got, want := cont.Status, container.Paused; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
// Resume the running container.
if err := cont.Resume(); err != nil {
t.Errorf("error resuming container: %v", err)
}
if got, want := cont.Status, container.Running; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
// Try to resume again. Should cause error.
if err := cont.Resume(); err == nil {
t.Errorf("error resuming container already running: %v", err)
}
if got, want := cont.Status, container.Running; got != want {
t.Errorf("container status got %v, want %v", got, want)
}
}
// TestCapabilities verifies that:
// - Running exec as non-root UID and GID will result in an error (because the
// executable file can't be read).
// - Running exec as non-root with CAP_DAC_OVERRIDE succeeds because it skips
// this check.
func TestCapabilities(t *testing.T) {
const uid = 343
const gid = 2401
for _, conf := range configs(all) {
t.Logf("Running test with conf: %+v", conf)
spec := testutil.NewSpecWithArgs("sleep", "100")
// We generate files in the host temporary directory.
spec.Mounts = append(spec.Mounts, specs.Mount{
Destination: os.TempDir(),
Source: os.TempDir(),
Type: "bind",
})
rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
// Create and start the container.
s, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer s.Destroy()
if err := s.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// expectedPL lists the expected process state of the container.
expectedPL := []*control.Process{
{
UID: 0,
PID: 1,
PPID: 0,
C: 0,
Cmd: "sleep",
},
{
UID: uid,
PID: 2,
PPID: 0,
C: 0,
Cmd: "exe",
},
}
if err := waitForProcessList(s, expectedPL[:1]); err != nil {
t.Fatalf("Failed to wait for sleep to start, err: %v", err)
}
// Create an executable that can't be run with the specified UID:GID.
// This shouldn't be callable within the container until we add the
// CAP_DAC_OVERRIDE capability to skip the access check.
exePath := filepath.Join(rootDir, "exe")
if err := ioutil.WriteFile(exePath, []byte("#!/bin/sh\necho hello"), 0770); err != nil {
t.Fatalf("couldn't create executable: %v", err)
}
defer os.Remove(exePath)
// Need to traverse the intermediate directory.
os.Chmod(rootDir, 0755)
execArgs := control.ExecArgs{
Filename: exePath,
Argv: []string{exePath},
Envv: []string{"PATH=" + os.Getenv("PATH")},
WorkingDirectory: "/",
KUID: uid,
KGID: gid,
Capabilities: &auth.TaskCapabilities{},
}
// "exe" should fail because we don't have the necessary permissions.
if _, err := s.Execute(&execArgs); err == nil {
t.Fatalf("container executed without error, but an error was expected")
}
// Now we run with the capability enabled and should succeed.
execArgs.Capabilities = &auth.TaskCapabilities{
EffectiveCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),
}
// "exe" should not fail this time.
if _, err := s.Execute(&execArgs); err != nil {
t.Fatalf("container failed to exec %v: %v", execArgs, err)
}
}
}
// Test that a tty FD is sent over the console socket if one is provided.
func TestConsoleSocket(t *testing.T) {
for _, conf := range configs(all) {
t.Logf("Running test with conf: %+v", conf)
spec := testutil.NewSpecWithArgs("true")
rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
// Create a named socket and start listening. We use a relative path
// to avoid overflowing the unix path length limit (108 chars).
socketPath := filepath.Join(bundleDir, "socket")
cwd, err := os.Getwd()
if err != nil {
t.Fatalf("error getting cwd: %v", err)
}
socketRelPath, err := filepath.Rel(cwd, socketPath)
if err != nil {
t.Fatalf("error getting relative path for %q from cwd %q: %v", socketPath, cwd, err)
}
if len(socketRelPath) > len(socketPath) {
socketRelPath = socketPath
}
srv, err := unet.BindAndListen(socketRelPath, false)
if err != nil {
t.Fatalf("error binding and listening to socket %q: %v", socketPath, err)
}
defer os.Remove(socketPath)
// Create the container and pass the socket name.
id := testutil.UniqueContainerID()
s, err := container.Create(id, spec, conf, bundleDir, socketRelPath, "")
if err != nil {
t.Fatalf("error creating container: %v", err)
}
// Open the other end of the socket.
sock, err := srv.Accept()
if err != nil {
t.Fatalf("error accepting socket connection: %v", err)
}
// Allow 1 fd to be received. We only expect 1.
r := sock.Reader(true /* blocking */)
r.EnableFDs(1)
// The socket is closed right after sending the FD, so EOF is
// an allowed error.
b := [][]byte{{}}
if _, err := r.ReadVec(b); err != nil && err != io.EOF {
t.Fatalf("error reading from socket connection: %v", err)
}
// We should have gotten a control message.
fds, err := r.ExtractFDs()
if err != nil {
t.Fatalf("error extracting fds from socket connection: %v", err)
}
if len(fds) != 1 {
t.Fatalf("got %d fds from socket, wanted 1", len(fds))
}
// Verify that the fd is a terminal.
if _, err := unix.IoctlGetTermios(fds[0], unix.TCGETS); err != nil {
t.Errorf("fd is not a terminal (ioctl TGGETS got %v)", err)
}
// Shut it down.
if err := s.Destroy(); err != nil {
t.Fatalf("error destroying container: %v", err)
}
// Close socket.
if err := srv.Close(); err != nil {
t.Fatalf("error destroying container: %v", err)
}
}
}
// TestRunNonRoot checks that sandbox can be configured when running as
// non-privileged user.
func TestRunNonRoot(t *testing.T) {
for _, conf := range configs(kvm) {
t.Logf("Running test with conf: %+v", conf)
spec := testutil.NewSpecWithArgs("/bin/true")
spec.Process.User.UID = 343
spec.Process.User.GID = 2401
// The user that the container runs as can't list '$TMP/blocked' and would
// fail to mount it.
dir, err := ioutil.TempDir("", "blocked")
if err != nil {
t.Fatalf("ioutil.TempDir() failed: %v", err)
}
if err := os.Chmod(dir, 0700); err != nil {
t.Fatalf("os.MkDir(%q) failed: %v", dir, err)
}
dir = path.Join(dir, "test")
if err := os.Mkdir(dir, 0755); err != nil {
t.Fatalf("os.MkDir(%q) failed: %v", dir, err)
}
// We generate files in the host temporary directory.
spec.Mounts = append(spec.Mounts, specs.Mount{
Destination: dir,
Source: dir,
Type: "bind",
})
if err := run(spec, conf); err != nil {
t.Fatalf("error running sadbox: %v", err)
}
}
}
// TestMountNewDir checks that runsc will create the destination directory if it
// doesn't exist.
func TestMountNewDir(t *testing.T) {
for _, conf := range configs(overlay) {
t.Logf("Running test with conf: %+v", conf)
srcDir := path.Join(os.TempDir(), "src", "newdir", "anotherdir")
if err := os.MkdirAll(srcDir, 0755); err != nil {
t.Fatalf("os.MkDir(%q) failed: %v", srcDir, err)
}
// Attempt to remove dir to ensure it doesn't exist.
mountDir := path.Join(os.TempDir(), "newdir")
if err := os.RemoveAll(mountDir); err != nil {
t.Fatalf("os.RemoveAll(%q) failed: %v", mountDir, err)
}
mountDir = path.Join(mountDir, "anotherdir")
spec := testutil.NewSpecWithArgs("/bin/ls", mountDir)
spec.Mounts = append(spec.Mounts, specs.Mount{
Destination: mountDir,
Source: srcDir,
Type: "bind",
})
if err := run(spec, conf); err != nil {
t.Fatalf("error running sadbox: %v", err)
}
}
}
func TestReadonlyRoot(t *testing.T) {
for _, conf := range configs(overlay) {
t.Logf("Running test with conf: %+v", conf)
spec := testutil.NewSpecWithArgs("/bin/touch", "/foo")
spec.Root.Readonly = true
rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
conf.Overlay = true
// Create, start and wait for the container.
s, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer s.Destroy()
if err := s.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
ws, err := s.Wait()
if err != nil {
t.Fatalf("error waiting on container: %v", err)
}
if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM {
t.Fatalf("container failed, waitStatus: %v", ws)
}
}
}
func TestReadonlyMount(t *testing.T) {
for _, conf := range configs(overlay) {
t.Logf("Running test with conf: %+v", conf)
spec := testutil.NewSpecWithArgs("/bin/touch", "/foo/file")
dir, err := ioutil.TempDir("", "ro-mount")
if err != nil {
t.Fatalf("ioutil.TempDir() failed: %v", err)
}
spec.Mounts = append(spec.Mounts, specs.Mount{
Destination: "/foo",
Source: dir,
Type: "bind",
Options: []string{"ro"},
})
spec.Root.Readonly = false
rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
conf.Overlay = true
// Create, start and wait for the container.
s, err := container.Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer s.Destroy()
if err := s.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
ws, err := s.Wait()
if err != nil {
t.Fatalf("error waiting on container: %v", err)
}
if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM {
t.Fatalf("container failed, waitStatus: %v", ws)
}
}
}
// TestAbbreviatedIDs checks that runsc supports using abbreviated container
// IDs in place of full IDs.
func TestAbbreviatedIDs(t *testing.T) {
cids := []string{
"foo-" + testutil.UniqueContainerID(),
"bar-" + testutil.UniqueContainerID(),
"baz-" + testutil.UniqueContainerID(),
}
rootDir, err := testutil.SetupRootDir()
if err != nil {
t.Fatalf("error creating root dir: %v", err)
}
for _, cid := range cids {
spec := testutil.NewSpecWithArgs("sleep", "100")
conf := testutil.TestConfig()
bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
// Create and start the container.
cont, err := container.Create(cid, spec, conf, bundleDir, "", "")
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
}
// These should all be unambiguous.
unambiguous := map[string]string{
"f": cids[0],
cids[0]: cids[0],
"bar": cids[1],
cids[1]: cids[1],
"baz": cids[2],
cids[2]: cids[2],
}
for shortid, longid := range unambiguous {
if _, err := container.Load(rootDir, shortid); err != nil {
t.Errorf("%q should resolve to %q: %v", shortid, longid, err)
}
}
// These should be ambiguous.
ambiguous := []string{
"b",
"ba",
}
for _, shortid := range ambiguous {
if s, err := container.Load(rootDir, shortid); err == nil {
t.Errorf("%q should be ambiguous, but resolved to %q", shortid, s.ID)
}
}
}
// TestMultiContainerSanity checks that it is possible to run 2 dead-simple
// containers in the same sandbox.
func TestMultiContainerSanity(t *testing.T) {
for _, conf := range configs(all) {
t.Logf("Running test with conf: %+v", conf)
containerIDs := []string{
testutil.UniqueContainerID(),
testutil.UniqueContainerID(),
}
containerAnnotations := []map[string]string{
// The first container creates a sandbox.
map[string]string{
specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox,
},
// The second container creates a container within the first
// container's sandbox.
map[string]string{
specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer,
specutils.ContainerdSandboxIDAnnotation: containerIDs[0],
},
}
rootDir, err := testutil.SetupRootDir()
if err != nil {
t.Fatalf("error creating root dir: %v", err)
}
defer os.RemoveAll(rootDir)
// Setup the containers.
containers := make([]*container.Container, 0, len(containerIDs))
for i, annotations := range containerAnnotations {
spec := testutil.NewSpecWithArgs("sleep", "100")
spec.Annotations = annotations
bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(bundleDir)
cont, err := container.Create(containerIDs[i], spec, conf, bundleDir, "", "")
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
containers = append(containers, cont)
}
expectedPL := []*control.Process{
{
UID: 0,
PID: 1,
PPID: 0,
C: 0,
Cmd: "sleep",
},
{
UID: 0,
PID: 2,
PPID: 0,
C: 0,
Cmd: "sleep",
},
}
// Check via ps that multiple processes are running.
if err := waitForProcessList(containers[0], expectedPL); err != nil {
t.Errorf("failed to wait for sleep to start: %v", err)
}
}
}
func TestMultiContainerWait(t *testing.T) {
containerIDs := []string{
testutil.UniqueContainerID(),
testutil.UniqueContainerID(),
}
containerAnnotations := []map[string]string{
// The first container creates a sandbox.
map[string]string{
specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox,
},
// The second container creates a container within the first
// container's sandbox.
map[string]string{
specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer,
specutils.ContainerdSandboxIDAnnotation: containerIDs[0],
},
}
args := [][]string{
// The first container should run the entire duration of the
// test.
{"sleep", "100"},
// We'll wait on the second container, which is much shorter
// lived.
{"sleep", "1"},
}
rootDir, err := testutil.SetupRootDir()
if err != nil {
t.Fatalf("error creating root dir: %v", err)
}
defer os.RemoveAll(rootDir)
// Setup the containers.
containers := make([]*container.Container, 0, len(containerIDs))
for i, annotations := range containerAnnotations {
spec := testutil.NewSpecWithArgs(args[i][0], args[i][1])
spec.Annotations = annotations
conf := testutil.TestConfig()
bundleDir, err := testutil.SetupContainerInRoot(rootDir, spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(bundleDir)
cont, err := container.Create(containerIDs[i], spec, conf, bundleDir, "", "")
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
containers = append(containers, cont)
}
expectedPL := []*control.Process{
{
UID: 0,
PID: 1,
PPID: 0,
C: 0,
Cmd: "sleep",
},
{
UID: 0,
PID: 2,
PPID: 0,
C: 0,
Cmd: "sleep",
},
}
// Check via ps that multiple processes are running.
if err := waitForProcessList(containers[0], expectedPL); err != nil {
t.Errorf("failed to wait for sleep to start: %v", err)
}
// Wait on the short lived container from multiple goroutines.
wg := sync.WaitGroup{}
for i := 0; i < 3; i++ {
wg.Add(1)
go func() {
defer wg.Done()
if ws, err := containers[1].Wait(); err != nil {
t.Errorf("failed to wait for process %q: %v", strings.Join(containers[1].Spec.Process.Args, " "), err)
} else if es := ws.ExitStatus(); es != 0 {
t.Errorf("process %q exited with non-zero status %d", strings.Join(containers[1].Spec.Process.Args, " "), es)
}
if _, err := containers[1].Wait(); err == nil {
t.Errorf("wait for stopped process %q should fail", strings.Join(containers[1].Spec.Process.Args, " "))
}
// After Wait returns, ensure that the root container is running and
// the child has finished.
if err := waitForProcessList(containers[0], expectedPL[:1]); err != nil {
t.Errorf("failed to wait for %q to start: %v", strings.Join(containers[0].Spec.Process.Args, " "), err)
}
}()
}
// Also wait via PID.
for i := 0; i < 3; i++ {
wg.Add(1)
go func() {
defer wg.Done()
const pid = 2
if ws, err := containers[0].WaitPID(pid); err != nil {
t.Errorf("failed to wait for PID %d: %v", pid, err)
} else if es := ws.ExitStatus(); es != 0 {
t.Errorf("PID %d exited with non-zero status %d", pid, es)
}
if _, err := containers[0].WaitPID(pid); err == nil {
t.Errorf("wait for stopped PID %d should fail", pid)
}
}()
}
wg.Wait()
}
| [
"\"PATH\"",
"\"PATH\"",
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
preprocess_vn.py | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import glob, csv, librosa, os, subprocess, time
import numpy as np
import pandas as pd
import data_vn
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
__author__ = '[email protected]'
# data path
data_path = "asset/data/"
#
# process Vivos corpus
#
def process_vivos(csv_file, category):
parent_path = data_path + 'vivos/'
labels, wave_files = [], []
# create csv writer
writer = csv.writer(csv_file, delimiter=',')
# read label-info
content_filename = parent_path + category + '/prompts.txt'
label_info = pd.read_table(content_filename, usecols=['ID'], index_col=False, delim_whitespace=True)
# print(label_info) # testpoint: label_info
# read file IDs
# file_ids = []
# for uid in label_info.ID.values:
# print(uid) # testpoint: uid
# folder_path, filename = uid.split("_")
# for d in [parent_path + category + '/waves/%s' % folder_path]:
# print(d) # testpoint: folder_path
# a = glob.glob(d + '*.txt')
# print(a)
# b = sorted(glob.glob(d + '*.txt'))
# print(b)
# for f in sorted(glob.glob(d + '*.txt')):
# # print(f[-12:-4])
# file_ids.extend([f[-12:-4]])
# # print(file_ids)
file_ids = label_info.ID
# print(file_ids) # testpoint: file_ID
# preprocess
content_ = open(content_filename, 'r')
title_content = content_.readline()
# print(title_content) # Result: 'ID\t\tContent\n'
for i, f in enumerate(file_ids):
# wave file name
wave_file = parent_path + category + '/waves/%s/' % f[0:10] + f + '.wav'
# print(wave_file)
fn = wave_file.split('/')[-1]
# print(fn)
target_filename = 'asset/data/preprocess_vn/mfcc/' + fn + '.npy'
# print(target_filename)
if os.path.exists(target_filename):
continue
print("Vivos corpus preprocessing (%d/%d) - ['%s']" % (i, len(file_ids), wave_file))
# load wave file
wave, sr = librosa.load(wave_file, sr=16000, mono=True) # default: sr=22050Hz
# re-sample (48K --> 16K)
# wave = wave[::3]
# get mfcc feature
mfcc = librosa.feature.mfcc(wave, sr=16000)
# get label index
curr_content = content_.readline()
curr_content = curr_content[(len(fn)-3):(len(curr_content))]
print(curr_content)
label = data_vn.str2index(curr_content)
# save result (exclude small mfcc data to prevent CTC loss)
if len(label) < mfcc.shape[1]:
# save meta info
writer.writerow([fn] + label)
# save mfcc
np.save(target_filename, mfcc, allow_pickle=False)
# check saved features
print(data_vn.index2str(label), '\n')
# delay for observation and analysis
# time.sleep(10)
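# Note, for illustration: librosa.feature.mfcc with its default n_mfcc of 20
# returns an array of shape (20, n_frames), so the len(label) < mfcc.shape[1]
# check above keeps only utterances whose label sequence is shorter than the
# number of MFCC frames, which the CTC loss used downstream requires.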
#
# Create directories
#
if not os.path.exists('asset/data/preprocess_vn'):
os.makedirs('asset/data/preprocess_vn')
if not os.path.exists('asset/data/preprocess_vn/meta'):
os.makedirs('asset/data/preprocess_vn/meta')
if not os.path.exists('asset/data/preprocess_vn/mfcc'):
os.makedirs('asset/data/preprocess_vn/mfcc')
#
# Run pre-processing for training
#
# Vivos corpus for training
csv_file_train = open('asset/data/preprocess_vn/meta/train.csv', 'w')
process_vivos(csv_file_train, 'train')
csv_file_train.close()
#
# Run pre-processing for testing
#
# Vivos corpus for test
csv_file_test = open('asset/data/preprocess_vn/meta/test.csv', 'w')
process_vivos(csv_file_test, 'test')
csv_file_test.close()
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
share/qt/extract_strings_qt.py | #!/usr/bin/env python3
# Copyright (c) 2020 The Eleccoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import operator
import os
import sys
OUT_CPP="qt/eleccoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
sys.exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w', encoding="utf8")
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *eleccoin_strings[] = {\n')
f.write('QT_TRANSLATE_NOOP("eleccoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("eleccoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| [] | [] | ["COPYRIGHT_HOLDERS", "XGETTEXT"]
| [] | ["COPYRIGHT_HOLDERS", "XGETTEXT"] | python | 2 | 0 | |
pkg/output/output_setup.go | package output
import (
"github.com/drud/ddev/pkg/globalconfig"
"github.com/drud/ddev/pkg/nodeps"
"github.com/drud/ddev/pkg/version"
"github.com/evalphobia/logrus_sentry"
"os"
"github.com/fatih/color"
log "github.com/sirupsen/logrus"
)
var (
// UserOut is the customized logrus log used for direct user output
UserOut = log.New()
// UserOutFormatter is the specialized formatter for UserOut
UserOutFormatter = new(TextFormatter)
// JSONOutput is a bool telling whether we're outputting in json. Set by command-line args.
JSONOutput = false
)
// LogSetUp sets up UserOut and log loggers as needed by ddev
func LogSetUp() {
// Use color.Output instead of stderr for all user output
log.SetOutput(color.Output)
UserOut.Out = color.Output
levels := []log.Level{
log.PanicLevel,
log.FatalLevel,
log.ErrorLevel,
}
// Report errors and panics to Sentry
if version.SentryDSN != "" && !globalconfig.DdevNoSentry && nodeps.IsInternetActive() {
hook, err := logrus_sentry.NewAsyncWithTagsSentryHook(version.SentryDSN, nodeps.InstrumentationTags, levels)
if err == nil {
UserOut.Hooks.Add(hook)
}
}
if !JSONOutput {
UserOut.Formatter = UserOutFormatter
} else {
UserOut.Formatter = &JSONFormatter{}
}
UserOutFormatter.DisableTimestamp = true
// Always use log.DebugLevel for UserOut
UserOut.Level = log.DebugLevel // UserOut will by default always output
// But we use custom DRUD_DEBUG-settable loglevel for log
logLevel := log.InfoLevel
drudDebug := os.Getenv("DRUD_DEBUG")
if drudDebug != "" {
logLevel = log.DebugLevel
}
log.SetLevel(logLevel)
}
| ["\"DRUD_DEBUG\""] | [] | ["DRUD_DEBUG"]
| [] | ["DRUD_DEBUG"] | go | 1 | 0 | |
middlewarehouse/consumers/shipstation/utils/config.go | package utils
import (
"errors"
"fmt"
"os"
"time"
)
type Config struct {
PollingInterval time.Duration
ApiKey string
ApiSecret string
}
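// MakeConfig builds a Config from the POLLING_INTERVAL, API_KEY and API_SECRET
// environment variables, returning an error when any of them is missing or invalid.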
func MakeConfig() (Config, error) {
var err error
config := Config{}
config.PollingInterval, err = time.ParseDuration(os.Getenv("POLLING_INTERVAL"))
fmt.Printf("INTERVAL: %v\n", config.PollingInterval)
if err != nil {
return config, fmt.Errorf("Unable to parse POLLING_INTERVAL with error %s", err.Error())
}
config.ApiKey = os.Getenv("API_KEY")
if config.ApiKey == "" {
return config, errors.New("Unable to find API_KEY in env")
}
config.ApiSecret = os.Getenv("API_SECRET")
if config.ApiSecret == "" {
return config, errors.New("Unable to find API_SECRET in env")
}
return config, nil
}
| ["\"POLLING_INTERVAL\"", "\"API_KEY\"", "\"API_SECRET\""] | [] | ["API_KEY", "API_SECRET", "POLLING_INTERVAL"]
| [] | ["API_KEY", "API_SECRET", "POLLING_INTERVAL"] | go | 3 | 0 | |
src/nstar/fakes/tar.go | package main
import (
"fmt"
"io"
"io/ioutil"
"os"
"strings"
)
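// main emulates a tar binary for tests: it fails when its 4th argument contains
// CAUSE-TAR-TO-FAIL, writes its arguments to the file named by ARGS_LOG, echoes
// the value of STDOUT_LOG, and copies stdin into the file named by STDIN_LOG
// when that variable is set.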
func main() {
if strings.Contains(os.Args[4], "CAUSE-TAR-TO-FAIL") {
fmt.Println("tar error")
os.Exit(1)
}
argsLog := os.Getenv("ARGS_LOG")
stdinLog := os.Getenv("STDIN_LOG")
stdoutLog := os.Getenv("STDOUT_LOG")
if err := ioutil.WriteFile(argsLog, []byte(strings.Join(os.Args[1:], " ")), 0644); err != nil {
panic(err)
}
fmt.Print(stdoutLog)
if stdinLog != "" {
stdin, err := os.OpenFile(stdinLog, os.O_CREATE|os.O_RDWR, 0644)
if err != nil {
panic(err)
}
defer stdin.Close()
if _, err := io.Copy(stdin, os.Stdin); err != nil {
panic(err)
}
}
}
| ["\"ARGS_LOG\"", "\"STDIN_LOG\"", "\"STDOUT_LOG\""] | [] | ["STDIN_LOG", "ARGS_LOG", "STDOUT_LOG"]
| [] | ["STDIN_LOG", "ARGS_LOG", "STDOUT_LOG"] | go | 3 | 0 | |
internal/data/db.go | package data
import (
"log"
"os"
"github.com/joho/godotenv"
)
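// DBConnection loads the .env file and returns a map of connection settings,
// currently a single "mongodb" entry built from DB0_USER, DB0_PASSWORD and DB0_CLUSTER.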
func DBConnection() map[string]interface{} {
Connection := make(map[string]interface{})
	err := godotenv.Load()
	if err != nil {
		log.Fatal(err)
	}
mongoConn := Mongo{
Username: os.Getenv("DB0_USER"),
Password: os.Getenv("DB0_PASSWORD"),
Cluster: os.Getenv("DB0_CLUSTER"),
}
mongoConnection := mongoConn
Connection["mongodb"] = mongoConnection
return Connection
}
| ["\"DB0_USER\"", "\"DB0_PASSWORD\"", "\"DB0_CLUSTER\""] | [] | ["DB0_PASSWORD", "DB0_CLUSTER", "DB0_USER"]
| [] | ["DB0_PASSWORD", "DB0_CLUSTER", "DB0_USER"] | go | 3 | 0 | |
spotifytest.py | import datetime
import operator
import json
import os
import random
import re
import argparse
import logging
import spotipy
import time
import errno
import spotipy.oauth2 as oauth
from spotipy import util
# To access authorised Spotify data
from spotipy.oauth2 import SpotifyClientCredentials
from spotipy.oauth2 import SpotifyOAuth
redirect_uri = 'http://localhost:9999'
LOG_FILENAME = 'logging_rotatingfile_example.out'
logger = logging.getLogger('spotifyTest')
logger.setLevel(logging.DEBUG)
logging.basicConfig(filename='spotilog.txt', level='INFO')
class Traks:
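    """Plain value object holding one track's Spotify ID, artist/album/genre metadata and audio features."""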
def __init__(self, id_sp, artiste, album, genres, energy, acousticness,
danceability, instrumentalness, liveness, loudness,
speechiness, valence, tempo):
self.id_sp = id_sp
self.artiste = artiste
self.album = album
self.genres = genres
self.energy = energy
self.acousticness = acousticness
self.danceability = danceability
self.instrumentalness = instrumentalness
self.liveness = liveness
self.loudness = loudness
self.speechiness = speechiness
self.valence = valence
self.tempo = tempo
class SpotifyInstance:
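    """Wraps a Spotipy client: handles OAuth tokens, builds a local track database and (re)creates playlists from it."""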
def __init__(self):
self.add_argument()
self.scope = 'user-follow-read playlist-modify-public playlist-modify-private user-read-private user-library-modify user-library-read'
self.spo = oauth.SpotifyOAuth(client_id=self.client_id,
client_secret=self.client_secret,
redirect_uri=redirect_uri,
scope=self.scope,
cache_path=".cache-{}".format(
self.username))
util.prompt_for_user_token(self.username,
self.scope,
client_id=self.client_id,
client_secret=self.client_secret,
redirect_uri=redirect_uri)
self.sp = spotipy.Spotify(auth=self.get_token())
# self.playlist_id = "0RQmvqO5SVHbMJzPTY4efG"
self.initial_time = datetime.datetime.now()
self.number_track_in_playlist = 0
self.playlist_number = 0
self.playlist_id = ""
self.number_max_playlist = 9500
self.number_max_request1 = 99
self.number_max_request2 = 49
# self.number_max_request2 = 5
self.final_database = os.environ[
'APPDATA'] + "/spotipyDatabaseFred/database_final.json"
self.genres_database = os.environ[
'APPDATA'] + "/spotipyDatabaseFred/database_genres.json"
return
def get_token(self):
token_info = self.spo.get_cached_token()
if token_info:
access_token = token_info['access_token']
return access_token
else:
print("error token : ", token_info)
def taille_playlist(self):
# playlist_list_id_to_delete = []
offset = 0
for i in range(0, 50):
playlists = self.sp.user_playlists(self.username,
limit=self.number_max_request2,
offset=offset)
offset += self.number_max_request2
number_of_plalist = 0
for playlist in playlists['items']:
number_of_plalist += 1
# if playlist['id'] in playlist_list_id:
# print("error playlist_list_id deja unfollower")
# print(playlist_list_id)
# print(playlist['id'])
# break
# if re.search("AuN°[0-9].*", playlist['name']) or re.search(
# "feature :.*", playlist['name']):
print("mark playlist to delete :", playlist['name'])
self.playlist_list_id_to_delete.append(playlist['id'])
if number_of_plalist < 49:
break
def mark_playlist_to_delete(self):
self.playlist_list_id_to_delete = []
offset = 0
for i in range(0, 50):
playlists = self.sp.user_playlists(self.username,
limit=self.number_max_request2,
offset=offset)
offset += self.number_max_request2
number_of_plalist = 0
for playlist in playlists['items']:
number_of_plalist += 1
if re.search("AuN°[0-9].*", playlist['name']) or re.search(
"feature.*", playlist['name']) or re.search(
"Xgenre.*", playlist['name']) or re.search(
"genre.*", playlist['name']):
print("mark playlist to delete :", playlist['name'])
self.playlist_list_id_to_delete.append(playlist['id'])
if number_of_plalist < 49:
break
def delete_marked_playlist(self):
print(self.playlist_list_id_to_delete)
for playlist in self.playlist_list_id_to_delete:
self.sp.user_playlist_unfollow(self.username, playlist)
def calcul_time_token(self):
time = datetime.datetime.now() - self.initial_time
print("time = ", time)
if time >= datetime.timedelta(0, 0, 0, 0, 15, 0, 0):
print("token refresh")
self.refresh_token()
self.initial_time = datetime.datetime.now()
return
def refresh_token(self):
cached_token = self.spo.get_cached_token()
print("token expire = ", self.spo.is_token_expired(cached_token))
refreshed_token = cached_token['refresh_token']
new_token = self.spo.refresh_access_token(refreshed_token)
print("new token : ", new_token['access_token']) # <--
# also we need to specifically pass `auth=new_token['access_token']`
self.sp = spotipy.Spotify(auth=new_token['access_token'])
return new_token
def get_artist(self, name):
results = self.sp.search(q='artist:' + name, type='artist')
items = results['artists']['items']
if len(items) > 0:
return items[0]
else:
return None
def get_artist_followed(self):
artist_list = []
artist_id = ""
# for i in range(0, 1):
for i in range(0, 50):
if i == 0:
artist_followed = self.sp.current_user_followed_artists(
limit=self.number_max_request2, after=None)
else:
artist_followed = self.sp.current_user_followed_artists(
limit=self.number_max_request2, after=artist_id)
for key in artist_followed['artists']['items']:
# print(key['genres'])
# if key['genres'] == 'classical':
# self.classic_album.extend(key['genres'])
# print(self.classic_album)
# else:
artist_id = key['id']
# print("fred = " + artist_id)
# print(key['name'])
artist = self.get_artist(key['name'])
if (artist != None):
artist_list.append([artist, key['genres'], key['name']])
print(key['name'])
return artist_list
def create_database(self):
database = []
track_id = []
database = self.get_liked_track()
artiste_list = self.get_artist_followed()
for key, genre, name in artiste_list:
album, album_name = (self.show_artist_albums(key))
for key in album:
self.calcul_time_token()
trakts = (self.show_album_tracks(key))
track_id, track_name = self.add_trakts_id_to_list(trakts)
print(track_id)
energy, acousticness, danceability, instrumentalness, liveness, loudness, speechiness, valence, tempo = self.audio_features_list(
track_id)
# print("energy",energy)
if len(track_id) != 0 and len(energy) != 0:
for i in range(0, len(track_id) - 1):
# print("i=",i)
# print(key['name'])
# print(track_id[i], track_name[i])
# print(energy[i])
try:
database.append([
name, genre, key['name'], track_id[i],
track_name[i], energy[i], acousticness[i],
danceability[i], instrumentalness[i],
liveness[i], loudness[i], speechiness[i],
valence[i], tempo[i]
])
except Exception:
database.append([
name, genre, key['name'], track_id[i],
track_name[i], None, None, None, None, None,
None, None, None, None
])
continue
return database
def show_artist_albums(self, artist):
albums = []
albums_name = []
results = self.sp.artist_albums(artist['id'], album_type='album')
albums.extend(results['items'])
while results['next']:
results = self.sp.next(results)
albums.extend(results['items'])
logger.info('Total albums: %s', len(albums))
unique = set() # skip duplicate albums
album_output = []
for album in albums:
name = album['name'].lower()
if (name not in unique) and (name != None):
logger.info('ALBUM: %s', name)
unique.add(name)
# self.show_album_tracks(album)
album_output.append(album)
albums_name.append(album['name'])
# self.album_list.extend(album_output)
return album_output, albums_name
def audio_features_list(self, trakts_id_liste):
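        # Request Spotify audio features in batches of at most number_max_request1 IDs
        # and return one list per feature, aligned with the order of trakts_id_liste.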
number_trak_boucle = 0
list_key = []
feature_list = []
# print("trakts_id_liste",trakts_id_liste)
for key in trakts_id_liste:
# print("fred1", key)
number_trak_boucle += 1
list_key.append(key)
if len(list_key
) == self.number_max_request1 or number_trak_boucle == len(
trakts_id_liste):
feature_list.extend(self.sp.audio_features(list_key))
list_key.clear()
# print("feature_list", feature_list)
energy = []
acousticness = []
danceability = []
instrumentalness = []
liveness = []
loudness = []
speechiness = []
valence = []
tempo = []
for key in feature_list:
# print(key)
if key != None:
energy.append(key['energy'])
acousticness.append(key['acousticness'])
danceability.append(key['danceability'])
instrumentalness.append(key['instrumentalness'])
liveness.append(key['liveness'])
loudness.append(key['loudness'])
speechiness.append(key['speechiness'])
valence.append(key['valence'])
tempo.append(key['tempo'])
else:
energy.append(None)
acousticness.append(None)
danceability.append(None)
instrumentalness.append(None)
liveness.append(None)
loudness.append(None)
speechiness.append(None)
valence.append(None)
tempo.append(None)
return energy, acousticness, danceability, instrumentalness, liveness, loudness, speechiness, valence, tempo
def playlist_from_genres_pattern(self, genres_name):
# print("processing playlist : ", genres_name)
list_track = []
for key in self.track_list:
# print(key)
for genre in key.genres:
# print(genre)
if re.search(genres_name, genre):
# print("fred",genre)
list_track.append(key.id_sp)
break
playlist_name = "genre pat : " + str(genres_name)
# print(len(list_track),len(list_track))
# print(playlist_name)
self.add_list_of_trackts(list_track, 5, playlist_name)
def playlist_from_genres(self, genres_name):
# print("processing playlist : ", genres_name)
list_track = []
for key in self.track_list:
# print(key)
for genre in key.genres:
# print(genre)
if genre in genres_name:
# print("fred",genre)
list_track.append(key.id_sp)
break
playlist_name = "Xgenre : " + str(genres_name)
# print(len(list_track),len(list_track))
# print(playlist_name)
self.add_list_of_trackts(list_track, 5, playlist_name)
def playlist_from_feature(self, feature_name, min_max, value_arg):
print("processing playlist : ", feature_name)
feature_dico = {}
b = []
for key in self.track_list:
feature = {
"energy": key.energy,
"acousticness": key.acousticness,
"danceability": key.danceability,
"instrumentalness": key.instrumentalness,
"liveness": key.liveness,
"loudness": key.loudness,
"speechiness": key.speechiness,
"valence": key.valence,
"tempo": key.tempo
}
if feature[feature_name] != None:
feature_dico[key.id_sp] = feature[feature_name]
# print(feature_dico)
if feature_dico != {}:
if min_max == True:
playlist_name = "feature rev : " + feature_name
feature_dico = sorted(feature_dico.items(),
key=lambda kv: kv[1])
# print(feature_dico)
if value_arg != None:
for key, value in feature_dico:
# print(key,value)
if value < value_arg:
b.append(key)
else:
b = [x[0] for x in feature_dico]
else:
playlist_name = "feature : " + feature_name
feature_dico = sorted(feature_dico.items(),
key=lambda kv: kv[1],
reverse=True)
if value_arg != None:
for key, value in feature_dico:
if value > value_arg:
b.append(key)
else:
b = [x[0] for x in feature_dico]
# b = [x[0] for x in feature_dico]
print("len(b)=", len(b))
self.add_list_of_trackts(b, 1, playlist_name)
def show_album_tracks(self, album):
tracks = []
results = self.sp.album_tracks(album['id'])
tracks.extend(results['items'])
while results['next']:
results = self.sp.next(results)
if results['items'] != None:
tracks.extend(results['items'])
for i, track in enumerate(tracks):
logger.info('%s. %s', i + 1, track['name'])
return tracks
def add_library_playlist(self):
self.big_playlist_from_database()
self.playlist_from_genres_pattern("french")
self.playlist_from_genres_pattern("classical")
self.playlist_from_feature("energy", True, 0.5)
self.playlist_from_feature("acousticness", True, 0.5)
self.playlist_from_feature("danceability", True, 0.5)
self.playlist_from_feature("instrumentalness", True, 0.5)
self.playlist_from_feature("liveness", True, 0.5)
self.playlist_from_feature("loudness", True, 0.5)
self.playlist_from_feature("speechiness", True, None)
self.playlist_from_feature("valence", True, 0.5)
self.playlist_from_feature("tempo", True, 100)
self.playlist_from_feature("energy", False, 0.5)
self.playlist_from_feature("acousticness", False, 0.5)
self.playlist_from_feature("danceability", False, 0.5)
self.playlist_from_feature("instrumentalness", False, 0.5)
self.playlist_from_feature("liveness", False, 0.5)
self.playlist_from_feature("loudness", False, 0.5)
self.playlist_from_feature("speechiness", False, None)
self.playlist_from_feature("valence", False, 0.5)
self.playlist_from_feature("tempo", False, 100)
list_genres = self.print_genres()
for key in list_genres:
self.playlist_from_genres(key)
def create_or_read_database(self):
if self.complete == True:
logger.info("create database")
database = self.create_database()
self.save_tracks_database_to_file(database, self.final_database)
else:
logger.info("use database processing")
database = self.read_database(self.final_database)
# self.add_trakts_id_to_list(self.read_database(self.path_to_save))
# self.feature_list = self.read_database(self.path_to_save_feature)
self.track_list = []
for key in database:
traks = Traks(key[3], key[0], key[2], key[1], key[5], key[6],
key[7], key[8], key[9], key[10], key[11], key[12],
key[13])
self.track_list.append(traks)
if self.complete == True:
list_genres = self.print_genres()
self.save_tracks_database_to_file(list_genres,
self.genres_database)
else:
list_genres = self.read_database(self.genres_database)
print(list_genres)
def big_playlist_from_database(self):
random.shuffle(self.track_list)
list_of_id = []
classical = 0
for key in self.track_list:
if "classical" in key.genres:
# print("classical = ",classical)
classical += 1
if classical > (len(self.track_list) / 15):
continue
list_of_id.append(key.id_sp)
self.add_list_of_trackts(list_of_id, 30, "AuN°")
def print_genres(self):
list_genres = []
for key in self.track_list:
for key2 in key.genres:
if key2 not in list_genres:
# print(key2)
list_genres.append(key2)
# print(list_genres)
return list_genres
def save_tracks_database_to_file(self, traks, path_to_save):
if not os.path.exists(os.path.dirname(path_to_save)):
try:
os.makedirs(os.path.dirname(path_to_save))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
print(path_to_save)
with open(path_to_save, "w") as outfile:
json.dump(traks, outfile)
def read_database(self, file_to_read):
if not os.path.exists(os.path.dirname(file_to_read)):
raise Exception(
"database don t exist.launch complete process first")
with open(file_to_read) as json_file:
traks = json.load(json_file)
return traks
def user_playlist_add_tracks_error(self, list_9500_track, playlist_id):
i = 0
while i < 500:
i += 1
try:
# print(list_9500_track)
self.sp.user_playlist_add_tracks(self.username, playlist_id,
list_9500_track)
break
except spotipy.client.SpotifyException as e:
if (e.http_status == 429):
print(e.http_status)
time.sleep(1)
continue
elif (e.http_status == 401):
self.refresh_token()
continue
else:
print(e)
continue
# raise e
def add_list_of_trackts(self, trakts_id, number_max_playlist_wanted,
playlist_base_name):
logger.info("add_list_of_trackt")
self.calcul_time_token()
random.shuffle(trakts_id)
list_9500_track = []
number_track_in_playlist = 0
track_number_request = 0
total_trakt = 0
playlist_number = 1
date = datetime.datetime.now().strftime("%m/%d/%Y")
playlist_name = playlist_base_name + str(
playlist_number) + " : " + str(date)
print("add new plalist : ", playlist_name)
playlist = self.sp.user_playlist_create(self.username, playlist_name)
playlist_id = playlist['id']
# print("traks_id", trakts_id)
print("traks to add", len(trakts_id))
for track in trakts_id:
list_9500_track.append(track)
track_number_request += 1
number_track_in_playlist += 1
total_trakt += 1
# print("traks to add", len(trakts_id))
# print("total trakt processed = ", total_trakt)
if track_number_request == self.number_max_request1 or len(
trakts_id) == total_trakt:
# logger.info("add traks to plalist : "+ list_9500_track)
# print("add short list")
# print("len list added", len(list_9500_track))
# raise Exception()
self.user_playlist_add_tracks_error(list_9500_track,
playlist_id)
list_9500_track.clear()
track_number_request = 0
if number_track_in_playlist >= self.number_max_playlist:
playlist_number += 1
if playlist_number > number_max_playlist_wanted:
break
playlist_name = playlist_base_name + str(
playlist_number) + " : " + str(date)
print("add new plalist : ", playlist_name)
playlist = self.sp.user_playlist_create(
self.username, playlist_name)
playlist_id = playlist['id']
number_track_in_playlist = 0
def add_trakts_id_to_list(self, tracks):
dico = {}
dico2 = {}
for track in tracks:
dico[track['id']] = track['name']
l = list(dico.items())
random.shuffle(l)
dico = dict(l)
print("len(l) : ", len(l))
for key, value in dico.items():
if value not in dico2.values():
dico2[key] = value
# else:
# print("track duplicate deleted : ", value)
trakts_id_list = []
trakts_name_list = []
for key, value in dico2.items():
trakts_id_list.append(key)
trakts_name_list.append(value)
return trakts_id_list, trakts_name_list
def get_liked_track(self):
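        # Page through the user's saved ("liked") tracks and return rows in the same
        # shape as create_database, with artist/genre/album columns set to "titre liked".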
database=[]
track_name=[]
track_id=[]
offset = 0
for i in range(0, 50):
trak = self.sp.current_user_saved_tracks(limit=self.number_max_request2, offset=offset)
offset += self.number_max_request2
# print(trak['items'])
# track_id, trak_name = self.add_trakts_id_to_list(trak['items'])
# list_track_id.append(track_id)
for key in trak['items']:
# print(key['track']['id'])
# print(key['track']['name'])
# for name in key['track']['artists']:
# artiste_name = name['name']
# print(name['name'])
track_id.append(key['track']['id'])
track_name.append(key['track']['name'])
# list_trak.append((key['track']['id'],key['track']['name'] ))
energy, acousticness, danceability, instrumentalness, liveness, loudness, speechiness, valence, tempo = self.audio_features_list(
track_id)
if len(track_id) != 0 and len(energy) != 0:
for i in range(0, len(track_id) - 1):
# print("i=",i)
# print(key['name'])
# print(track_id[i], track_name[i])
# print(energy[i])
try:
database.append([
"titre liked", "titre liked", "titre liked", track_id[i], track_name[i],
energy[i], acousticness[i], danceability[i],
instrumentalness[i], liveness[i], loudness[i],
speechiness[i], valence[i], tempo[i]
])
except Exception:
database.append([
"titre liked", "titre liked", "titre liked", track_id[i], track_name[i],
None, None, None, None, None, None, None, None, None
])
continue
return database
def add_argument(self):
parser = argparse.ArgumentParser(
description="give credential for spotify")
parser.add_argument('--client_id',
required=True,
help='give \
client id. see : https://developer.spotify.com/documentation/general/guides/app-settings/ '
)
parser.add_argument('--client_secret',
required=True,
help='give \
client id. see : https://developer.spotify.com/documentation/general/guides/app-settings/ '
)
parser.add_argument('--username',
required=True,
help='give your \
spotify username ')
parser.add_argument(
'--complete',
action="store_true",
help='rebuild complete database (don t reuse existing database)')
args = parser.parse_args()
# print(args.client_id)
self.client_id = args.client_id
self.client_secret = args.client_secret
self.username = args.username
self.complete = args.complete
if __name__ == '__main__':
spotifyInstance = SpotifyInstance()
spotifyInstance.refresh_token()
spotifyInstance.mark_playlist_to_delete()
spotifyInstance.create_or_read_database()
spotifyInstance.add_library_playlist()
spotifyInstance.delete_marked_playlist()
# main()
| [] | [] | ["APPDATA"]
| [] | ["APPDATA"] | python | 1 | 0 | |
examples/maven-okhttp4/src/test/java/com/commercetools/sdk/examples/GettingStarted.java | package com.commercetools.sdk.examples;
import com.commercetools.api.client.ApiRoot;
import com.commercetools.api.defaultconfig.ApiFactory;
import com.commercetools.api.defaultconfig.ServiceRegion;
import com.commercetools.api.models.project.Project;
import io.vrap.rmf.base.client.oauth2.ClientCredentials;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
public class GettingStarted {
@Test
public void project() {
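        // Build an ApiRoot from the CTP_* environment variables and verify that the
        // project key returned by the API matches CTP_PROJECT_KEY.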
ApiRoot apiRoot = ApiFactory.create(
ClientCredentials.of().withClientId(System.getenv("CTP_CLIENT_ID"))
.withClientSecret(System.getenv("CTP_CLIENT_SECRET"))
.build(),
ServiceRegion.GCP_EUROPE_WEST1.getOAuthTokenUrl(),
ServiceRegion.GCP_EUROPE_WEST1.getApiUrl());
Project response = apiRoot.withProjectKey(System.getenv("CTP_PROJECT_KEY"))
.get()
.executeBlocking().getBody();
Assertions.assertEquals(System.getenv("CTP_PROJECT_KEY"), response.getKey());
}
}
| ["\"CTP_CLIENT_ID\"", "\"CTP_CLIENT_SECRET\"", "\"CTP_PROJECT_KEY\"", "\"CTP_PROJECT_KEY\""] | [] | ["CTP_CLIENT_SECRET", "CTP_PROJECT_KEY", "CTP_CLIENT_ID"]
| [] | ["CTP_CLIENT_SECRET", "CTP_PROJECT_KEY", "CTP_CLIENT_ID"] | java | 3 | 0 | |
provision.py | from __future__ import print_function
import os
import sys
import logging
import platform
try:
import sh
except ImportError:
import pbs as sh
SUPPORTED_PLATFORMS = {
"Ubuntu": [
"trusty",
],
}
APT_DEPENDENCIES = {
"trusty": [
"closure-compiler",
"libfreetype6-dev",
"libffi-dev",
"memcached",
"rabbitmq-server",
"libldap2-dev",
"redis-server",
"postgresql-server-dev-all",
"libmemcached-dev",
"postgresql-9.3",
"python-dev",
"hunspell-en-us",
"nodejs",
"python-virtualenv",
"supervisor",
"git",
"npm",
"node-jquery",
"yui-compressor",
"puppet", # Used by lint-all
"gettext", # Used by makemessages i18n
]
}
# TODO: backport node-{cssstyle,htmlparser2,nwmatcher} to trusty,
# so we can eliminate npm (above) and this section.
NPM_DEPENDENCIES = {
"trusty": [
"cssstyle",
"htmlparser2",
"nwmatcher",
]
}
VENV_PATH="/srv/zulip-venv"
ZULIP_PATH="/srv/zulip"
if not os.path.exists(os.path.join(os.path.dirname(__file__), ".git")):
print("Error: No Zulip git repository present at /srv/zulip!")
print("To setup the Zulip development environment, you should clone the code")
print("from GitHub, rather than using a Zulip production release tarball.")
sys.exit(1)
# TODO: Parse arguments properly
if "--travis" in sys.argv:
ZULIP_PATH="."
# tsearch-extras is an extension to postgres's built-in full-text search.
# TODO: use a real APT repository
TSEARCH_URL_BASE = "https://dl.dropboxusercontent.com/u/283158365/zuliposs/"
TSEARCH_PACKAGE_NAME = {
"trusty": "postgresql-9.3-tsearch-extras"
}
TSEARCH_VERSION = "0.1.2"
# TODO: this path is platform-specific!
TSEARCH_STOPWORDS_PATH = "/usr/share/postgresql/9.3/tsearch_data/"
REPO_STOPWORDS_PATH = os.path.join(
ZULIP_PATH,
"puppet",
"zulip",
"files",
"postgresql",
"zulip_english.stop",
)
LOUD = dict(_out=sys.stdout, _err=sys.stderr)
def main():
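    # Install the apt and npm dependencies, tsearch-extras and PhantomJS, build the
    # Python virtualenv, then initialise the Zulip development and test databases.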
log = logging.getLogger("zulip-provisioner")
# TODO: support other architectures
if platform.architecture()[0] == '64bit':
arch = 'amd64'
else:
log.critical("Only amd64 is supported.")
vendor, version, codename = platform.dist()
if not (vendor in SUPPORTED_PLATFORMS and codename in SUPPORTED_PLATFORMS[vendor]):
log.critical("Unsupported platform: {} {}".format(vendor, codename))
with sh.sudo:
sh.apt_get.update(**LOUD)
sh.apt_get.install(*APT_DEPENDENCIES["trusty"], assume_yes=True, **LOUD)
temp_deb_path = sh.mktemp("package_XXXXXX.deb", tmpdir=True)
sh.wget(
"{}/{}_{}_{}.deb".format(
TSEARCH_URL_BASE,
TSEARCH_PACKAGE_NAME["trusty"],
TSEARCH_VERSION,
arch,
),
output_document=temp_deb_path,
**LOUD
)
with sh.sudo:
sh.dpkg("--install", temp_deb_path, **LOUD)
with sh.sudo:
PHANTOMJS_PATH = "/srv/phantomjs"
PHANTOMJS_TARBALL = os.path.join(PHANTOMJS_PATH, "phantomjs-1.9.8-linux-x86_64.tar.bz2")
sh.mkdir("-p", PHANTOMJS_PATH, **LOUD)
sh.wget("https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-1.9.8-linux-x86_64.tar.bz2",
output_document=PHANTOMJS_TARBALL, **LOUD)
sh.tar("xj", directory=PHANTOMJS_PATH, file=PHANTOMJS_TARBALL, **LOUD)
sh.ln("-sf", os.path.join(PHANTOMJS_PATH, "phantomjs-1.9.8-linux-x86_64", "bin", "phantomjs"),
"/usr/local/bin/phantomjs", **LOUD)
with sh.sudo:
sh.rm("-rf", VENV_PATH, **LOUD)
sh.mkdir("-p", VENV_PATH, **LOUD)
sh.chown("{}:{}".format(os.getuid(), os.getgid()), VENV_PATH, **LOUD)
sh.virtualenv(VENV_PATH, **LOUD)
# Add the ./tools and ./scripts/setup directories inside the repository root to
# the system path; we'll reference them later.
orig_path = os.environ["PATH"]
os.environ["PATH"] = os.pathsep.join((
os.path.join(ZULIP_PATH, "tools"),
os.path.join(ZULIP_PATH, "scripts", "setup"),
orig_path
))
# Put Python virtualenv activation in our .bash_profile.
with open(os.path.expanduser('~/.bash_profile'), 'w+') as bash_profile:
bash_profile.writelines([
"source .bashrc\n",
"source %s\n" % (os.path.join(VENV_PATH, "bin", "activate"),),
])
# Switch current Python context to the virtualenv.
activate_this = os.path.join(VENV_PATH, "bin", "activate_this.py")
execfile(activate_this, dict(__file__=activate_this))
sh.pip.install(requirement=os.path.join(ZULIP_PATH, "requirements.txt"), **LOUD)
with sh.sudo:
sh.cp(REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH, **LOUD)
# Add additional node packages for test-js-with-node.
with sh.sudo:
sh.npm.install(*NPM_DEPENDENCIES["trusty"], g=True, prefix="/usr", **LOUD)
# Management commands expect to be run from the root of the project.
os.chdir(ZULIP_PATH)
os.system("tools/download-zxcvbn")
os.system("tools/emoji_dump/build_emoji")
os.system("generate_secrets.py -d")
if "--travis" in sys.argv:
os.system("sudo service rabbitmq-server restart")
os.system("sudo service redis-server restart")
os.system("sudo service memcached restart")
sh.configure_rabbitmq(**LOUD)
sh.postgres_init_dev_db(**LOUD)
sh.do_destroy_rebuild_database(**LOUD)
sh.postgres_init_test_db(**LOUD)
sh.do_destroy_rebuild_test_database(**LOUD)
if __name__ == "__main__":
sys.exit(main())
| [] | [] | ["PATH"]
| [] | ["PATH"] | python | 1 | 0 |