filename (stringlengths 4-198) | content (stringlengths 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses 1) | constargjson (stringlengths 2-3.9k) | lang (stringclasses 3) | constargcount (float64, 0-129, ⌀) | variableargcount (float64, 0, ⌀) | sentence (stringclasses 1) |
---|---|---|---|---|---|---|---|---|---|---|
tsweb/tsweb.go
|
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tsweb contains code used in various Tailscale webservers.
package tsweb
import (
"bufio"
"bytes"
"context"
"errors"
"expvar"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
_ "net/http/pprof"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"inet.af/netaddr"
"tailscale.com/metrics"
"tailscale.com/net/tsaddr"
"tailscale.com/types/logger"
)
func init() {
expvar.Publish("counter_uptime_sec", expvar.Func(func() interface{} { return int64(Uptime().Seconds()) }))
expvar.Publish("gauge_goroutines", expvar.Func(func() interface{} { return runtime.NumGoroutine() }))
}
// DevMode controls whether extra output is shown, for when the binary is being run in dev mode.
var DevMode bool
// DefaultCertDir returns a directory under the user's cache directory for
// storing TLS certificates, using leafDir as the leaf directory name. It
// returns the empty string if the user cache directory cannot be determined.
func DefaultCertDir(leafDir string) string {
cacheDir, err := os.UserCacheDir()
if err == nil {
return filepath.Join(cacheDir, "tailscale", leafDir)
}
return ""
}
// IsProd443 reports whether addr is a Go listen address for port 443.
func IsProd443(addr string) bool {
_, port, _ := net.SplitHostPort(addr)
return port == "443" || port == "https"
}
// AllowDebugAccess reports whether r should be permitted to access
// various debug endpoints.
func AllowDebugAccess(r *http.Request) bool {
if r.Header.Get("X-Forwarded-For") != "" {
// TODO if/when needed. For now, conservative:
return false
}
ipStr, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
return false
}
ip, err := netaddr.ParseIP(ipStr)
if err != nil {
return false
}
if tsaddr.IsTailscaleIP(ip) || ip.IsLoopback() || ipStr == os.Getenv("TS_ALLOW_DEBUG_IP") {
return true
}
if r.Method == "GET" {
urlKey := r.FormValue("debugkey")
keyPath := os.Getenv("TS_DEBUG_KEY_PATH")
if urlKey != "" && keyPath != "" {
slurp, err := ioutil.ReadFile(keyPath)
if err == nil && string(bytes.TrimSpace(slurp)) == urlKey {
return true
}
}
}
return false
}
// Protected wraps a provided debug handler, h, returning a Handler
// that enforces AllowDebugAccess and returns forbidden replies for
// unauthorized requests.
func Protected(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !AllowDebugAccess(r) {
msg := "debug access denied"
if DevMode {
ipStr, _, _ := net.SplitHostPort(r.RemoteAddr)
msg += fmt.Sprintf("; to permit access, set TS_ALLOW_DEBUG_IP=%v", ipStr)
}
http.Error(w, msg, http.StatusForbidden)
return
}
h.ServeHTTP(w, r)
})
}
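// exampleProtectedMux is an editor-added sketch, not part of the original
// file: it shows how a debug endpoint might be wrapped with Protected so
// that AllowDebugAccess gates who can reach it. flushHandler is a
// hypothetical handler supplied by the caller.
func exampleProtectedMux(flushHandler http.Handler) *http.ServeMux {
	mux := http.NewServeMux()
	// Unauthorized callers get "debug access denied" with HTTP 403; Tailscale
	// IPs, loopback, and the TS_ALLOW_DEBUG_IP address are allowed through.
	mux.Handle("/debug/flush", Protected(flushHandler))
	return mux
}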
var timeStart = time.Now()
func Uptime() time.Duration { return time.Since(timeStart).Round(time.Second) }
// Port80Handler is the handler to be given to
// autocert.Manager.HTTPHandler. The inner handler is the mux
// returned by NewMux containing registered /debug handlers.
type Port80Handler struct{ Main http.Handler }
func (h Port80Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
path := r.RequestURI
if path == "/debug" || strings.HasPrefix(path, "/debug") {
h.Main.ServeHTTP(w, r)
return
}
if r.Method != "GET" && r.Method != "HEAD" {
http.Error(w, "Use HTTPS", http.StatusBadRequest)
return
}
if path == "/" && AllowDebugAccess(r) {
// Redirect authorized user to the debug handler.
path = "/debug/"
}
target := "https://" + stripPort(r.Host) + path
http.Redirect(w, r, target, http.StatusFound)
}
func stripPort(hostport string) string {
host, _, err := net.SplitHostPort(hostport)
if err != nil {
return hostport
}
return net.JoinHostPort(host, "443")
}
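// examplePort80Server is an editor-added sketch, not part of the original
// file: it serves Port80Handler on plain HTTP, redirecting browsers to the
// HTTPS equivalent while routing /debug requests to the wrapped mux.
// debugMux stands in for the mux a NewMux-style constructor would return.
func examplePort80Server(debugMux http.Handler) error {
	return http.ListenAndServe(":80", Port80Handler{Main: debugMux})
}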
// ReturnHandler is like net/http.Handler, but the handler can return an
// error instead of writing to its ResponseWriter.
type ReturnHandler interface {
// ServeHTTPReturn is like http.Handler.ServeHTTP, except that
// it can choose to return an error instead of writing to its
// http.ResponseWriter.
//
// If ServeHTTPReturn returns an error, its caller should handle
// the error by serving an HTTP 500 response to the user. The
// error details should not be sent to the client, as they may
// contain sensitive information. If the error is an
// HTTPError, though, callers should use the HTTP response
// code and message as the response to the client.
ServeHTTPReturn(http.ResponseWriter, *http.Request) error
}
type HandlerOptions struct {
Quiet200s bool // if set, do not log successfully handled HTTP requests
Logf logger.Logf
Now func() time.Time // if nil, defaults to time.Now
// If non-nil, StatusCodeCounters maintains counters
// of status codes for handled responses.
// The keys are "1xx", "2xx", "3xx", "4xx", and "5xx".
StatusCodeCounters *expvar.Map
}
// ReturnHandlerFunc is an adapter to allow the use of ordinary
// functions as ReturnHandlers. If f is a function with the
// appropriate signature, ReturnHandlerFunc(f) is a ReturnHandler that
// calls f.
type ReturnHandlerFunc func(http.ResponseWriter, *http.Request) error
// ServeHTTPReturn calls f(w, r).
func (f ReturnHandlerFunc) ServeHTTPReturn(w http.ResponseWriter, r *http.Request) error {
return f(w, r)
}
// StdHandler converts a ReturnHandler into a standard http.Handler.
// Handled requests are logged using opts.Logf, as are any errors.
// Errors are handled as specified by the Handler interface.
func StdHandler(h ReturnHandler, opts HandlerOptions) http.Handler {
if opts.Now == nil {
opts.Now = time.Now
}
if opts.Logf == nil {
opts.Logf = logger.Discard
}
return retHandler{h, opts}
}
// retHandler is an http.Handler that wraps a Handler and handles errors.
type retHandler struct {
rh ReturnHandler
opts HandlerOptions
}
// ServeHTTP implements the http.Handler interface.
func (h retHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
msg := AccessLogRecord{
When: h.opts.Now(),
RemoteAddr: r.RemoteAddr,
Proto: r.Proto,
TLS: r.TLS != nil,
Host: r.Host,
Method: r.Method,
RequestURI: r.URL.RequestURI(),
UserAgent: r.UserAgent(),
Referer: r.Referer(),
}
lw := &loggingResponseWriter{ResponseWriter: w, logf: h.opts.Logf}
err := h.rh.ServeHTTPReturn(lw, r)
hErr, hErrOK := err.(HTTPError)
if lw.code == 0 && err == nil && !lw.hijacked {
// If the handler didn't write and didn't send a header, that still means 200.
// (See https://play.golang.org/p/4P7nx_Tap7p)
lw.code = 200
}
msg.Seconds = h.opts.Now().Sub(msg.When).Seconds()
msg.Code = lw.code
msg.Bytes = lw.bytes
switch {
case lw.hijacked:
// Connection no longer belongs to us, just log that we
// switched protocols away from HTTP.
if msg.Code == 0 {
msg.Code = http.StatusSwitchingProtocols
}
case err != nil && r.Context().Err() == context.Canceled:
msg.Code = 499 // nginx convention: Client Closed Request
msg.Err = context.Canceled.Error()
case hErrOK:
// Handler asked us to send an error. Do so, if we haven't
// already sent a response.
msg.Err = hErr.Msg
if hErr.Err != nil {
if msg.Err == "" {
msg.Err = hErr.Err.Error()
} else {
msg.Err = msg.Err + ": " + hErr.Err.Error()
}
}
if lw.code != 0 {
h.opts.Logf("[unexpected] handler returned HTTPError %v, but already sent a response with code %d", hErr, lw.code)
break
}
msg.Code = hErr.Code
if msg.Code == 0 {
h.opts.Logf("[unexpected] HTTPError %v did not contain an HTTP status code, sending internal server error", hErr)
msg.Code = http.StatusInternalServerError
}
http.Error(lw, hErr.Msg, msg.Code)
case err != nil:
// Handler returned a generic error. Serve an internal server
// error, if necessary.
msg.Err = err.Error()
if lw.code == 0 {
msg.Code = http.StatusInternalServerError
http.Error(lw, "internal server error", msg.Code)
}
}
if msg.Code != 200 || !h.opts.Quiet200s {
h.opts.Logf("%s", msg)
}
if h.opts.StatusCodeCounters != nil {
key := fmt.Sprintf("%dxx", msg.Code/100)
h.opts.StatusCodeCounters.Add(key, 1)
}
}
// loggingResponseWriter wraps a ResponseWriter and records the HTTP
// response code that gets sent, if any.
type loggingResponseWriter struct {
http.ResponseWriter
code int
bytes int
hijacked bool
logf logger.Logf
}
// WriteHeader implements http.ResponseWriter.
func (l *loggingResponseWriter) WriteHeader(statusCode int) {
if l.code != 0 {
l.logf("[unexpected] HTTP handler set statusCode twice (%d and %d)", l.code, statusCode)
return
}
l.code = statusCode
l.ResponseWriter.WriteHeader(statusCode)
}
// Write implements http.ResponseWriter.
func (l *loggingResponseWriter) Write(bs []byte) (int, error) {
if l.code == 0 {
l.code = 200
}
n, err := l.ResponseWriter.Write(bs)
l.bytes += n
return n, err
}
// Hijack implements http.Hijacker. Note that hijacking can still fail
// because the wrapped ResponseWriter is not required to implement
// Hijacker, as this breaks HTTP/2.
func (l *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
h, ok := l.ResponseWriter.(http.Hijacker)
if !ok {
return nil, nil, errors.New("ResponseWriter is not a Hijacker")
}
conn, buf, err := h.Hijack()
if err == nil {
l.hijacked = true
}
return conn, buf, err
}
func (l loggingResponseWriter) Flush() {
f, _ := l.ResponseWriter.(http.Flusher)
if f == nil {
l.logf("[unexpected] tried to Flush a ResponseWriter that can't flush")
return
}
f.Flush()
}
// HTTPError is an error with embedded HTTP response information.
//
// It is the error type to be (optionally) used by Handler.ServeHTTPReturn.
type HTTPError struct {
Code int // HTTP response code to send to client; 0 means 500
Msg string // Response body to send to client
Err error // Detailed error to log on the server
}
// Error implements the error interface.
func (e HTTPError) Error() string { return fmt.Sprintf("httperror{%d, %q, %v}", e.Code, e.Msg, e.Err) }
// Error returns an HTTPError containing the given information.
func Error(code int, msg string, err error) HTTPError {
return HTTPError{Code: code, Msg: msg, Err: err}
}
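// exampleStdHandler is an editor-added sketch, not part of the original
// file: it shows a ReturnHandlerFunc returning an HTTPError and being
// wrapped by StdHandler, which logs the request and turns the error into
// an HTTP response.
func exampleStdHandler(logf logger.Logf) http.Handler {
	h := ReturnHandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
		if r.URL.Query().Get("id") == "" {
			// The client sees "missing id" with a 400; the wrapped error is
			// only written to the server log.
			return Error(http.StatusBadRequest, "missing id", errors.New("id query parameter not set"))
		}
		_, err := io.WriteString(w, "ok")
		return err
	})
	return StdHandler(h, HandlerOptions{Logf: logf, Quiet200s: true})
}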
// VarzHandler is an HTTP handler to write expvar values into the
// prometheus export format:
//
// https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md
//
// It makes the following assumptions:
//
// * *expvar.Int are counters (unless marked as a gauge_; see below)
// * a *tailscale/metrics.Set is descended into, joining keys with
// underscores. So use underscores as your metric names.
// * an expvar whose name starts with "gauge_" or "counter_" is of that
// Prometheus type, and has that prefix stripped.
// * anything else is untyped and thus not exported.
// * expvar.Func can return an int or int64 (for now) and anything else
// is not exported.
//
// This will evolve over time, or perhaps be replaced.
func VarzHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; version=0.0.4")
var dump func(prefix string, kv expvar.KeyValue)
dump = func(prefix string, kv expvar.KeyValue) {
name := prefix + kv.Key
var typ string
switch {
case strings.HasPrefix(kv.Key, "gauge_"):
typ = "gauge"
name = prefix + strings.TrimPrefix(kv.Key, "gauge_")
case strings.HasPrefix(kv.Key, "counter_"):
typ = "counter"
name = prefix + strings.TrimPrefix(kv.Key, "counter_")
}
switch v := kv.Value.(type) {
case *expvar.Int:
if typ == "" {
typ = "counter"
}
fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, typ, name, v.Value())
return
case *metrics.Set:
v.Do(func(kv expvar.KeyValue) {
dump(name+"_", kv)
})
return
}
if typ == "" {
var funcRet string
if f, ok := kv.Value.(expvar.Func); ok {
v := f()
if ms, ok := v.(runtime.MemStats); ok && name == "memstats" {
writeMemstats(w, &ms)
return
}
funcRet = fmt.Sprintf(" returning %T", v)
}
fmt.Fprintf(w, "# skipping expvar %q (Go type %T%s) with undeclared Prometheus type\n", name, kv.Value, funcRet)
return
}
switch v := kv.Value.(type) {
case expvar.Func:
val := v()
switch val.(type) {
case int64, int:
fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, typ, name, val)
default:
fmt.Fprintf(w, "# skipping expvar func %q returning unknown type %T\n", name, val)
}
case *metrics.LabelMap:
fmt.Fprintf(w, "# TYPE %s %s\n", name, typ)
// IntMap uses expvar.Map on the inside, which presorts
// keys. The output ordering is deterministic.
v.Do(func(kv expvar.KeyValue) {
fmt.Fprintf(w, "%s{%s=%q} %v\n", name, v.Label, kv.Key, kv.Value)
})
}
}
expvar.Do(func(kv expvar.KeyValue) {
dump("", kv)
})
}
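// exampleVarzMetrics is an editor-added sketch, not part of the original
// file: it illustrates the naming convention VarzHandler expects. The
// "counter_"/"gauge_" prefix picks the Prometheus type and is stripped from
// the exported name; the metric names here are made up. Note that expvar
// names are global, so calling this more than once would panic.
func exampleVarzMetrics() {
	// Exported as: # TYPE example_requests counter
	expvar.NewInt("counter_example_requests").Add(1)
	// Exported as: # TYPE example_queue_len gauge
	expvar.NewInt("gauge_example_queue_len").Set(3)
}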
func writeMemstats(w io.Writer, ms *runtime.MemStats) {
out := func(name, typ string, v uint64, help string) {
if help != "" {
fmt.Fprintf(w, "# HELP memstats_%s %s\n", name, help)
}
fmt.Fprintf(w, "# TYPE memstats_%s %s\nmemstats_%s %v\n", name, typ, name, v)
}
g := func(name string, v uint64, help string) { out(name, "gauge", v, help) }
c := func(name string, v uint64, help string) { out(name, "counter", v, help) }
g("heap_alloc", ms.HeapAlloc, "current bytes of allocated heap objects (up/down smoothly)")
c("total_alloc", ms.TotalAlloc, "cumulative bytes allocated for heap objects")
g("sys", ms.Sys, "total bytes of memory obtained from the OS")
c("mallocs", ms.Mallocs, "cumulative count of heap objects allocated")
c("frees", ms.Frees, "cumulative count of heap objects freed")
c("num_gc", uint64(ms.NumGC), "number of completed GC cycles")
}
|
["\"TS_ALLOW_DEBUG_IP\"", "\"TS_DEBUG_KEY_PATH\""] | [] | ["TS_ALLOW_DEBUG_IP", "TS_DEBUG_KEY_PATH"] | [] | ["TS_ALLOW_DEBUG_IP", "TS_DEBUG_KEY_PATH"] | go | 2 | 0 | |
webhook_test.go
|
/*
Copyright 2014 go-trello authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package trello
import (
"fmt"
"math/rand"
"os"
"testing"
goblin "github.com/franela/goblin"
. "github.com/onsi/gomega"
)
func TestWebhook(t *testing.T) {
g := goblin.Goblin(t)
RegisterFailHandler(func(m string, _ ...int) { g.Fail(m) })
g.Describe("Webhook tests", func() {
var webhook *Webhook
var token string
var callbackURL string
g.Before(func() {
token = os.Getenv("API_TOKEN")
Expect(token).NotTo(BeEmpty())
callbackURL = fmt.Sprintf("https://www.google.com/?q=go-trello-test-%v", rand.Intn(65536))
})
g.It("should create a webhook", func() {
member, err := client.Member("me")
Expect(err).To(BeNil())
webhook, err = client.CreateWebhook(Webhook{
Description: "Testing Webhook",
CallbackURL: callbackURL,
IDModel: member.ID,
})
Expect(err).To(BeNil())
})
g.It("should deactivate a webhook", func() {
err = webhook.SetActive(false)
Expect(err).To(BeNil())
Expect(webhook.Active).To(BeFalse())
})
g.It("should set a callback url for a webhook", func() {
err = webhook.SetCallbackURL(callbackURL + "-test")
Expect(err).To(BeNil())
Expect(webhook.CallbackURL).To(Equal(callbackURL + "-test"))
})
g.It("should set a callback url for a webhook", func() {
err = webhook.SetDescription("Go-Trello Testing")
Expect(err).To(BeNil())
Expect(webhook.Description).To(Equal("Go-Trello Testing"))
})
g.It("should set a idModel for a webhook", func() {
err = webhook.SetIDModel(webhook.IDModel) // kinda cheating
Expect(err).To(BeNil())
})
g.It("should retrieve a webhook by id", func() {
_, err = client.Webhook(webhook.ID)
Expect(err).To(BeNil())
})
g.It("should get webhooks for a token", func() {
_, err = client.Webhooks(token)
})
// Destructive Action - Should be last
g.It("should delete a webhook", func() {
err = webhook.Delete()
Expect(err).To(BeNil())
})
})
}
|
["\"API_TOKEN\""] | [] | ["API_TOKEN"] | [] | ["API_TOKEN"] | go | 1 | 0 | |
test_fork.py
|
import os
import sys
import h5py
import numpy as np
import torch
os.environ['TOOLBOX_PATH'] = "/scratch/dkarkalousos/apps/bart-0.6.00/"
sys.path.append('/scratch/dkarkalousos/apps/bart-0.6.00/python/')
import bart
import fastmri.data.transforms as T
from fastmri.data.subsample import create_mask_for_mask_type
from fastmri import tensor_to_complex_np
from fastmri.fftc import ifft2c_new as ifft2c
from fastmri.coil_combine import rss_complex
import matplotlib.pyplot as plt
fname = '/data/projects/recon/data/public/fastmri/knee/multicoil/multicoil_train/file1000002.h5'
data = h5py.File(fname, 'r')
kspace = data["kspace"][()]
slice = 20
crop_size = (320, 320)
device = 'cuda'
target = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kspace[slice], axes=(-2, -1)), axes=(-2, -1)), axes=(-2, -1))
target = target / np.max(np.abs(target))
target = np.sqrt(np.sum(T.center_crop(target, crop_size) ** 2, 0))
crop_size = (320, 320)
mask_func = create_mask_for_mask_type(mask_type_str="random", center_fractions=[0.08], accelerations=[4])
_kspace = T.to_tensor(kspace)[slice]
masked_kspace, mask = T.apply_mask(_kspace, mask_func)
linear_recon = masked_kspace[..., 0] + 1j * masked_kspace[..., 1]
linear_recon = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(linear_recon, axes=(-2, -1)), axes=(-2, -1)),
axes=(-2, -1))
linear_recon = linear_recon / np.max(np.abs(linear_recon))
linear_recon = np.sqrt(np.sum(T.center_crop(linear_recon, (320, 320)) ** 2, 0))
masked_kspace = masked_kspace.permute(1, 2, 0, 3).unsqueeze(0)
masked_kspace = tensor_to_complex_np(masked_kspace)
sens_maps = bart.bart(1, "ecalib -d0 -m1", masked_kspace)
reg_wt = 0.01
num_iters = 200
pred = np.abs(bart.bart(1, f"pics -d0 -S -R T:7:0:{reg_wt} -i {num_iters}", masked_kspace, sens_maps)[0])
pred = torch.from_numpy(pred / np.max(np.abs(pred))).cpu().numpy()
# check for FLAIR 203
if pred.shape[1] < crop_size[1]:
crop_size = (pred.shape[1], pred.shape[1])
pred = T.center_crop(pred, crop_size)
plt.subplot(1, 4, 1)
plt.imshow(np.abs(target), cmap='gray')
plt.title('Fully-sampled')
plt.colorbar()
plt.subplot(1, 4, 2)
plt.imshow(np.abs(linear_recon), cmap='gray')
plt.title('4x')
plt.colorbar()
plt.subplot(1, 4, 3)
plt.imshow(np.abs(pred), cmap='gray')
plt.title('PICS')
plt.colorbar()
plt.subplot(1, 4, 4)
plt.imshow(np.abs(target)-np.abs(pred), cmap='gray')
plt.title('Difference')
plt.colorbar()
plt.show()
|
[] | [] | ["TOOLBOX_PATH"] | [] | ["TOOLBOX_PATH"] | python | 1 | 0 | |
tests/test_tools.py
|
import sys
import os
import pytest
sys.path.insert(0, os.getcwd())
import autoclasswrapper as wrapper
here = os.path.abspath(os.path.dirname(__file__))
dir_data = "test_data"
@pytest.fixture(scope='session')
def tmp_dir(tmpdir_factory):
"""Create temp dir and cd in it
"""
tmpd = tmpdir_factory.mktemp("run")
os.chdir(str(tmpd))
print("Tests are in: {}".format(str(tmpd)))
def test_search_autoclass_in_path(caplog, tmp_dir):
# create fake autoclass binary and add it to PATH
autoclass_bin = "autoclass"
open(autoclass_bin, "a").close()
os.chmod(autoclass_bin, 0o766)
os.environ["PATH"] = os.getcwd() + ":" + os.environ["PATH"]
wrapper.search_autoclass_in_path()
assert "AutoClass C executable found in" in caplog.text
def test_get_autoclass_version(caplog):
wrapper.get_autoclass_version()
assert "AUTOCLASS" in caplog.text
|
[] | [] | ["PATH"] | [] | ["PATH"] | python | 1 | 0 | |
pkg/configmanager/parser.go
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package configmanager
import (
"encoding/json"
"net"
"os"
"runtime"
"strconv"
"mosn.io/api"
"mosn.io/mosn/pkg/config/v2"
"mosn.io/mosn/pkg/log"
"mosn.io/mosn/pkg/protocol"
)
type ContentKey string
var ProtocolsSupported = map[string]bool{
string(protocol.Auto): true,
string(protocol.HTTP1): true,
string(protocol.HTTP2): true,
string(protocol.Xprotocol): true,
}
const (
MinHostWeight = uint32(1)
MaxHostWeight = uint32(128)
DefaultMaxRequestPerConn = uint32(1024)
DefaultConnBufferLimitBytes = uint32(16 * 1024)
)
// RegisterProtocolParser
// used to register parser
func RegisterProtocolParser(key string) bool {
if _, ok := ProtocolsSupported[key]; ok {
return false
}
log.StartLogger.Infof("[config] %s added to ProtocolsSupported", key)
ProtocolsSupported[key] = true
return true
}
// ParsedCallback is an
// alias for closure func(data interface{}, endParsing bool) error
type ParsedCallback func(data interface{}, endParsing bool) error
// basic config
var configParsedCBMaps = make(map[ContentKey][]ParsedCallback)
// extend config parsed listener
// for user defined configs in extend field
var configExtendParsedCBMaps = make(map[string][]ParsedCallback)
// Group of ContentKey
// notes: configcontentkey equals to the key of config file
const (
ParseCallbackKeyCluster ContentKey = "clusters"
ParseCallbackKeyServiceRgtInfo ContentKey = "service_registry"
ParseCallbackKeyProcessor ContentKey = "processor"
)
// RegisterConfigParsedListener
// used to register ParsedCallback
func RegisterConfigParsedListener(key ContentKey, cb ParsedCallback) {
if cbs, ok := configParsedCBMaps[key]; ok {
cbs = append(cbs, cb)
// append maybe change the slice, should be assigned again
configParsedCBMaps[key] = cbs
} else {
log.StartLogger.Infof("[config] %s added to configParsedCBMaps", key)
cpc := []ParsedCallback{cb}
configParsedCBMaps[key] = cpc
}
}
// RegisterConfigExtendParsedListener used to do callback
// when the extend config is parsed
//
// "extend" : [{
// "type" : "dubbo_registry",
// "config" : {},
// },{
// "type" : "sofa_registry",
// "config" : {},
// },{
// "type" : "msg_broker",
// "config" : {}
// },{
// "type" : "oh_very",
// "config" : "here can be a string"
// }]
//
func RegisterConfigExtendParsedListener(key string, cb ParsedCallback) {
if cbs, ok := configExtendParsedCBMaps[key]; ok {
configExtendParsedCBMaps[key] = append(cbs, cb)
} else {
log.StartLogger.Infof("[config] %s added to configParsedCBMaps", key)
configExtendParsedCBMaps[key] = []ParsedCallback{cb}
}
}
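// exampleRegisterExtendListener is an editor-added sketch, not part of the
// original file: it shows how an extension might subscribe to its entry in
// the "extend" section described above. "dubbo_registry" is simply one of
// the type strings from that example config.
func exampleRegisterExtendListener() {
	RegisterConfigExtendParsedListener("dubbo_registry", func(data interface{}, endParsing bool) error {
		// data carries the raw "config" value of the matching extend item.
		log.StartLogger.Infof("[example] dubbo_registry extend config parsed: %+v", data)
		return nil
	})
}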
// ParseClusterConfig parses config data into api data and verifies whether the config is valid
func ParseClusterConfig(clusters []v2.Cluster) ([]v2.Cluster, map[string][]v2.Host) {
if len(clusters) == 0 {
log.StartLogger.Warnf("[config] [parse cluster] No Cluster provided in cluster config")
}
var pClusters []v2.Cluster
clusterV2Map := make(map[string][]v2.Host)
for _, c := range clusters {
if c.Name == "" {
log.StartLogger.Fatalf("[config] [parse cluster] name is required in cluster config")
}
if c.MaxRequestPerConn == 0 {
c.MaxRequestPerConn = DefaultMaxRequestPerConn
log.StartLogger.Infof("[config] [parse cluster] max_request_per_conn is not specified, use default value %d",
DefaultMaxRequestPerConn)
}
if c.ConnBufferLimitBytes == 0 {
c.ConnBufferLimitBytes = DefaultConnBufferLimitBytes
log.StartLogger.Infof("[config] [parse cluster] conn_buffer_limit_bytes is not specified, use default value %d",
DefaultConnBufferLimitBytes)
}
if c.LBSubSetConfig.FallBackPolicy > 2 {
log.StartLogger.Fatalf("[config] [parse cluster] lb subset config 's fall back policy set error. " +
"For 0, represent NO_FALLBACK" +
"For 1, represent ANY_ENDPOINT" +
"For 2, represent DEFAULT_SUBSET")
}
if _, ok := ProtocolsSupported[c.HealthCheck.Protocol]; !ok && c.HealthCheck.Protocol != "" {
log.StartLogger.Fatalf("[config] [parse cluster] unsupported health check protocol: %v", c.HealthCheck.Protocol)
}
c.Hosts = parseHostConfig(c.Hosts)
clusterV2Map[c.Name] = c.Hosts
pClusters = append(pClusters, c)
}
// trigger all callbacks
if cbs, ok := configParsedCBMaps[ParseCallbackKeyCluster]; ok {
for _, cb := range cbs {
cb(pClusters, false)
}
}
return pClusters, clusterV2Map
}
func parseHostConfig(hosts []v2.Host) (hs []v2.Host) {
for _, host := range hosts {
host.Weight = transHostWeight(host.Weight)
hs = append(hs, host)
}
return
}
func transHostWeight(weight uint32) uint32 {
if weight > MaxHostWeight {
return MaxHostWeight
}
if weight < MinHostWeight {
return MinHostWeight
}
return weight
}
var logLevelMap = map[string]log.Level{
"TRACE": log.TRACE,
"DEBUG": log.DEBUG,
"FATAL": log.FATAL,
"ERROR": log.ERROR,
"WARN": log.WARN,
"INFO": log.INFO,
}
func ParseLogLevel(level string) log.Level {
if logLevel, ok := logLevelMap[level]; ok {
return logLevel
}
return log.INFO
}
// ParseListenerConfig
func ParseListenerConfig(lc *v2.Listener, inheritListeners []net.Listener) *v2.Listener {
if lc.AddrConfig == "" {
log.StartLogger.Fatalf("[config] [parse listener] Address is required in listener config")
}
addr, err := net.ResolveTCPAddr("tcp", lc.AddrConfig)
if err != nil {
log.StartLogger.Fatalf("[config] [parse listener] Address not valid: %v", lc.AddrConfig)
}
//try inherit legacy listener
var old *net.TCPListener
for i, il := range inheritListeners {
if il == nil {
continue
}
tl := il.(*net.TCPListener)
ilAddr, err := net.ResolveTCPAddr("tcp", tl.Addr().String())
if err != nil {
log.StartLogger.Fatalf("[config] [parse listener] inheritListener not valid: %s", tl.Addr().String())
}
if addr.Port != ilAddr.Port {
continue
}
if (addr.IP.IsUnspecified() && ilAddr.IP.IsUnspecified()) ||
(addr.IP.IsLoopback() && ilAddr.IP.IsLoopback()) ||
addr.IP.Equal(ilAddr.IP) {
log.StartLogger.Infof("[config] [parse listener] inherit listener addr: %s", lc.AddrConfig)
old = tl
inheritListeners[i] = nil
break
}
}
lc.Addr = addr
lc.PerConnBufferLimitBytes = 1 << 15
lc.InheritListener = old
return lc
}
func ParseRouterConfiguration(c *v2.FilterChain) (*v2.RouterConfiguration, error) {
routerConfiguration := &v2.RouterConfiguration{}
for _, f := range c.Filters {
if f.Type == v2.CONNECTION_MANAGER {
data, err := json.Marshal(f.Config)
if err != nil {
return nil, err
}
if err := json.Unmarshal(data, routerConfiguration); err != nil {
return nil, err
}
}
}
return routerConfiguration, nil
}
// extensible service registry
// for various service registries,
// eg: dubbo_registry, sofa_registry, msg_broker or any other user defined ...
func ParseConfigExtend(itemList []v2.ExtendItem) {
// trigger all extend callbacks
for _, extConfig := range itemList {
if cbs, ok := configExtendParsedCBMaps[extConfig.Type]; ok {
for _, cb := range cbs {
cb(extConfig.Config, true)
}
}
}
}
func ParseServiceRegistry(src v2.ServiceRegistryInfo) {
//trigger all callbacks
if cbs, ok := configParsedCBMaps[ParseCallbackKeyServiceRgtInfo]; ok {
for _, cb := range cbs {
cb(src, true)
}
}
}
// ParseServerConfig
func ParseServerConfig(c *v2.ServerConfig) *v2.ServerConfig {
if n, _ := strconv.Atoi(os.Getenv("GOMAXPROCS")); n > 0 && n <= runtime.NumCPU() {
c.Processor = n
} else if c.Processor == 0 {
c.Processor = runtime.NumCPU()
}
// trigger processor callbacks
if cbs, ok := configParsedCBMaps[ParseCallbackKeyProcessor]; ok {
for _, cb := range cbs {
cb(c.Processor, true)
}
}
return c
}
// GetListenerFilters returns a listener filter factory by filter.Type
func GetListenerFilters(configs []v2.Filter) []api.ListenerFilterChainFactory {
var factories []api.ListenerFilterChainFactory
for _, c := range configs {
sfcc, err := api.CreateListenerFilterChainFactory(c.Type, c.Config)
if err != nil {
log.DefaultLogger.Errorf("[config] get listener filter failed, type: %s, error: %v", c.Type, err)
continue
}
factories = append(factories, sfcc)
}
return factories
}
// GetStreamFilters returns a stream filter factory by filter.Type
func GetStreamFilters(configs []v2.Filter) []api.StreamFilterChainFactory {
var factories []api.StreamFilterChainFactory
for _, c := range configs {
sfcc, err := api.CreateStreamFilterChainFactory(c.Type, c.Config)
if err != nil {
log.DefaultLogger.Errorf("[config] get stream filter failed, type: %s, error: %v", c.Type, err)
continue
}
factories = append(factories, sfcc)
}
return factories
}
// GetNetworkFilters returns a network filter factory by filter.Type
func GetNetworkFilters(c *v2.FilterChain) []api.NetworkFilterChainFactory {
var factories []api.NetworkFilterChainFactory
for _, f := range c.Filters {
factory, err := api.CreateNetworkFilterChainFactory(f.Type, f.Config)
if err != nil {
log.StartLogger.Errorf("[config] network filter create failed, type:%s, error: %v", f.Type, err)
continue
}
if factory != nil {
factories = append(factories, factory)
}
}
return factories
}
|
["\"GOMAXPROCS\""] | [] | ["GOMAXPROCS"] | [] | ["GOMAXPROCS"] | go | 1 | 0 | |
pygeoapi/util.py
|
# =================================================================
#
# Authors: Tom Kralidis <[email protected]>
#
# Copyright (c) 2019 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
"""Generic util functions used in the code"""
from datetime import date, datetime, time
from decimal import Decimal
import json
import logging
import os
import re
from urllib.parse import urlparse
from jinja2 import Environment, FileSystemLoader
import yaml
from pygeoapi import __version__
LOGGER = logging.getLogger(__name__)
TEMPLATES = '{}{}templates'.format(os.path.dirname(
os.path.realpath(__file__)), os.sep)
def dategetter(date_property, collection):
"""
Attempts to obtain a date value from a collection.
:param date_property: property representing the date
:param collection: dictionary to check within
:returns: `str` (ISO8601) representing the date. ('..' if null or "now",
allowing for an open interval).
"""
value = collection.get(date_property, None)
if value == 'now' or value is None:
return '..'
return value.isoformat()
def get_typed_value(value):
"""
Derive true type from data value
:param value: value
:returns: value as a native Python data type
"""
try:
if '.' in value: # float?
value2 = float(value)
elif len(value) > 1 and value.startswith('0'):
value2 = value
else: # int?
value2 = int(value)
except ValueError: # string (default)?
value2 = value
return value2
def yaml_load(fh):
"""
serializes a YAML file into a pyyaml object
:param fh: file handle
:returns: `dict` representation of YAML
"""
# support environment variables in config
# https://stackoverflow.com/a/55301129
path_matcher = re.compile(r'.*\$\{([^}^{]+)\}.*')
def path_constructor(loader, node):
env_var = path_matcher.match(node.value).group(1)
if env_var not in os.environ:
raise EnvironmentError('Undefined environment variable in config')
return get_typed_value(os.path.expandvars(node.value))
class EnvVarLoader(yaml.SafeLoader):
pass
EnvVarLoader.add_implicit_resolver('!path', path_matcher, None)
EnvVarLoader.add_constructor('!path', path_constructor)
return yaml.load(fh, Loader=EnvVarLoader)
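def _example_yaml_env_substitution():
    """Editor-added sketch, not part of the original module: demonstrates
    the `${VAR}` substitution performed by yaml_load(). `MY_PORT` is a
    made-up environment variable used only for illustration.
    """
    import io

    os.environ['MY_PORT'] = '5000'
    config = yaml_load(io.StringIO('server:\n    port: ${MY_PORT}\n'))
    # the substituted value is coerced by get_typed_value(), so this is an int
    assert config['server']['port'] == 5000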
def str2bool(value):
"""
helper function to return Python boolean
type (source: https://stackoverflow.com/a/715468)
:param value: value to be evaluated
:returns: `bool` of whether the value is boolean-ish
"""
value2 = False
if isinstance(value, bool):
value2 = value
else:
value2 = value.lower() in ('yes', 'true', 't', '1', 'on')
return value2
def to_json(dict_):
"""
Serialize dict to json
:param dict_: `dict` of JSON representation
:returns: JSON string representation
"""
return json.dumps(dict_, default=json_serial)
def json_serial(obj):
"""
helper function to convert to JSON non-default
types (source: https://stackoverflow.com/a/22238613)
:param obj: `object` to be evaluated
:returns: JSON non-default type to `str`
"""
if isinstance(obj, (datetime, date, time)):
return obj.isoformat()
elif isinstance(obj, Decimal):
return float(obj)
msg = '{} type {} not serializable'.format(obj, type(obj))
LOGGER.error(msg)
raise TypeError(msg)
def is_url(urlstring):
"""
Validation function that determines whether a candidate URL should be
considered a URI. No remote resource is obtained; this does not check
the existence of any remote resource.
:param urlstring: `str` to be evaluated as candidate URL.
:returns: `bool` of whether the URL looks like a URL.
"""
try:
result = urlparse(urlstring)
return bool(result.scheme and result.netloc)
except ValueError:
return False
def render_j2_template(config, template, data):
"""
render Jinja2 template
:param config: dict of configuration
:param template: template (relative path)
:param data: dict of data
:returns: string of rendered template
"""
env = Environment(loader=FileSystemLoader(TEMPLATES))
env.filters['to_json'] = to_json
env.globals.update(to_json=to_json)
template = env.get_template(template)
return template.render(config=config, data=data, version=__version__)
|
[] | [] | [] | [] | [] | python | 0 | 0 | |
example/stopJob.go
|
package main
import (
"os"
"github.com/flink-go/api"
)
func main() {
c, err := api.New(os.Getenv("FLINK_API"))
if err != nil {
panic(err)
}
// stop job test
err = c.StopJob("8ea123d2bdc3064f36b92889e43803ee")
if err != nil {
panic(err)
}
}
|
["\"FLINK_API\""] | [] | ["FLINK_API"] | [] | ["FLINK_API"] | go | 1 | 0 | |
app/admin/main/spy/dao/dao_test.go
|
package dao
import (
"context"
"flag"
"fmt"
"os"
"testing"
"time"
"go-common/app/admin/main/spy/conf"
"go-common/app/admin/main/spy/model"
. "github.com/smartystreets/goconvey/convey"
)
var (
dataMID = int64(15555180)
noDataMID = int64(1)
d *Dao
)
const (
_cleanFactorSQL = "delete from spy_factor where nick_name = ? AND service_id = ? AND event_id = ? AND risk_level = ?"
_cleanEventSQL = "delete from spy_event where name = ? AND service_id = ? AND status = ?"
_cleanServiceSQL = "delete from spy_service where name = ? AND status = ?"
_cleanGroupSQL = "delete from spy_factor_group where name = ?"
)
func CleanMysql() {
ctx := context.Background()
d.db.Exec(ctx, _cleanFactorSQL, "test", 1, 1, 2)
d.db.Exec(ctx, _cleanEventSQL, "test", 1, 1)
d.db.Exec(ctx, _cleanServiceSQL, "test", 1)
d.db.Exec(ctx, _cleanGroupSQL, "test")
}
func TestMain(m *testing.M) {
if os.Getenv("DEPLOY_ENV") != "" {
flag.Set("app_id", "main.account-law.spy-admin")
flag.Set("conf_token", "bc3d60c2bb2b08a1b690b004a1953d3c")
flag.Set("tree_id", "2857")
flag.Set("conf_version", "docker-1")
flag.Set("deploy_env", "uat")
flag.Set("conf_host", "config.bilibili.co")
flag.Set("conf_path", "/tmp")
flag.Set("region", "sh")
flag.Set("zone", "sh001")
} else {
flag.Set("conf", "../cmd/spy-admin-test.toml")
}
flag.Parse()
if err := conf.Init(); err != nil {
panic(err)
}
d = New(conf.Conf)
CleanMysql()
m.Run()
os.Exit(0)
}
func WithMysql(f func(d *Dao)) func() {
return func() {
f(d)
}
}
func Test_Mysql(t *testing.T) {
Convey("get user info", t, WithMysql(func(d *Dao) {
res, err := d.Info(context.TODO(), dataMID)
So(err, ShouldBeNil)
So(res, ShouldNotBeEmpty)
}))
Convey("get user info no data ", t, WithMysql(func(d *Dao) {
res, err := d.Info(context.TODO(), noDataMID)
So(err, ShouldBeNil)
So(res, ShouldNotBeEmpty)
}))
Convey("get event history", t, WithMysql(func(d *Dao) {
hpConf := &model.HisParamReq{
Mid: 46333,
Ps: 10,
Pn: 1,
}
res, err := d.HistoryPage(context.TODO(), hpConf)
So(err, ShouldBeNil)
So(res, ShouldNotBeEmpty)
}))
Convey("get event history count", t, WithMysql(func(d *Dao) {
hpConf := &model.HisParamReq{
Mid: 46333,
Ps: 10,
Pn: 1,
}
res, err := d.HistoryPageTotalC(context.TODO(), hpConf)
So(err, ShouldBeNil)
fmt.Printf("history count : %d\n", res)
So(res, ShouldNotBeEmpty)
}))
Convey("get setting list", t, WithMysql(func(d *Dao) {
res, err := d.SettingList(context.TODO())
So(err, ShouldBeNil)
So(res, ShouldNotBeEmpty)
}))
Convey("update setting", t, WithMysql(func(d *Dao) {
list, err := d.SettingList(context.TODO())
So(err, ShouldBeNil)
setting := list[0]
res, err := d.UpdateSetting(context.TODO(), setting.Val, setting.Property)
So(err, ShouldBeNil)
fmt.Println(res)
}))
Convey(" add factor ", t, WithMysql(func(d *Dao) {
res, err := d.AddFactor(context.TODO(), &model.Factor{
NickName: "test",
ServiceID: int64(1),
EventID: int64(1),
GroupID: int64(1),
RiskLevel: int8(2),
FactorVal: float32(1),
CategoryID: int8(1),
CTime: time.Now(),
MTime: time.Now(),
})
So(err, ShouldBeNil)
So(res == 1, ShouldBeTrue)
}))
Convey(" add event ", t, WithMysql(func(d *Dao) {
res, err := d.AddEvent(context.TODO(), &model.Event{
Name: "test",
NickName: "nickname",
ServiceID: 1,
Status: 1,
CTime: time.Now(),
MTime: time.Now(),
})
So(err, ShouldBeNil)
So(res == 1, ShouldBeTrue)
}))
Convey(" add service ", t, WithMysql(func(d *Dao) {
res, err := d.AddService(context.TODO(), &model.Service{
Name: "test",
NickName: "nickname",
Status: 1,
CTime: time.Now(),
MTime: time.Now(),
})
So(err, ShouldBeNil)
So(res == 1, ShouldBeTrue)
}))
Convey(" add group ", t, WithMysql(func(d *Dao) {
res, err := d.AddGroup(context.TODO(), &model.FactorGroup{
Name: "test",
CTime: time.Now(),
})
So(err, ShouldBeNil)
So(res == 1, ShouldBeTrue)
}))
}
|
["\"DEPLOY_ENV\""] | [] | ["DEPLOY_ENV"] | [] | ["DEPLOY_ENV"] | go | 1 | 0 | |
core/settings.py
|
"""
Django settings for core project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import dj_database_url
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd party apps
'rest_framework',
'rest_framework.authtoken',
'djoser',
'cloudinary',
'cloudinary_storage',
# Local apps
'dish',
'order',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('NAME'),
'USER': os.environ.get('USER'),
'PASSWORD': os.environ.get('PASSWORD'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework_simplejwt.authentication.JWTAuthentication',
],
}
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
CLOUDINARY_STORAGE = {
'CLOUD_NAME': os.environ.get('CLOUD_NAME'),
'API_KEY': os.environ.get('API_KEY'),
'API_SECRET': os.environ.get('API_SECRET'),
}
DEFAULT_FILE_STORAGE = 'cloudinary_storage.storage.MediaCloudinaryStorage'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
TIME_ZONE = 'Africa/Nairobi'
prod_db = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(prod_db)
|
[] | [] | ["CLOUD_NAME", "PASSWORD", "API_KEY", "SECRET_KEY", "USER", "API_SECRET", "NAME"] | [] | ["CLOUD_NAME", "PASSWORD", "API_KEY", "SECRET_KEY", "USER", "API_SECRET", "NAME"] | python | 7 | 0 | |
sdk/management/samples/src/main/java/com/azure/management/network/samples/ManageVirtualMachinesInParallelWithNetwork.java
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.management.network.samples;
import com.azure.core.http.policy.HttpLogDetailLevel;
import com.azure.management.Azure;
import com.azure.management.compute.KnownLinuxVirtualMachineImage;
import com.azure.management.compute.VirtualMachine;
import com.azure.management.compute.VirtualMachineSizeTypes;
import com.azure.management.network.Network;
import com.azure.management.network.NetworkSecurityGroup;
import com.azure.management.network.SecurityRuleProtocol;
import com.azure.management.resources.ResourceGroup;
import com.azure.management.resources.fluentcore.arm.Region;
import com.azure.management.resources.fluentcore.model.Creatable;
import com.azure.management.samples.Utils;
import com.azure.management.storage.StorageAccount;
import org.apache.commons.lang.time.StopWatch;
import java.io.File;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
* Azure Network sample for managing virtual machines with virtual network -
* Create a virtual network with two Subnets – frontend and backend
* Frontend allows HTTP in and denies Internet out
* Backend denies Internet in and Internet out
* Create m Linux virtual machines in the frontend
* Create m Linux virtual machines in the backend.
*/
public final class ManageVirtualMachinesInParallelWithNetwork {
/**
* Main function which runs the actual sample.
* @param azure instance of the azure client
* @return true if sample runs successfully
*/
public static boolean runSample(Azure azure) {
final int frontendVMCount = 4;
final int backendVMCount = 4;
final String rgName = azure.sdkContext().randomResourceName("rgNEPP", 24);
final String frontEndNsgName = azure.sdkContext().randomResourceName("fensg", 24);
final String backEndNsgName = azure.sdkContext().randomResourceName("bensg", 24);
final String networkName = azure.sdkContext().randomResourceName("vnetCOMV", 24);
final String storageAccountName = azure.sdkContext().randomResourceName("stgCOMV", 20);
final String userName = "tirekicker";
// [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="Serves as an example, not for deployment. Please change when using this in your code.")]
final String password = "12NewPA$$w0rd!";
final Region region = Region.US_SOUTH_CENTRAL;
try {
// Create a resource group [Where all resources gets created]
ResourceGroup resourceGroup = azure.resourceGroups().define(rgName)
.withRegion(region)
.create();
//============================================================
// Define a network security group for the front end of a subnet
// front end subnet contains two rules
// - ALLOW-SSH - allows SSH traffic into the front end subnet
// - ALLOW-WEB- allows HTTP traffic into the front end subnet
Creatable<NetworkSecurityGroup> frontEndNSGCreatable = azure.networkSecurityGroups().define(frontEndNsgName)
.withRegion(region)
.withExistingResourceGroup(resourceGroup)
.defineRule("ALLOW-SSH")
.allowInbound()
.fromAnyAddress()
.fromAnyPort()
.toAnyAddress()
.toPort(22)
.withProtocol(SecurityRuleProtocol.TCP)
.withPriority(100)
.withDescription("Allow SSH")
.attach()
.defineRule("ALLOW-HTTP")
.allowInbound()
.fromAnyAddress()
.fromAnyPort()
.toAnyAddress()
.toPort(80)
.withProtocol(SecurityRuleProtocol.TCP)
.withPriority(101)
.withDescription("Allow HTTP")
.attach();
//============================================================
// Define a network security group for the back end of a subnet
// back end subnet contains two rules
// - ALLOW-SQL - allows SQL traffic only from the front end subnet
// - DENY-WEB - denies all outbound internet traffic from the back end subnet
Creatable<NetworkSecurityGroup> backEndNSGCreatable = azure.networkSecurityGroups().define(backEndNsgName)
.withRegion(region)
.withExistingResourceGroup(resourceGroup)
.defineRule("ALLOW-SQL")
.allowInbound()
.fromAddress("172.16.1.0/24")
.fromAnyPort()
.toAnyAddress()
.toPort(1433)
.withProtocol(SecurityRuleProtocol.TCP)
.withPriority(100)
.withDescription("Allow SQL")
.attach()
.defineRule("DENY-WEB")
.denyOutbound()
.fromAnyAddress()
.fromAnyPort()
.toAnyAddress()
.toAnyPort()
.withAnyProtocol()
.withDescription("Deny Web")
.withPriority(200)
.attach();
System.out.println("Creating security group for the front ends - allows SSH and HTTP");
System.out.println("Creating security group for the back ends - allows SSH and denies all outbound internet traffic");
@SuppressWarnings("unchecked")
Collection<NetworkSecurityGroup> networkSecurityGroups = azure.networkSecurityGroups()
.create(frontEndNSGCreatable, backEndNSGCreatable).values();
NetworkSecurityGroup frontendNSG = null;
NetworkSecurityGroup backendNSG = null;
for (NetworkSecurityGroup nsg : networkSecurityGroups) {
if (nsg.name().equalsIgnoreCase(frontEndNsgName)) {
frontendNSG = nsg;
}
if (nsg.name().equalsIgnoreCase(backEndNsgName)) {
backendNSG = nsg;
}
}
System.out.println("Created a security group for the front end: " + frontendNSG.id());
Utils.print(frontendNSG);
System.out.println("Created a security group for the back end: " + backendNSG.id());
Utils.print(backendNSG);
// Create Network [Where all the virtual machines get added to]
Network network = azure.networks().define(networkName)
.withRegion(region)
.withExistingResourceGroup(resourceGroup)
.withAddressSpace("172.16.0.0/16")
.defineSubnet("Front-end")
.withAddressPrefix("172.16.1.0/24")
.withExistingNetworkSecurityGroup(frontendNSG)
.attach()
.defineSubnet("Back-end")
.withAddressPrefix("172.16.2.0/24")
.withExistingNetworkSecurityGroup(backendNSG)
.attach()
.create();
// Prepare Creatable Storage account definition [For storing VMs disk]
Creatable<StorageAccount> creatableStorageAccount = azure.storageAccounts().define(storageAccountName)
.withRegion(region)
.withExistingResourceGroup(resourceGroup);
// Prepare a batch of Creatable Virtual Machines definitions
List<Creatable<VirtualMachine>> frontendCreatableVirtualMachines = new ArrayList<>();
for (int i = 0; i < frontendVMCount; i++) {
Creatable<VirtualMachine> creatableVirtualMachine = azure.virtualMachines().define("VM-FE-" + i)
.withRegion(region)
.withExistingResourceGroup(resourceGroup)
.withExistingPrimaryNetwork(network)
.withSubnet("Front-end")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername(userName)
.withRootPassword(password)
.withSize(VirtualMachineSizeTypes.STANDARD_D3_V2)
.withNewStorageAccount(creatableStorageAccount);
frontendCreatableVirtualMachines.add(creatableVirtualMachine);
}
List<Creatable<VirtualMachine>> backendCreatableVirtualMachines = new ArrayList<>();
for (int i = 0; i < backendVMCount; i++) {
Creatable<VirtualMachine> creatableVirtualMachine = azure.virtualMachines().define("VM-BE-" + i)
.withRegion(region)
.withExistingResourceGroup(resourceGroup)
.withExistingPrimaryNetwork(network)
.withSubnet("Back-end")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername(userName)
.withRootPassword(password)
.withSize(VirtualMachineSizeTypes.STANDARD_D3_V2)
.withNewStorageAccount(creatableStorageAccount);
backendCreatableVirtualMachines.add(creatableVirtualMachine);
}
System.out.println("Creating the virtual machines");
List<Creatable<VirtualMachine>> allCreatableVirtualMachines = new ArrayList<>();
allCreatableVirtualMachines.addAll(frontendCreatableVirtualMachines);
allCreatableVirtualMachines.addAll(backendCreatableVirtualMachines);
StopWatch stopwatch = new StopWatch();
stopwatch.start();
Collection<VirtualMachine> virtualMachines = azure.virtualMachines().create(allCreatableVirtualMachines).values();
stopwatch.stop();
System.out.println("Created virtual machines");
for (VirtualMachine virtualMachine : virtualMachines) {
System.out.println(virtualMachine.id());
}
System.out.println("Virtual Machines create: (took " + (stopwatch.getTime() / 1000) + " seconds) ");
return true;
} catch (Exception f) {
System.out.println(f.getMessage());
f.printStackTrace();
} finally {
try {
System.out.println("Deleting Resource Group: " + rgName);
azure.resourceGroups().deleteByName(rgName);
System.out.println("Deleted Resource Group: " + rgName);
} catch (NullPointerException npe) {
System.out.println("Did not create any resources in Azure. No clean up is necessary");
} catch (Exception g) {
g.printStackTrace();
}
}
return false;
}
/**
* Main entry point.
* @param args the parameters
*/
public static void main(String[] args) {
try {
//=============================================================
// Authenticate
final File credFile = new File(System.getenv("AZURE_AUTH_LOCATION"));
Azure azure = Azure.configure()
.withLogLevel(HttpLogDetailLevel.BASIC)
.authenticate(credFile)
.withDefaultSubscription();
// Print selected subscription
System.out.println("Selected subscription: " + azure.subscriptionId());
runSample(azure);
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private ManageVirtualMachinesInParallelWithNetwork() {
}
}
|
["\"AZURE_AUTH_LOCATION\""] | [] | ["AZURE_AUTH_LOCATION"] | [] | ["AZURE_AUTH_LOCATION"] | java | 1 | 0 | |
config/config_test.go
|
package config_test
import (
"github.com/swift9/ares-nacos/config"
"os"
"testing"
)
func TestEnv(t *testing.T) {
println(os.Getenv("TEST"))
}
func TestConfigString(t *testing.T) {
println(config.GetString("test", "test"))
}
|
["\"TEST\""] | [] | ["TEST"] | [] | ["TEST"] | go | 1 | 0 | |
cni-plugin/tests/calico_cni_test.go
|
// Copyright (c) 2015-2021 Tigera, Inc. All rights reserved.
package main_test
import (
"context"
"fmt"
"io"
"math/rand"
"net"
"os"
"os/exec"
"strings"
"syscall"
cniv1 "github.com/containernetworking/cni/pkg/types/100"
"github.com/containernetworking/plugins/pkg/ns"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vishvananda/netlink"
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
"github.com/projectcalico/calico/cni-plugin/internal/pkg/testutils"
"github.com/projectcalico/calico/cni-plugin/internal/pkg/utils"
grpc_dataplane "github.com/projectcalico/calico/cni-plugin/pkg/dataplane/grpc"
"github.com/projectcalico/calico/cni-plugin/pkg/dataplane/grpc/proto"
"github.com/projectcalico/calico/cni-plugin/pkg/dataplane/linux"
libapiv3 "github.com/projectcalico/calico/libcalico-go/lib/apis/v3"
client "github.com/projectcalico/calico/libcalico-go/lib/clientv3"
"github.com/projectcalico/calico/libcalico-go/lib/names"
"github.com/projectcalico/calico/libcalico-go/lib/options"
)
var _ = Describe("CalicoCni", func() {
hostname, _ := names.Hostname()
ctx := context.Background()
calicoClient, _ := client.NewFromEnv()
BeforeEach(func() {
if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
Skip("Don't run non-kubernetes test with Kubernetes Datastore")
}
testutils.WipeDatastore()
// Create the node for these tests. The IPAM code requires a corresponding Calico node to exist.
var err error
n := libapiv3.NewNode()
n.Name, err = names.Hostname()
Expect(err).NotTo(HaveOccurred())
_, err = calicoClient.Nodes().Create(context.Background(), n, options.SetOptions{})
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
if os.Getenv("DATASTORE_TYPE") == "kubernetes" {
// no cleanup needed.
return
}
// Delete the node.
name, err := names.Hostname()
Expect(err).NotTo(HaveOccurred())
_, err = calicoClient.Nodes().Delete(context.Background(), name, options.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
})
cniVersion := os.Getenv("CNI_SPEC_VERSION")
Context("using host-local IPAM", func() {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"log_level": "info",
"nodename_file_optional": true,
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
}
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
It("successfully networks the namespace", func() {
containerID, result, contVeth, contAddresses, contRoutes, contNs, err := testutils.CreateContainerWithId(netconf, "", testutils.TEST_DEFAULT_NS, "", "abc123")
Expect(err).ShouldNot(HaveOccurred())
Expect(len(result.IPs)).Should(Equal(1))
ip := result.IPs[0].Address.IP.String()
result.IPs[0].Address.IP = result.IPs[0].Address.IP.To4() // Make sure the IP is respresented as 4 bytes
Expect(result.IPs[0].Address.Mask.String()).Should(Equal("ffffffff"))
// datastore things:
// Profile is created with correct details
profile, err := calicoClient.Profiles().Get(ctx, "net1", options.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(profile.Spec.LabelsToApply).Should(Equal(map[string]string{"net1": ""}))
Expect(profile.Spec.Egress).Should(Equal([]apiv3.Rule{{Action: "Allow"}}))
Expect(profile.Spec.Ingress).Should(Equal([]apiv3.Rule{{Action: "Allow", Source: apiv3.EntityRule{Selector: "has(net1)"}}}))
// The endpoint is created in etcd
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
ids := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: "cni",
Endpoint: "eth0",
Pod: "",
ContainerID: containerID,
}
wrkload, err := ids.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.TEST_DEFAULT_NS))
mac := contVeth.Attrs().HardwareAddr
Expect(endpoints.Items[0].Spec).Should(Equal(libapiv3.WorkloadEndpointSpec{
InterfaceName: fmt.Sprintf("cali%s", containerID),
IPNetworks: []string{result.IPs[0].Address.String()},
MAC: mac.String(),
Profiles: []string{"net1"},
Node: hostname,
Endpoint: "eth0",
Workload: "",
ContainerID: containerID,
Orchestrator: "cni",
}))
// Routes and interface on host - there's is nothing to assert on the routes since felix adds those.
hostVethName := "cali" + containerID[:utils.Min(11, len(containerID))] //"cali" + containerID
hostVeth, err := netlink.LinkByName(hostVethName)
Expect(err).ToNot(HaveOccurred())
Expect(hostVeth.Attrs().Flags.String()).Should(ContainSubstring("up"))
Expect(hostVeth.Attrs().MTU).Should(Equal(1500))
Expect(hostVeth.Attrs().HardwareAddr.String()).Should(Equal("ee:ee:ee:ee:ee:ee"))
// Assert hostVeth sysctl values are set to what we expect for IPv4.
err = testutils.CheckSysctlValue(fmt.Sprintf("/proc/sys/net/ipv4/conf/%s/proxy_arp", hostVethName), "1")
Expect(err).ShouldNot(HaveOccurred())
err = testutils.CheckSysctlValue(fmt.Sprintf("/proc/sys/net/ipv4/neigh/%s/proxy_delay", hostVethName), "0")
Expect(err).ShouldNot(HaveOccurred())
err = testutils.CheckSysctlValue(fmt.Sprintf("/proc/sys/net/ipv4/conf/%s/forwarding", hostVethName), "1")
Expect(err).ShouldNot(HaveOccurred())
// Assert the container sysctl values are set to what we expect for IPv4.
targetNs, _ := ns.GetNS(contNs.Path())
err = targetNs.Do(func(_ ns.NetNS) error {
return testutils.CheckSysctlValue("/proc/sys/net/ipv4/ip_forward", "0")
})
Expect(err).ShouldNot(HaveOccurred())
// Assert that the host-side route is programmed correctly.
hostRoutes, err := netlink.RouteList(hostVeth, syscall.AF_INET)
Expect(err).ShouldNot(HaveOccurred())
Expect(hostRoutes[0]).Should(Equal(netlink.Route{
LinkIndex: hostVeth.Attrs().Index,
Scope: netlink.SCOPE_LINK,
Dst: &result.IPs[0].Address,
Protocol: syscall.RTPROT_BOOT,
Table: syscall.RT_TABLE_MAIN,
Type: syscall.RTN_UNICAST,
Family: syscall.AF_INET,
}))
// Routes and interface in netns
Expect(contVeth.Attrs().Flags.String()).Should(ContainSubstring("up"))
// Assume the first IP is the IPv4 address
Expect(contAddresses[0].IP.String()).Should(Equal(ip))
Expect(contRoutes).Should(SatisfyAll(ContainElement(netlink.Route{
LinkIndex: contVeth.Attrs().Index,
Gw: net.IPv4(169, 254, 1, 1).To4(),
Protocol: syscall.RTPROT_BOOT,
Table: syscall.RT_TABLE_MAIN,
Type: syscall.RTN_UNICAST,
Family: syscall.AF_INET,
}),
ContainElement(netlink.Route{
LinkIndex: contVeth.Attrs().Index,
Scope: netlink.SCOPE_LINK,
Dst: &net.IPNet{IP: net.IPv4(169, 254, 1, 1).To4(), Mask: net.CIDRMask(32, 32)},
Protocol: syscall.RTPROT_BOOT,
Table: syscall.RT_TABLE_MAIN,
Type: syscall.RTN_UNICAST,
Family: syscall.AF_INET,
})))
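// The default route's next hop is the link-local address 169.254.1.1; the host-side veth answers ARP for it because proxy_arp is set to 1 (asserted above).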
_, err = testutils.DeleteContainerWithId(netconf, contNs.Path(), "", testutils.TEST_DEFAULT_NS, containerID)
Expect(err).ShouldNot(HaveOccurred())
// Make sure there are no endpoints anymore
endpoints, err = calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(0))
// Make sure the interface has been removed from the namespace
err = targetNs.Do(func(_ ns.NetNS) error {
_, err = netlink.LinkByName("eth0")
return err
})
Expect(err).Should(HaveOccurred())
Expect(err.Error()).Should(Equal("Link not found"))
// Make sure the interface has been removed from the host
_, err = netlink.LinkByName("cali" + containerID)
Expect(err).Should(HaveOccurred())
Expect(err.Error()).Should(Equal("Link not found"))
})
Context("when the same hostVeth exists", func() {
It("successfully networks the namespace", func() {
containerID := fmt.Sprintf("con%d", rand.Uint32())
if err := testutils.CreateHostVeth(containerID, "", "", hostname); err != nil {
panic(err)
}
_, _, _, _, _, contNs, err := testutils.CreateContainerWithId(netconf, "", testutils.TEST_DEFAULT_NS, "", containerID)
Expect(err).ShouldNot(HaveOccurred())
_, err = testutils.DeleteContainerWithId(netconf, contNs.Path(), "", testutils.TEST_DEFAULT_NS, containerID)
Expect(err).ShouldNot(HaveOccurred())
})
})
Context("when ready flag is false", func() {
It("errors when ADD is done", func() {
ci, err := calicoClient.ClusterInformation().Get(ctx, "default", options.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
r := false
ci.Spec.DatastoreReady = &r
_, err = calicoClient.ClusterInformation().Update(ctx, ci, options.SetOptions{})
Expect(err).ShouldNot(HaveOccurred())
_, _, _, _, _, _, err = testutils.CreateContainer(netconf, "", testutils.TEST_DEFAULT_NS, "")
Expect(err).Should(HaveOccurred())
})
It("errors when DEL is done", func() {
_, _, _, _, _, contNs, err := testutils.CreateContainer(netconf, "", testutils.TEST_DEFAULT_NS, "")
Expect(err).ShouldNot(HaveOccurred())
ci, err := calicoClient.ClusterInformation().Get(ctx, "default", options.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
r := false
ci.Spec.DatastoreReady = &r
_, err = calicoClient.ClusterInformation().Update(ctx, ci, options.SetOptions{})
Expect(err).ShouldNot(HaveOccurred())
exitCode, err := testutils.DeleteContainer(netconf, contNs.Path(), "", testutils.TEST_DEFAULT_NS)
Expect(err).ShouldNot(HaveOccurred())
Expect(exitCode).ShouldNot(Equal(0))
})
})
Context("when ready flag is missing", func() {
It("errors when ADD is done", func() {
_, err := calicoClient.ClusterInformation().Delete(ctx, "default", options.DeleteOptions{})
Expect(err).ShouldNot(HaveOccurred())
_, _, _, _, _, _, err = testutils.CreateContainer(netconf, "", testutils.TEST_DEFAULT_NS, "")
Expect(err).Should(HaveOccurred())
})
It("errors when DEL is done", func() {
_, _, _, _, _, contNs, err := testutils.CreateContainer(netconf, "", testutils.TEST_DEFAULT_NS, "")
Expect(err).ShouldNot(HaveOccurred())
_, err = calicoClient.ClusterInformation().Delete(ctx, "default", options.DeleteOptions{})
Expect(err).ShouldNot(HaveOccurred())
exitCode, err := testutils.DeleteContainer(netconf, contNs.Path(), "", testutils.TEST_DEFAULT_NS)
Expect(err).ShouldNot(HaveOccurred())
Expect(exitCode).ShouldNot(Equal(0))
})
})
})
Context("With IP forwarding enabled", func() {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"log_level": "info",
"nodename_file_optional": true,
"datastore_type": "%s",
"container_settings": {
"allow_ip_forwarding": true
},
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
}
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
It("should enable IPv4 forwarding", func() {
containerID := fmt.Sprintf("con%d", rand.Uint32())
_, _, _, _, _, contNs, err := testutils.CreateContainerWithId(netconf, "", testutils.TEST_DEFAULT_NS, "", containerID)
By("successfully networking the container", func() {
Expect(err).ShouldNot(HaveOccurred())
})
By("asserting IPv4 forwarding is enabled", func() {
targetNs, _ := ns.GetNS(contNs.Path())
err = targetNs.Do(func(_ ns.NetNS) error {
return testutils.CheckSysctlValue("/proc/sys/net/ipv4/ip_forward", "1")
})
Expect(err).ShouldNot(HaveOccurred())
})
By("tearing down the container", func() {
_, err = testutils.DeleteContainerWithId(netconf, contNs.Path(), "", testutils.TEST_DEFAULT_NS, containerID)
Expect(err).ShouldNot(HaveOccurred())
})
})
})
Context("With an invalid dataplane type", func() {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"log_level": "info",
"nodename_file_optional": true,
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
},
"dataplane_options": {
"type": "invalid-dataplane-type"
}
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
It("fails with an error", func() {
_, _, _, _, _, _, err := testutils.CreateContainer(netconf, "", testutils.TEST_DEFAULT_NS, "")
Expect(err).Should(HaveOccurred())
})
})
Context("With a misconfigured gRPC dataplane", func() {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"log_level": "info",
"nodename_file_optional": true,
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
},
"dataplane_options": {
"type": "grpc",
"socket": "unix:///tmp/xxxx-non-existent-dont-create-this-please.sock"
}
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
It("fails with an error", func() {
_, _, _, _, _, _, err := testutils.CreateContainer(netconf, "", testutils.TEST_DEFAULT_NS, "")
Expect(err).Should(HaveOccurred())
})
})
Context("With a gRPC dataplane", func() {
It("communicates with the dataplane", func(done Done) {
var contNs ns.NetNS
var grpcBackend *grpc_dataplane.TestServer
var exitCode int
var err error
socket := fmt.Sprintf("/tmp/cni_grpc_dataplane_test%d.sock", rand.Uint32())
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"log_level": "info",
"nodename_file_optional": true,
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
},
"dataplane_options": {
"type": "grpc",
"socket": "unix://%s",
"extra": "option"
}
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"), socket)
grpcBackend, err = grpc_dataplane.StartTestServer(socket, true, "00:11:22:33:44:55")
Expect(err).ShouldNot(HaveOccurred())
Expect(grpcBackend).ShouldNot(Equal(nil))
By("sending ADD requests to the gRPC backend")
_, _, _, _, _, contNs, err = testutils.CreateContainer(netconf, "", testutils.TEST_DEFAULT_NS, "")
Expect(err).ShouldNot(HaveOccurred())
message := <-grpcBackend.Received
addRequest, ok := message.(*proto.AddRequest)
Expect(ok).Should(BeTrue())
Expect(addRequest.Netns).Should(Equal(contNs.Path()))
option, ok := addRequest.DataplaneOptions["extra"]
Expect(ok).Should(BeTrue())
Expect(option).Should(Equal("option"))
Expect(len(addRequest.ContainerIps)).Should(BeNumerically(">=", 1))
By("erroring if the backend fails to cleanup an interface")
grpcBackend.SetResult(false)
exitCode, err = testutils.DeleteContainer(netconf, contNs.Path(), "", testutils.TEST_DEFAULT_NS)
Expect(err).ShouldNot(HaveOccurred())
Expect(exitCode).ShouldNot(Equal(0))
message = <-grpcBackend.Received
_, ok = message.(*proto.DelRequest)
Expect(ok).Should(BeTrue())
By("sending DEL requests to the gRPC backend")
grpcBackend.SetResult(true)
exitCode, err = testutils.DeleteContainer(netconf, contNs.Path(), "", testutils.TEST_DEFAULT_NS)
Expect(err).ShouldNot(HaveOccurred())
Expect(exitCode).Should(Equal(0))
message = <-grpcBackend.Received
delRequest, ok := message.(*proto.DelRequest)
Expect(ok).Should(BeTrue())
Expect(delRequest.Netns).Should(Equal(contNs.Path()))
option, ok = delRequest.DataplaneOptions["extra"]
Expect(ok).Should(BeTrue())
Expect(option).Should(Equal("option"))
By("erroring if the backend fails to configure an interface")
grpcBackend.SetResult(false)
_, _, _, _, _, _, err = testutils.CreateContainer(netconf, "", testutils.TEST_DEFAULT_NS, "")
Expect(err).Should(HaveOccurred())
message = <-grpcBackend.Received
_, ok = message.(*proto.AddRequest)
Expect(ok).Should(BeTrue())
grpcBackend.GracefulStop()
err = syscall.Unlink(socket)
if err != nil && !strings.Contains(err.Error(), "no such file or directory") {
Expect(err).NotTo(HaveOccurred())
}
close(done)
}, 30.0)
})
Context("deprecate hostname for nodename", func() {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"hostname": "named-hostname.somewhere",
"nodename_file_optional": true,
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
}
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
It("has hostname even though deprecated", func() {
containerID, _, _, _, _, contNs, err := testutils.CreateContainerWithId(netconf, "", testutils.TEST_DEFAULT_NS, "", "abcd1234")
Expect(err).ShouldNot(HaveOccurred())
// The endpoint is created in etcd
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
ids := names.WorkloadEndpointIdentifiers{
Node: "named-hostname.somewhere",
Orchestrator: "cni",
Endpoint: "eth0",
Pod: "",
ContainerID: containerID,
}
wrkload, err := ids.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.TEST_DEFAULT_NS))
Expect(endpoints.Items[0].Spec.Node).Should(Equal("named-hostname.somewhere"))
_, err = testutils.DeleteContainerWithId(netconf, contNs.Path(), "", testutils.TEST_DEFAULT_NS, containerID)
Expect(err).ShouldNot(HaveOccurred())
})
netconf2 := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"hostname": "named-hostname",
"nodename": "named-nodename",
"nodename_file_optional": true,
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
}
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
It("nodename takes precedence over hostname", func() {
containerID, _, _, _, _, contNs, err := testutils.CreateContainerWithId(netconf2, "", testutils.TEST_DEFAULT_NS, "", "abcd")
Expect(err).ShouldNot(HaveOccurred())
// The endpoint is created in etcd
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
ids := names.WorkloadEndpointIdentifiers{
Node: "named-nodename",
Orchestrator: "cni",
Endpoint: "eth0",
Pod: "",
ContainerID: containerID,
}
wrkload, err := ids.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
Expect(endpoints.Items[0].Name).Should(Equal(wrkload))
Expect(endpoints.Items[0].Namespace).Should(Equal(testutils.TEST_DEFAULT_NS))
Expect(endpoints.Items[0].Spec.Node).Should(Equal("named-nodename"))
_, err = testutils.DeleteContainerWithId(netconf2, contNs.Path(), "", testutils.TEST_DEFAULT_NS, containerID)
Expect(err).ShouldNot(HaveOccurred())
})
})
Context("Mesos Labels", func() {
It("applies mesos labels", func() {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"hostname": "named-hostname.somewhere",
"nodename_file_optional": true,
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
},
"args": {
"org.apache.mesos": {
"network_info": {
"labels": {
"labels": [
{
"key": "k",
"value": "v"
}
]
}
}
}
}
}`, cniVersion, os.Getenv("ETCD_IP"))
containerID, _, _, _, _, contNs, err := testutils.CreateContainerWithId(netconf, "", testutils.TEST_DEFAULT_NS, "", "abcd1234")
Expect(err).ShouldNot(HaveOccurred())
// The endpoint is created in etcd
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
Expect(endpoints.Items[0].Labels["k"]).Should(Equal("v"))
_, err = testutils.DeleteContainerWithId(netconf, contNs.Path(), "", testutils.TEST_DEFAULT_NS, containerID)
Expect(err).ShouldNot(HaveOccurred())
})
It("sanitizes dcos label", func() {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"hostname": "named-hostname.somewhere",
"nodename_file_optional": true,
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
},
"args": {
"org.apache.mesos": {
"network_info": {
"labels": {
"labels": [
{
"key": "DCOS_SPACE",
"value": "/a/b/c"
}
]
}
}
}
}
}`, cniVersion, os.Getenv("ETCD_IP"))
containerID, _, _, _, _, contNs, err := testutils.CreateContainerWithId(netconf, "", testutils.TEST_DEFAULT_NS, "", "abcd1234")
Expect(err).ShouldNot(HaveOccurred())
// The endpoint is created in etcd
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
Expect(endpoints.Items[0].Labels["DCOS_SPACE"]).Should(Equal("a.b.c"))
_, err = testutils.DeleteContainerWithId(netconf, contNs.Path(), "", testutils.TEST_DEFAULT_NS, containerID)
Expect(err).ShouldNot(HaveOccurred())
})
})
Context("feature flag processing", func() {
It("errors if ip_addrs_no_ipam if not running kubernetes", func() {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"feature_control": {
"ip_addrs_no_ipam": true
},
"etcd_endpoints": "http://%s:2379",
"nodename": "named-nodename",
"nodename_file_optional": true,
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
}
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
containerNs, containerId, err := testutils.CreateContainerNamespace()
Expect(err).ToNot(HaveOccurred())
_, _, _, _, err = testutils.RunCNIPluginWithId(netconf, "", testutils.K8S_TEST_NS, "", containerId, "", containerNs)
Expect(err).To(HaveOccurred())
})
})
Describe("DEL", func() {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"nodename_file_optional": true,
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
}
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
Context("when it was never called for SetUP", func() {
Context("and a namespace does exist", func() {
It("exits with 'success' error code", func() {
contNs, containerID, err := testutils.CreateContainerNamespace()
Expect(err).ShouldNot(HaveOccurred())
exitCode, err := testutils.DeleteContainerWithId(netconf, contNs.Path(), "", testutils.TEST_DEFAULT_NS, containerID)
Expect(err).ShouldNot(HaveOccurred())
Expect(exitCode).To(Equal(0))
})
})
Context("and no namespace exists", func() {
It("exits with 'success' error code", func() {
exitCode, err := testutils.DeleteContainer(netconf, "/not/a/real/path1234567890", "", testutils.TEST_DEFAULT_NS)
Expect(err).ShouldNot(HaveOccurred())
Expect(exitCode).To(Equal(0))
})
})
})
})
Describe("with calico-ipam enabled, after creating a container", func() {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"log_level": "info",
"nodename_file_optional": true,
"ipam": { "type": "calico-ipam" }
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
var containerID string
var workloadName string
var endpointSpec libapiv3.WorkloadEndpointSpec
var contNs ns.NetNS
var result *cniv1.Result
checkIPAMReservation := func() {
// IPAM reservation should still be in place.
handleID := utils.GetHandleID("net1", containerID, workloadName)
ipamIPs, err := calicoClient.IPAM().IPsByHandle(context.Background(), handleID)
ExpectWithOffset(1, err).NotTo(HaveOccurred())
ExpectWithOffset(1, ipamIPs).To(HaveLen(1),
"There should be an IPAM handle for endpoint")
ExpectWithOffset(1, ipamIPs[0].String()+"/32").To(Equal(endpointSpec.IPNetworks[0]))
}
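// checkIPAMReservation is reused after the repeat-ADD scenarios below to verify the IPAM handle still maps to exactly one IP matching the endpoint.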
BeforeEach(func() {
// Create a new ipPool.
testutils.MustCreateNewIPPool(calicoClient, "10.0.0.0/24", false, false, true)
var err error
containerID, result, _, _, _, contNs, err = testutils.CreateContainerWithId(netconf, "", testutils.TEST_DEFAULT_NS, "", "badbeef")
Expect(err).ShouldNot(HaveOccurred())
// The endpoint is created in etcd
endpoints, err := calicoClient.WorkloadEndpoints().List(ctx, options.ListOptions{Namespace: "default"})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).To(HaveLen(1))
ids := names.WorkloadEndpointIdentifiers{
Node: hostname,
Orchestrator: "cni",
Endpoint: "eth0",
Pod: "",
ContainerID: containerID,
}
workloadName, err = ids.CalculateWorkloadEndpointName(false)
Expect(err).NotTo(HaveOccurred())
endpoint := endpoints.Items[0]
Expect(endpoint.Name).Should(Equal(workloadName))
endpointSpec = endpoint.Spec
Expect(endpoint.Namespace).Should(Equal(testutils.TEST_DEFAULT_NS))
Expect(endpoint.Spec.Node).Should(Equal(hostname))
Expect(endpoint.Spec.Endpoint).Should(Equal("eth0"))
Expect(endpoint.Spec.ContainerID).Should(Equal(containerID))
Expect(endpoint.Spec.Orchestrator).Should(Equal("cni"))
Expect(endpoint.Spec.Workload).Should(BeEmpty())
// IPAM reservation should have been created.
checkIPAMReservation()
})
AfterEach(func() {
_, err := testutils.DeleteContainerWithId(
netconf, contNs.Path(), "", testutils.TEST_DEFAULT_NS, containerID)
Expect(err).ShouldNot(HaveOccurred())
})
It("a second ADD for the same container should be a no-op", func() {
// Try to create the same container (so CNI receives the ADD for the same endpoint again)
resultSecondAdd, _, _, _, err := testutils.RunCNIPluginWithId(netconf, "", testutils.TEST_DEFAULT_NS, "", containerID, "eth0", contNs)
Expect(err).ShouldNot(HaveOccurred())
Expect(resultSecondAdd).Should(Equal(result))
// The endpoint is created in etcd
endpoints, err := calicoClient.WorkloadEndpoints().List(context.Background(), options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
Expect(endpoints.Items[0].Spec.Profiles).To(ConsistOf("net1"))
// IPAM reservation should still be in place.
checkIPAMReservation()
})
It("a second ADD with new profile ID should append it", func() {
// Try to create the same container (so CNI receives the ADD for the same endpoint again)
tweaked := strings.Replace(netconf, "net1", "net2", 1)
resultSecondAdd, _, _, _, err := testutils.RunCNIPluginWithId(tweaked, "", "", "", containerID, "", contNs)
Expect(err).ShouldNot(HaveOccurred())
Expect(resultSecondAdd).Should(Equal(result))
// The endpoint is created in etcd
endpoints, err := calicoClient.WorkloadEndpoints().List(context.Background(), options.ListOptions{})
Expect(err).ShouldNot(HaveOccurred())
Expect(endpoints.Items).Should(HaveLen(1))
Expect(endpoints.Items[0].Spec.Profiles).To(ConsistOf("net1", "net2"))
// IPAM reservation should still be in place.
checkIPAMReservation()
})
Context("with networking rigged to fail", func() {
BeforeEach(func() {
// To prevent the networking attempt from succeeding, rename the old veth.
// This leaves a route and an eth0 in place that the plugin will struggle with.
By("Breaking networking for the created interface")
hostVeth := endpointSpec.InterfaceName
newName := strings.Replace(hostVeth, "cali", "sali", 1)
output, err := exec.Command("ip", "link", "set", hostVeth, "down").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Output: %s", output))
output, err = exec.Command("ip", "link", "set", hostVeth, "name", newName).CombinedOutput()
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Output: %s", output))
output, err = exec.Command("ip", "link", "set", newName, "up").CombinedOutput()
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Output: %s", output))
})
It("a second ADD for the same container should leave the datastore alone", func() {
// Try to create the same container (so CNI receives the ADD for the same endpoint again)
By("Running the CNI plugin a second time on the same container")
_, _, _, _, err := testutils.RunCNIPluginWithId(netconf, "", "", "", containerID, "", contNs)
Expect(err).ShouldNot(HaveOccurred())
// IPAM reservation should still be in place.
checkIPAMReservation()
})
})
})
Describe("SetupRoutes works fine when the route is already programmed", func() {
Context("container route already exists on the host", func() {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
},
"nodename_file_optional": true,
"log_level":"info"
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
It("route setup should be resilient to existing route", func() {
By("creating a CNI networked container, which should also install the container route in the host namespace")
containerID, result, _, _, _, contNs, err := testutils.CreateContainerWithId(netconf, "", testutils.TEST_DEFAULT_NS, "", "meep1337")
Expect(err).ShouldNot(HaveOccurred())
// The CNI plugin generates the host-side veth name from the containerID when the "cni" orchestrator is used.
hostVethName := "cali" + containerID[:utils.Min(11, len(containerID))] // "cali" plus the first 11 chars of the container ID
hostVeth, err := netlink.LinkByName(hostVethName)
Expect(err).ToNot(HaveOccurred())
By("setting up the same route CNI plugin installed in the initial run for the hostVeth")
err = linux.SetupRoutes(hostVeth, result)
Expect(err).NotTo(HaveOccurred())
_, err = testutils.DeleteContainerWithId(netconf, contNs.Path(), "", testutils.TEST_DEFAULT_NS, containerID)
Expect(err).ShouldNot(HaveOccurred())
})
})
})
Describe("testConnection tests", func() {
It("successfully connects to the datastore", func(done Done) {
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2379",
"log_level": "info",
"nodename_file_optional": true,
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
}
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
pluginPath := fmt.Sprintf("%s/%s", os.Getenv("BIN"), os.Getenv("PLUGIN"))
c := exec.Command(pluginPath, "-t")
stdin, err := c.StdinPipe()
Expect(err).ToNot(HaveOccurred())
go func() {
defer stdin.Close()
_, _ = io.WriteString(stdin, netconf)
}()
_, err = c.CombinedOutput()
Expect(err).ToNot(HaveOccurred())
close(done)
}, 10)
It("reports it cannot connect to the datastore", func(done Done) {
// wrong port.
netconf := fmt.Sprintf(`
{
"cniVersion": "%s",
"name": "net1",
"type": "calico",
"etcd_endpoints": "http://%s:2370",
"log_level": "info",
"nodename_file_optional": true,
"datastore_type": "%s",
"ipam": {
"type": "host-local",
"subnet": "10.0.0.0/8"
}
}`, cniVersion, os.Getenv("ETCD_IP"), os.Getenv("DATASTORE_TYPE"))
pluginPath := fmt.Sprintf("%s/%s", os.Getenv("BIN"), os.Getenv("PLUGIN"))
c := exec.Command(pluginPath, "-t")
stdin, err := c.StdinPipe()
Expect(err).ToNot(HaveOccurred())
go func() {
defer stdin.Close()
_, _ = io.WriteString(stdin, netconf)
}()
_, err = c.CombinedOutput()
Expect(err).To(HaveOccurred())
close(done)
}, 10)
})
})
|
[
"\"DATASTORE_TYPE\"",
"\"DATASTORE_TYPE\"",
"\"CNI_SPEC_VERSION\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"ETCD_IP\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"BIN\"",
"\"PLUGIN\"",
"\"ETCD_IP\"",
"\"DATASTORE_TYPE\"",
"\"BIN\"",
"\"PLUGIN\""
] |
[] |
[
"CNI_SPEC_VERSION",
"BIN",
"PLUGIN",
"DATASTORE_TYPE",
"ETCD_IP"
] |
[]
|
["CNI_SPEC_VERSION", "BIN", "PLUGIN", "DATASTORE_TYPE", "ETCD_IP"]
|
go
| 5 | 0 | |
tests/e2e/e2e_setup_test.go
|
package e2e
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"strconv"
"testing"
"time"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"github.com/stretchr/testify/suite"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
"github.com/osmosis-labs/osmosis/v7/tests/e2e/chain"
dockerconfig "github.com/osmosis-labs/osmosis/v7/tests/e2e/docker"
"github.com/osmosis-labs/osmosis/v7/tests/e2e/util"
)
type status struct {
LatestHeight string `json:"latest_block_height"`
}
type syncInfo struct {
SyncInfo status `json:"SyncInfo"`
}
type chainConfig struct {
// voting period is the number of blocks it takes to deposit, plus ~1.2 blocks per validator to vote on the prop, plus a buffer.
votingPeriod float32
// upgrade proposal height for chain.
propHeight int
// Indexes of the validators to skip from running during initialization.
// This is needed for testing functionality like state-sync where we would
// like to start a node during tests post-initialization.
skipRunValidatorIndexes map[int]struct{}
chain *chain.Chain
}
const (
// osmosis version being upgraded to (folder must exist here https://github.com/osmosis-labs/osmosis/tree/main/app/upgrades)
upgradeVersion = "v9"
// estimated number of blocks it takes to submit a proposal
propSubmitBlocks float32 = 10
// estimated number of blocks it takes to deposit for a proposal
propDepositBlocks float32 = 10
// number of blocks it takes for a single validator to vote on a proposal
propVoteBlocks float32 = 1.2
// number of blocks used as a calculation buffer
propBufferBlocks float32 = 5
// max retries for json unmarshalling
maxRetries = 60
)
var (
// the number of validator configs listed here determines how many validators spawn on chain A and chain B respectively
validatorConfigsChainA = []*chain.ValidatorConfig{
{
Pruning: "default",
PruningKeepRecent: "0",
PruningInterval: "0",
SnapshotInterval: 1500,
SnapshotKeepRecent: 2,
},
{
Pruning: "nothing",
PruningKeepRecent: "0",
PruningInterval: "0",
SnapshotInterval: 1500,
SnapshotKeepRecent: 2,
},
{
Pruning: "custom",
PruningKeepRecent: "10000",
PruningInterval: "13",
SnapshotInterval: 1500,
SnapshotKeepRecent: 2,
},
{
Pruning: "everything",
PruningKeepRecent: "0",
PruningInterval: "0",
SnapshotInterval: 0,
SnapshotKeepRecent: 0,
},
}
validatorConfigsChainB = []*chain.ValidatorConfig{
{
Pruning: "default",
PruningKeepRecent: "0",
PruningInterval: "0",
SnapshotInterval: 1500,
SnapshotKeepRecent: 2,
},
{
Pruning: "nothing",
PruningKeepRecent: "0",
PruningInterval: "0",
SnapshotInterval: 1500,
SnapshotKeepRecent: 2,
},
{
Pruning: "custom",
PruningKeepRecent: "10000",
PruningInterval: "13",
SnapshotInterval: 1500,
SnapshotKeepRecent: 2,
},
}
)
type IntegrationTestSuite struct {
suite.Suite
tmpDirs []string
chainConfigs []*chainConfig
dkrPool *dockertest.Pool
dkrNet *dockertest.Network
hermesResource *dockertest.Resource
initResource *dockertest.Resource
valResources map[string][]*dockertest.Resource
dockerImages dockerconfig.ImageConfig
}
func TestIntegrationTestSuite(t *testing.T) {
suite.Run(t, new(IntegrationTestSuite))
}
func (s *IntegrationTestSuite) SetupSuite() {
s.T().Log("setting up e2e integration test suite...")
s.chainConfigs = make([]*chainConfig, 0, 2)
// The e2e test flow is as follows:
//
// 1. Configure two chains - chain A and chain B.
// * For each chain, set up two validators
// * Initialize configs and genesis for all validators.
// 2. Start both networks.
// 3. Run IBC relayer between the two chains.
// 4. Execute various e2e tests, including IBC.
var (
skipUpgrade bool
err error
)
if str := os.Getenv("OSMOSIS_E2E_SKIP_UPGRADE"); len(str) > 0 {
skipUpgrade, err = strconv.ParseBool(str)
s.Require().NoError(err)
}
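// When OSMOSIS_E2E_SKIP_UPGRADE parses to true, the pre-upgrade state creation, upgrade, and post-upgrade tests at the end of SetupSuite are skipped.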
s.dockerImages = *dockerconfig.NewImageConfig(!skipUpgrade)
s.configureDockerResources(chain.ChainAID, chain.ChainBID)
s.configureChain(chain.ChainAID, validatorConfigsChainA, map[int]struct{}{
3: {}, // skip validator at index 3
})
s.configureChain(chain.ChainBID, validatorConfigsChainB, map[int]struct{}{})
for i, chainConfig := range s.chainConfigs {
s.runValidators(chainConfig, s.dockerImages.OsmosisRepository, s.dockerImages.OsmosisTag, i*10)
}
// Run a relayer between every possible pair of chains.
for i := 0; i < len(s.chainConfigs); i++ {
for j := i + 1; j < len(s.chainConfigs); j++ {
s.runIBCRelayer(s.chainConfigs[i].chain, s.chainConfigs[j].chain)
}
}
if !skipUpgrade {
s.createPreUpgradeState()
s.upgrade()
s.runPostUpgradeTests()
}
}
func (s *IntegrationTestSuite) TearDownSuite() {
if str := os.Getenv("OSMOSIS_E2E_SKIP_CLEANUP"); len(str) > 0 {
skipCleanup, err := strconv.ParseBool(str)
s.Require().NoError(err)
if skipCleanup {
return
}
}
s.T().Log("tearing down e2e integration test suite...")
s.Require().NoError(s.dkrPool.Purge(s.hermesResource))
for _, vr := range s.valResources {
for _, r := range vr {
s.Require().NoError(s.dkrPool.Purge(r))
}
}
s.Require().NoError(s.dkrPool.RemoveNetwork(s.dkrNet))
for _, chainConfig := range s.chainConfigs {
os.RemoveAll(chainConfig.chain.ChainMeta.DataDir)
}
for _, td := range s.tmpDirs {
os.RemoveAll(td)
}
}
func (s *IntegrationTestSuite) runValidators(chainConfig *chainConfig, dockerRepository, dockerTag string, portOffset int) {
chain := chainConfig.chain
s.T().Logf("starting %s validator containers...", chain.ChainMeta.Id)
s.valResources[chain.ChainMeta.Id] = make([]*dockertest.Resource, len(chain.Validators)-len(chainConfig.skipRunValidatorIndexes))
pwd, err := os.Getwd()
s.Require().NoError(err)
for i, val := range chain.Validators {
// Skip some validators from running during set up.
// This is needed for testing functionality like
// state-sync where we might want to start some validators during tests.
if _, ok := chainConfig.skipRunValidatorIndexes[i]; ok {
s.T().Logf("skipping %s validator with index %d from running...", val.Name, i)
continue
}
runOpts := &dockertest.RunOptions{
Name: val.Name,
NetworkID: s.dkrNet.Network.ID,
Mounts: []string{
fmt.Sprintf("%s/:/osmosis/.osmosisd", val.ConfigDir),
fmt.Sprintf("%s/scripts:/osmosis", pwd),
},
Repository: dockerRepository,
Tag: dockerTag,
Cmd: []string{
"start",
},
}
// expose the first validator for debugging and communication
if val.Index == 0 {
runOpts.PortBindings = map[docker.Port][]docker.PortBinding{
"1317/tcp": {{HostIP: "", HostPort: fmt.Sprintf("%d", 1317+portOffset)}},
"6060/tcp": {{HostIP: "", HostPort: fmt.Sprintf("%d", 6060+portOffset)}},
"6061/tcp": {{HostIP: "", HostPort: fmt.Sprintf("%d", 6061+portOffset)}},
"6062/tcp": {{HostIP: "", HostPort: fmt.Sprintf("%d", 6062+portOffset)}},
"6063/tcp": {{HostIP: "", HostPort: fmt.Sprintf("%d", 6063+portOffset)}},
"6064/tcp": {{HostIP: "", HostPort: fmt.Sprintf("%d", 6064+portOffset)}},
"6065/tcp": {{HostIP: "", HostPort: fmt.Sprintf("%d", 6065+portOffset)}},
"9090/tcp": {{HostIP: "", HostPort: fmt.Sprintf("%d", 9090+portOffset)}},
"26656/tcp": {{HostIP: "", HostPort: fmt.Sprintf("%d", 26656+portOffset)}},
"26657/tcp": {{HostIP: "", HostPort: fmt.Sprintf("%d", 26657+portOffset)}},
}
}
resource, err := s.dkrPool.RunWithOptions(runOpts, noRestart)
s.Require().NoError(err)
s.valResources[chain.ChainMeta.Id][i] = resource
s.T().Logf("started %s validator container: %s", resource.Container.Name[1:], resource.Container.ID)
}
rpcClient, err := rpchttp.New("tcp://localhost:26657", "/websocket")
s.Require().NoError(err)
s.Require().Eventually(
func() bool {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
status, err := rpcClient.Status(ctx)
if err != nil {
return false
}
// let the node produce a few blocks
if status.SyncInfo.CatchingUp || status.SyncInfo.LatestBlockHeight < 3 {
return false
}
return true
},
5*time.Minute,
time.Second,
"Osmosis node failed to produce blocks",
)
}
func (s *IntegrationTestSuite) runIBCRelayer(chainA *chain.Chain, chainB *chain.Chain) {
s.T().Log("starting Hermes relayer container...")
tmpDir, err := ioutil.TempDir("", "osmosis-e2e-testnet-hermes-")
s.Require().NoError(err)
s.tmpDirs = append(s.tmpDirs, tmpDir)
osmoAVal := chainA.Validators[0]
osmoBVal := chainB.Validators[0]
hermesCfgPath := path.Join(tmpDir, "hermes")
s.Require().NoError(os.MkdirAll(hermesCfgPath, 0o755))
_, err = util.CopyFile(
filepath.Join("./scripts/", "hermes_bootstrap.sh"),
filepath.Join(hermesCfgPath, "hermes_bootstrap.sh"),
)
s.Require().NoError(err)
s.hermesResource, err = s.dkrPool.RunWithOptions(
&dockertest.RunOptions{
Name: fmt.Sprintf("%s-%s-relayer", chainA.ChainMeta.Id, chainB.ChainMeta.Id),
Repository: s.dockerImages.RelayerRepository,
Tag: s.dockerImages.RelayerTag,
NetworkID: s.dkrNet.Network.ID,
Cmd: []string{
"start",
},
User: "root:root",
Mounts: []string{
fmt.Sprintf("%s/:/root/hermes", hermesCfgPath),
},
ExposedPorts: []string{
"3031",
},
PortBindings: map[docker.Port][]docker.PortBinding{
"3031/tcp": {{HostIP: "", HostPort: "3031"}},
},
Env: []string{
fmt.Sprintf("OSMO_A_E2E_CHAIN_ID=%s", chainA.ChainMeta.Id),
fmt.Sprintf("OSMO_B_E2E_CHAIN_ID=%s", chainB.ChainMeta.Id),
fmt.Sprintf("OSMO_A_E2E_VAL_MNEMONIC=%s", osmoAVal.Mnemonic),
fmt.Sprintf("OSMO_B_E2E_VAL_MNEMONIC=%s", osmoBVal.Mnemonic),
fmt.Sprintf("OSMO_A_E2E_VAL_HOST=%s", s.valResources[chainA.ChainMeta.Id][0].Container.Name[1:]),
fmt.Sprintf("OSMO_B_E2E_VAL_HOST=%s", s.valResources[chainB.ChainMeta.Id][0].Container.Name[1:]),
},
Entrypoint: []string{
"sh",
"-c",
"chmod +x /root/hermes/hermes_bootstrap.sh && /root/hermes/hermes_bootstrap.sh",
},
},
noRestart,
)
s.Require().NoError(err)
endpoint := fmt.Sprintf("http://%s/state", s.hermesResource.GetHostPort("3031/tcp"))
s.Require().Eventually(
func() bool {
resp, err := http.Get(endpoint)
if err != nil {
return false
}
defer resp.Body.Close()
bz, err := io.ReadAll(resp.Body)
if err != nil {
return false
}
var respBody map[string]interface{}
if err := json.Unmarshal(bz, &respBody); err != nil {
return false
}
status := respBody["status"].(string)
result := respBody["result"].(map[string]interface{})
return status == "success" && len(result["chains"].([]interface{})) == 2
},
5*time.Minute,
time.Second,
"hermes relayer not healthy",
)
s.T().Logf("started Hermes relayer container: %s", s.hermesResource.Container.ID)
// XXX: Give time to both networks to start, otherwise we might see gRPC
// transport errors.
time.Sleep(10 * time.Second)
// create the client, connection and channel between the two Osmosis chains
s.connectIBCChains(chainA, chainB)
}
func (s *IntegrationTestSuite) configureChain(chainId string, validatorConfigs []*chain.ValidatorConfig, skipValidatorIndexes map[int]struct{}) {
s.T().Logf("starting e2e infrastructure for chain-id: %s", chainId)
tmpDir, err := ioutil.TempDir("", "osmosis-e2e-testnet-")
s.T().Logf("temp directory for chain-id %v: %v", chainId, tmpDir)
s.Require().NoError(err)
validatorConfigBytes, err := json.Marshal(validatorConfigs)
s.Require().NoError(err)
numVal := float32(len(validatorConfigs))
newChainConfig := chainConfig{
votingPeriod: propDepositBlocks + numVal*propVoteBlocks + propBufferBlocks,
skipRunValidatorIndexes: skipValidatorIndexes,
}
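// votingPeriod counts blocks (assumed ~1s each); multiplying by 1e9 converts it to a time.Duration in nanoseconds.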
votingPeriodDuration := time.Duration(int(newChainConfig.votingPeriod) * 1000000000)
s.initResource, err = s.dkrPool.RunWithOptions(
&dockertest.RunOptions{
Name: fmt.Sprintf("%s", chainId),
Repository: s.dockerImages.InitRepository,
Tag: s.dockerImages.InitTag,
NetworkID: s.dkrNet.Network.ID,
Cmd: []string{
fmt.Sprintf("--data-dir=%s", tmpDir),
fmt.Sprintf("--chain-id=%s", chainId),
fmt.Sprintf("--config=%s", validatorConfigBytes),
fmt.Sprintf("--voting-period=%v", votingPeriodDuration),
},
User: "root:root",
Mounts: []string{
fmt.Sprintf("%s:%s", tmpDir, tmpDir),
},
},
noRestart,
)
s.Require().NoError(err)
fileName := fmt.Sprintf("%v/%v-encode", tmpDir, chainId)
s.T().Logf("serialized init file for chain-id %v: %v", chainId, fileName)
// Retry reading and unmarshaling the init file up to maxRetries times, or until the error is nil.
// Without this, the test may try to unmarshal the file before the docker container has finished writing it.
for i := 0; i < maxRetries; i++ {
encJson, _ := os.ReadFile(fileName)
err = json.Unmarshal(encJson, &newChainConfig.chain)
if err == nil {
break
}
if i == maxRetries-1 {
s.Require().NoError(err)
}
if i > 0 {
time.Sleep(1 * time.Second)
}
}
s.Require().NoError(s.dkrPool.Purge(s.initResource))
s.chainConfigs = append(s.chainConfigs, &newChainConfig)
}
func (s *IntegrationTestSuite) configureDockerResources(chainIDOne, chainIDTwo string) {
var err error
s.dkrPool, err = dockertest.NewPool("")
s.Require().NoError(err)
s.dkrNet, err = s.dkrPool.CreateNetwork(fmt.Sprintf("%s-%s-testnet", chainIDOne, chainIDTwo))
s.Require().NoError(err)
s.valResources = make(map[string][]*dockertest.Resource)
}
func noRestart(config *docker.HostConfig) {
// in this case we don't want the nodes to restart on failure
config.RestartPolicy = docker.RestartPolicy{
Name: "no",
}
}
func (s *IntegrationTestSuite) upgrade() {
// submit, deposit, and vote for upgrade proposal
// prop height = current height + voting period + time it takes to submit proposal + small buffer
for _, chainConfig := range s.chainConfigs {
currentHeight := s.getCurrentChainHeight(s.valResources[chainConfig.chain.ChainMeta.Id][0].Container.ID)
chainConfig.propHeight = currentHeight + int(chainConfig.votingPeriod) + int(propSubmitBlocks) + int(propBufferBlocks)
s.submitProposal(chainConfig.chain, chainConfig.propHeight)
s.depositProposal(chainConfig.chain)
s.voteProposal(chainConfig)
}
// wait till all chains halt at upgrade height
for _, chainConfig := range s.chainConfigs {
curChain := chainConfig.chain
for i := range chainConfig.chain.Validators {
if _, ok := chainConfig.skipRunValidatorIndexes[i]; ok {
continue
}
// use a counter to ensure no new blocks are being created: the height must stay at propHeight for three consecutive checks
counter := 0
s.T().Logf("waiting to reach upgrade height on %s validator container: %s", s.valResources[curChain.ChainMeta.Id][i].Container.Name[1:], s.valResources[curChain.ChainMeta.Id][i].Container.ID)
s.Require().Eventually(
func() bool {
currentHeight := s.getCurrentChainHeight(s.valResources[curChain.ChainMeta.Id][i].Container.ID)
if currentHeight != chainConfig.propHeight {
s.T().Logf("current block height on %s is %v, waiting for block %v container: %s", s.valResources[curChain.ChainMeta.Id][i].Container.Name[1:], currentHeight, chainConfig.propHeight, s.valResources[curChain.ChainMeta.Id][i].Container.ID)
}
if currentHeight > chainConfig.propHeight {
panic("chain did not halt at upgrade height")
}
if currentHeight == chainConfig.propHeight {
counter++
}
return counter == 3
},
5*time.Minute,
time.Second,
)
s.T().Logf("reached upgrade height on %s container: %s", s.valResources[curChain.ChainMeta.Id][i].Container.Name[1:], s.valResources[curChain.ChainMeta.Id][i].Container.ID)
}
}
// remove all containers so we can upgrade them to the new version
for _, chainConfig := range s.chainConfigs {
curChain := chainConfig.chain
for valIdx := range curChain.Validators {
if _, ok := chainConfig.skipRunValidatorIndexes[valIdx]; ok {
continue
}
var opts docker.RemoveContainerOptions
opts.ID = s.valResources[curChain.ChainMeta.Id][valIdx].Container.ID
opts.Force = true
s.dkrPool.Client.RemoveContainer(opts)
s.T().Logf("removed container: %s", s.valResources[curChain.ChainMeta.Id][valIdx].Container.Name[1:])
}
}
// restart each chain's containers on the upgraded image
for _, chainConfig := range s.chainConfigs {
s.upgradeContainers(chainConfig, chainConfig.propHeight)
}
}
func (s *IntegrationTestSuite) upgradeContainers(chainConfig *chainConfig, propHeight int) {
// upgrade containers to the locally compiled daemon
chain := chainConfig.chain
s.T().Logf("starting upgrade for chain-id: %s...", chain.ChainMeta.Id)
pwd, err := os.Getwd()
s.Require().NoError(err)
for i, val := range chain.Validators {
if _, ok := chainConfig.skipRunValidatorIndexes[i]; ok {
continue
}
runOpts := &dockertest.RunOptions{
Name: val.Name,
Repository: dockerconfig.LocalOsmoRepository,
Tag: dockerconfig.LocalOsmoTag,
NetworkID: s.dkrNet.Network.ID,
User: "root:root",
Mounts: []string{
fmt.Sprintf("%s/:/osmosis/.osmosisd", val.ConfigDir),
fmt.Sprintf("%s/scripts:/osmosis", pwd),
},
}
resource, err := s.dkrPool.RunWithOptions(runOpts, noRestart)
s.Require().NoError(err)
s.valResources[chain.ChainMeta.Id][i] = resource
s.T().Logf("started %s validator container: %s", resource.Container.Name[1:], resource.Container.ID)
}
// check that we are creating blocks again
for i := range chain.Validators {
if _, ok := chainConfig.skipRunValidatorIndexes[i]; ok {
continue
}
s.Require().Eventually(
func() bool {
currentHeight := s.getCurrentChainHeight(s.valResources[chain.ChainMeta.Id][i].Container.ID)
if currentHeight <= propHeight {
s.T().Logf("current block height on %s is %v, waiting to create blocks container: %s", s.valResources[chain.ChainMeta.Id][i].Container.Name[1:], currentHeight, s.valResources[chain.ChainMeta.Id][i].Container.ID)
}
return currentHeight > propHeight
},
5*time.Minute,
time.Second,
)
s.T().Logf("upgrade successful on %s validator container: %s", s.valResources[chain.ChainMeta.Id][i].Container.Name[1:], s.valResources[chain.ChainMeta.Id][i].Container.ID)
}
}
func (s *IntegrationTestSuite) createPreUpgradeState() {
chainA := s.chainConfigs[0].chain
chainB := s.chainConfigs[1].chain
s.sendIBC(chainA, chainB, chainB.Validators[0].PublicAddress, chain.OsmoToken)
s.sendIBC(chainB, chainA, chainA.Validators[0].PublicAddress, chain.OsmoToken)
s.sendIBC(chainA, chainB, chainB.Validators[0].PublicAddress, chain.StakeToken)
s.sendIBC(chainB, chainA, chainA.Validators[0].PublicAddress, chain.StakeToken)
s.createPool(chainA, "pool1A.json")
s.createPool(chainB, "pool1B.json")
}
func (s *IntegrationTestSuite) runPostUpgradeTests() {
chainA := s.chainConfigs[0].chain
chainB := s.chainConfigs[1].chain
s.sendIBC(chainA, chainB, chainB.Validators[0].PublicAddress, chain.OsmoToken)
s.sendIBC(chainB, chainA, chainA.Validators[0].PublicAddress, chain.OsmoToken)
s.sendIBC(chainA, chainB, chainB.Validators[0].PublicAddress, chain.StakeToken)
s.sendIBC(chainB, chainA, chainA.Validators[0].PublicAddress, chain.StakeToken)
s.createPool(chainA, "pool2A.json")
s.createPool(chainB, "pool2B.json")
}
|
[
"\"OSMOSIS_E2E_SKIP_UPGRADE\"",
"\"OSMOSIS_E2E_SKIP_CLEANUP\""
] |
[] |
[
"OSMOSIS_E2E_SKIP_UPGRADE",
"OSMOSIS_E2E_SKIP_CLEANUP"
] |
[]
|
["OSMOSIS_E2E_SKIP_UPGRADE", "OSMOSIS_E2E_SKIP_CLEANUP"]
|
go
| 2 | 0 | |
pkg/operator/ceph/csi/spec.go
|
/*
Copyright 2019 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"fmt"
"os"
"strconv"
"strings"
"github.com/pkg/errors"
"github.com/rook/rook/pkg/operator/k8sutil"
apps "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
k8scsi "k8s.io/api/storage/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/kubernetes"
)
type Param struct {
CSIPluginImage string
RegistrarImage string
ProvisionerImage string
AttacherImage string
SnapshotterImage string
ResizerImage string
DriverNamePrefix string
EnableSnapshotter string
EnableCSIGRPCMetrics string
KubeletDirPath string
ForceCephFSKernelClient string
CephFSPluginUpdateStrategy string
RBDPluginUpdateStrategy string
LogLevel uint8
CephFSGRPCMetricsPort uint16
CephFSLivenessMetricsPort uint16
RBDGRPCMetricsPort uint16
RBDLivenessMetricsPort uint16
}
type templateParam struct {
Param
// non-global template only parameters
Namespace string
}
var (
CSIParam Param
EnableRBD = false
EnableCephFS = false
EnableCSIGRPCMetrics = false
//driver names
CephFSDriverName string
RBDDriverName string
// template paths
RBDPluginTemplatePath string
RBDProvisionerSTSTemplatePath string
RBDProvisionerDepTemplatePath string
CephFSPluginTemplatePath string
CephFSProvisionerSTSTemplatePath string
CephFSProvisionerDepTemplatePath string
// configuration map for csi
ConfigName = "rook-ceph-csi-config"
ConfigKey = "csi-cluster-config-json"
)
// Specify default images as var instead of const so that they can be overridden with the Go
// linker's -X flag. This allows users to easily build images with a different opinionated set of
// images without having to specify them manually in charts/manifests, which can make manual
// upgrades more challenging.
var (
// image names
DefaultCSIPluginImage = "quay.io/cephcsi/cephcsi:v2.0.0"
DefaultRegistrarImage = "quay.io/k8scsi/csi-node-driver-registrar:v1.2.0"
DefaultProvisionerImage = "quay.io/k8scsi/csi-provisioner:v1.4.0"
DefaultAttacherImage = "quay.io/k8scsi/csi-attacher:v2.1.0"
DefaultSnapshotterImage = "quay.io/k8scsi/csi-snapshotter:v1.2.2"
defaultResizerImage = "quay.io/k8scsi/csi-resizer:v0.4.0"
)
const (
KubeMinMajor = "1"
KubeMinMinor = "13"
provDeploymentSuppVersion = "14"
kubeMinVerForFilesystemRestore = "15"
kubeMinVerForBlockRestore = "16"
// toleration and node affinity
provisionerTolerationsEnv = "CSI_PROVISIONER_TOLERATIONS"
provisionerNodeAffinityEnv = "CSI_PROVISIONER_NODE_AFFINITY"
pluginTolerationsEnv = "CSI_PLUGIN_TOLERATIONS"
pluginNodeAffinityEnv = "CSI_PLUGIN_NODE_AFFINITY"
// kubelet directory path
DefaultKubeletDirPath = "/var/lib/kubelet"
// template
DefaultRBDPluginTemplatePath = "/etc/ceph-csi/rbd/csi-rbdplugin.yaml"
DefaultRBDProvisionerSTSTemplatePath = "/etc/ceph-csi/rbd/csi-rbdplugin-provisioner-sts.yaml"
DefaultRBDProvisionerDepTemplatePath = "/etc/ceph-csi/rbd/csi-rbdplugin-provisioner-dep.yaml"
DefaultRBDPluginServiceTemplatePath = "/etc/ceph-csi/rbd/csi-rbdplugin-svc.yaml"
DefaultCephFSPluginTemplatePath = "/etc/ceph-csi/cephfs/csi-cephfsplugin.yaml"
DefaultCephFSProvisionerSTSTemplatePath = "/etc/ceph-csi/cephfs/csi-cephfsplugin-provisioner-sts.yaml"
DefaultCephFSProvisionerDepTemplatePath = "/etc/ceph-csi/cephfs/csi-cephfsplugin-provisioner-dep.yaml"
DefaultCephFSPluginServiceTemplatePath = "/etc/ceph-csi/cephfs/csi-cephfsplugin-svc.yaml"
// grpc metrics and liveness port for cephfs and rbd
DefaultCephFSGRPCMerticsPort uint16 = 9091
DefaultCephFSLivenessMerticsPort uint16 = 9081
DefaultRBDGRPCMerticsPort uint16 = 9090
DefaultRBDLivenessMerticsPort uint16 = 9080
// default log level for csi containers
defaultLogLevel uint8 = 0
)
func CSIEnabled() bool {
return EnableRBD || EnableCephFS
}
func ValidateCSIParam() error {
if len(CSIParam.CSIPluginImage) == 0 {
return errors.New("missing csi rbd plugin image")
}
if len(CSIParam.RegistrarImage) == 0 {
return errors.New("missing csi registrar image")
}
if len(CSIParam.ProvisionerImage) == 0 {
return errors.New("missing csi provisioner image")
}
if len(CSIParam.AttacherImage) == 0 {
return errors.New("missing csi attacher image")
}
if EnableRBD {
if len(RBDPluginTemplatePath) == 0 {
return errors.New("missing rbd plugin template path")
}
if len(RBDProvisionerSTSTemplatePath) == 0 && len(RBDProvisionerDepTemplatePath) == 0 {
return errors.New("missing rbd provisioner template path")
}
}
if EnableCephFS {
if len(CephFSPluginTemplatePath) == 0 {
return errors.New("missing cephfs plugin template path")
}
if len(CephFSProvisionerSTSTemplatePath) == 0 && len(CephFSProvisionerDepTemplatePath) == 0 {
return errors.New("missing ceph provisioner template path")
}
}
return nil
}
func StartCSIDrivers(namespace string, clientset kubernetes.Interface, ver *version.Info) error {
var (
err error
rbdPlugin, cephfsPlugin *apps.DaemonSet
rbdProvisionerSTS, cephfsProvisionerSTS *apps.StatefulSet
rbdProvisionerDeployment, cephfsProvisionerDeployment *apps.Deployment
deployProvSTS bool
rbdService, cephfsService *corev1.Service
)
// create an empty config map. config map will be filled with data
// later when clusters have mons
_, err = CreateCsiConfigMap(namespace, clientset)
if err != nil {
return errors.Wrapf(err, "failed creating csi config map")
}
tp := templateParam{
Param: CSIParam,
Namespace: namespace,
}
// if the user didn't specify a custom DriverNamePrefix use
// the namespace (and a dot).
if tp.DriverNamePrefix == "" {
tp.DriverNamePrefix = fmt.Sprintf("%s.", namespace)
}
CephFSDriverName = tp.DriverNamePrefix + "cephfs.csi.ceph.com"
RBDDriverName = tp.DriverNamePrefix + "rbd.csi.ceph.com"
tp.EnableCSIGRPCMetrics = fmt.Sprintf("%t", EnableCSIGRPCMetrics)
// If not set or set to anything but "false", the kernel client will be enabled
kClient := os.Getenv("CSI_FORCE_CEPHFS_KERNEL_CLIENT")
if strings.EqualFold(kClient, "false") {
tp.ForceCephFSKernelClient = "false"
} else {
tp.ForceCephFSKernelClient = "true"
}
// parse GRPC and Liveness ports
tp.CephFSGRPCMetricsPort = getPortFromENV("CSI_CEPHFS_GRPC_METRICS_PORT", DefaultCephFSGRPCMerticsPort)
tp.CephFSLivenessMetricsPort = getPortFromENV("CSI_CEPHFS_LIVENESS_METRICS_PORT", DefaultCephFSLivenessMerticsPort)
tp.RBDGRPCMetricsPort = getPortFromENV("CSI_RBD_GRPC_METRICS_PORT", DefaultRBDGRPCMerticsPort)
tp.RBDLivenessMetricsPort = getPortFromENV("CSI_RBD_LIVENESS_METRICS_PORT", DefaultRBDLivenessMerticsPort)
enableSnap := os.Getenv("CSI_ENABLE_SNAPSHOTTER")
if !strings.EqualFold(enableSnap, "false") {
tp.EnableSnapshotter = "true"
}
updateStrategy := os.Getenv("CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY")
if strings.EqualFold(updateStrategy, "ondelete") {
tp.CephFSPluginUpdateStrategy = "OnDelete"
} else {
tp.CephFSPluginUpdateStrategy = "RollingUpdate"
}
updateStrategy = os.Getenv("CSI_RBD_PLUGIN_UPDATE_STRATEGY")
if strings.EqualFold(updateStrategy, "ondelete") {
tp.RBDPluginUpdateStrategy = "OnDelete"
} else {
tp.RBDPluginUpdateStrategy = "RollingUpdate"
}
if ver.Major > KubeMinMajor || (ver.Major == KubeMinMajor && ver.Minor < provDeploymentSuppVersion) {
deployProvSTS = true
}
tp.ResizerImage = os.Getenv("ROOK_CSI_RESIZER_IMAGE")
if tp.ResizerImage == "" {
tp.ResizerImage = defaultResizerImage
}
if ver.Major < KubeMinMajor || ver.Major == KubeMinMajor && ver.Minor < kubeMinVerForFilesystemRestore {
logger.Warning("CSI Filesystem volume expansion requires Kubernetes version >=1.15.0")
}
if ver.Major < KubeMinMajor || ver.Major == KubeMinMajor && ver.Minor < kubeMinVerForBlockRestore {
logger.Warning("CSI Block volume expansion requires Kubernetes version >=1.16.0")
}
logLevel := os.Getenv("CSI_LOG_LEVEL")
tp.LogLevel = defaultLogLevel
if logLevel != "" {
l, err := strconv.ParseUint(logLevel, 10, 8)
if err != nil {
logger.Errorf("failed to parse CSI_LOG_LEVEL. Defaulting to %d. %v", defaultLogLevel, err)
} else {
tp.LogLevel = uint8(l)
}
}
if EnableRBD {
rbdPlugin, err = templateToDaemonSet("rbdplugin", RBDPluginTemplatePath, tp)
if err != nil {
return errors.Wrapf(err, "failed to load rbdplugin template")
}
if deployProvSTS {
rbdProvisionerSTS, err = templateToStatefulSet("rbd-provisioner", RBDProvisionerSTSTemplatePath, tp)
if err != nil {
return errors.Wrapf(err, "failed to load rbd provisioner statefulset template")
}
} else {
rbdProvisionerDeployment, err = templateToDeployment("rbd-provisioner", RBDProvisionerDepTemplatePath, tp)
if err != nil {
return errors.Wrapf(err, "failed to load rbd provisioner deployment template")
}
}
rbdService, err = templateToService("rbd-service", DefaultRBDPluginServiceTemplatePath, tp)
if err != nil {
return errors.Wrapf(err, "failed to load rbd plugin service template")
}
}
if EnableCephFS {
cephfsPlugin, err = templateToDaemonSet("cephfsplugin", CephFSPluginTemplatePath, tp)
if err != nil {
return errors.Wrapf(err, "failed to load CephFS plugin template")
}
if deployProvSTS {
cephfsProvisionerSTS, err = templateToStatefulSet("cephfs-provisioner", CephFSProvisionerSTSTemplatePath, tp)
if err != nil {
return errors.Wrapf(err, "failed to load CephFS provisioner statefulset template")
}
} else {
cephfsProvisionerDeployment, err = templateToDeployment("cephfs-provisioner", CephFSProvisionerDepTemplatePath, tp)
if err != nil {
return errors.Wrapf(err, "failed to load rbd provisioner deployment template")
}
}
cephfsService, err = templateToService("cephfs-service", DefaultCephFSPluginServiceTemplatePath, tp)
if err != nil {
return errors.Wrapf(err, "failed to load cephfs plugin service template")
}
}
// get provisioner toleration and node affinity
provisionerTolerations := getToleration(true)
provisionerNodeAffinity := getNodeAffinity(true)
// get plugin toleration and node affinity
pluginTolerations := getToleration(false)
pluginNodeAffinity := getNodeAffinity(false)
if rbdPlugin != nil {
applyToPodSpec(&rbdPlugin.Spec.Template.Spec, pluginNodeAffinity, pluginTolerations)
err = k8sutil.CreateDaemonSet("csi-rbdplugin", namespace, clientset, rbdPlugin)
if err != nil {
return errors.Wrapf(err, "failed to start rbdplugin daemonset: %+v", rbdPlugin)
}
k8sutil.AddRookVersionLabelToDaemonSet(rbdPlugin)
}
if rbdProvisionerSTS != nil {
applyToPodSpec(&rbdProvisionerSTS.Spec.Template.Spec, provisionerNodeAffinity, provisionerTolerations)
err = k8sutil.CreateStatefulSet("csi-rbdplugin-provisioner", namespace, clientset, rbdProvisionerSTS)
if err != nil {
return errors.Wrapf(err, "failed to start rbd provisioner statefulset: %+v", rbdProvisionerSTS)
}
k8sutil.AddRookVersionLabelToStatefulSet(rbdProvisionerSTS)
} else if rbdProvisionerDeployment != nil {
applyToPodSpec(&rbdProvisionerDeployment.Spec.Template.Spec, provisionerNodeAffinity, provisionerTolerations)
err = k8sutil.CreateDeployment("csi-rbdplugin-provisioner", namespace, clientset, rbdProvisionerDeployment)
if err != nil {
return errors.Wrapf(err, "failed to start rbd provisioner deployment: %+v", rbdProvisionerDeployment)
}
k8sutil.AddRookVersionLabelToDeployment(rbdProvisionerDeployment)
}
if rbdService != nil {
_, err = k8sutil.CreateOrUpdateService(clientset, namespace, rbdService)
if err != nil {
return errors.Wrapf(err, "failed to create rbd service: %+v", rbdService)
}
}
if cephfsPlugin != nil {
applyToPodSpec(&cephfsPlugin.Spec.Template.Spec, pluginNodeAffinity, pluginTolerations)
err = k8sutil.CreateDaemonSet("csi-cephfsplugin", namespace, clientset, cephfsPlugin)
if err != nil {
return errors.Wrapf(err, "failed to start cephfs plugin daemonset: %+v", cephfsPlugin)
}
k8sutil.AddRookVersionLabelToDaemonSet(cephfsPlugin)
}
if cephfsProvisionerSTS != nil {
applyToPodSpec(&cephfsProvisionerSTS.Spec.Template.Spec, provisionerNodeAffinity, provisionerTolerations)
err = k8sutil.CreateStatefulSet("csi-cephfsplugin-provisioner", namespace, clientset, cephfsProvisionerSTS)
if err != nil {
return errors.Wrapf(err, "failed to start cephfs provisioner statefulset: %+v", cephfsProvisionerSTS)
}
k8sutil.AddRookVersionLabelToStatefulSet(cephfsProvisionerSTS)
} else if cephfsProvisionerDeployment != nil {
applyToPodSpec(&cephfsProvisionerDeployment.Spec.Template.Spec, provisionerNodeAffinity, provisionerTolerations)
err = k8sutil.CreateDeployment("csi-cephfsplugin-provisioner", namespace, clientset, cephfsProvisionerDeployment)
if err != nil {
return errors.Wrapf(err, "failed to start cephfs provisioner deployment: %+v", cephfsProvisionerDeployment)
}
k8sutil.AddRookVersionLabelToDeployment(cephfsProvisionerDeployment)
}
if cephfsService != nil {
_, err = k8sutil.CreateOrUpdateService(clientset, namespace, cephfsService)
if err != nil {
return errors.Wrapf(err, "failed to create rbd service: %+v", cephfsService)
}
}
if ver.Major > KubeMinMajor || (ver.Major == KubeMinMajor && ver.Minor >= provDeploymentSuppVersion) {
err = createCSIDriverInfo(clientset, RBDDriverName)
if err != nil {
return errors.Wrapf(err, "failed to create CSI driver object for %q", RBDDriverName)
}
err = createCSIDriverInfo(clientset, CephFSDriverName)
if err != nil {
return errors.Wrapf(err, "failed to create CSI driver object for %q", CephFSDriverName)
}
}
return nil
}
func StopCSIDrivers(namespace string, clientset kubernetes.Interface) error {
logger.Warningf("Skipped removing the CSI driver")
// FIX: Restore the ownerRefs for all CSI resources in order to delegate entirely to the ConfigMap deletion.
return DeleteCsiConfigMap(namespace, clientset)
}
// createCSIDriverInfo registers the CSI driver by creating a CSIDriver object
func createCSIDriverInfo(clientset kubernetes.Interface, name string) error {
attach := true
mountInfo := false
// Create CSIDriver object
csiDriver := &k8scsi.CSIDriver{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: k8scsi.CSIDriverSpec{
AttachRequired: &attach,
PodInfoOnMount: &mountInfo,
},
}
csidrivers := clientset.StorageV1beta1().CSIDrivers()
_, err := csidrivers.Create(csiDriver)
if err == nil {
logger.Infof("CSIDriver object created for driver %q", name)
return nil
}
if apierrors.IsAlreadyExists(err) {
logger.Infof("CSIDriver CRD already had been registered for %q", name)
return nil
}
return err
}
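// Sketch (not from the original source): the object created above is roughly
// equivalent to the following manifest, with the driver name purely
// illustrative:
//
//	apiVersion: storage.k8s.io/v1beta1
//	kind: CSIDriver
//	metadata:
//	  name: example.rbd.csi.ceph.com
//	spec:
//	  attachRequired: true
//	  podInfoOnMount: false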
|
[
"\"CSI_FORCE_CEPHFS_KERNEL_CLIENT\"",
"\"CSI_ENABLE_SNAPSHOTTER\"",
"\"CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY\"",
"\"CSI_RBD_PLUGIN_UPDATE_STRATEGY\"",
"\"ROOK_CSI_RESIZER_IMAGE\"",
"\"CSI_LOG_LEVEL\""
] |
[] |
[
"CSI_LOG_LEVEL",
"CSI_FORCE_CEPHFS_KERNEL_CLIENT",
"ROOK_CSI_RESIZER_IMAGE",
"CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY",
"CSI_ENABLE_SNAPSHOTTER",
"CSI_RBD_PLUGIN_UPDATE_STRATEGY"
] |
[]
|
["CSI_LOG_LEVEL", "CSI_FORCE_CEPHFS_KERNEL_CLIENT", "ROOK_CSI_RESIZER_IMAGE", "CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY", "CSI_ENABLE_SNAPSHOTTER", "CSI_RBD_PLUGIN_UPDATE_STRATEGY"]
|
go
| 6 | 0 | |
cmd/authelia/main.go
|
package main
import (
"errors"
"fmt"
"log"
"os"
"github.com/clems4ever/authelia/internal/authentication"
"github.com/clems4ever/authelia/internal/authorization"
"github.com/clems4ever/authelia/internal/configuration"
"github.com/clems4ever/authelia/internal/logging"
"github.com/clems4ever/authelia/internal/middlewares"
"github.com/clems4ever/authelia/internal/notification"
"github.com/clems4ever/authelia/internal/regulation"
"github.com/clems4ever/authelia/internal/server"
"github.com/clems4ever/authelia/internal/session"
"github.com/clems4ever/authelia/internal/storage"
"github.com/clems4ever/authelia/internal/utils"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var configPathFlag string
func startServer() {
if configPathFlag == "" {
log.Fatal(errors.New("No config file path provided"))
}
if os.Getenv("ENVIRONMENT") == "dev" {
logging.Logger().Info("===> Authelia is running in development mode. <===")
}
config, errs := configuration.Read(configPathFlag)
if len(errs) > 0 {
for _, err := range errs {
logging.Logger().Error(err)
}
panic(errors.New("Some errors have been reported"))
}
switch config.LogsLevel {
case "info":
logging.Logger().Info("Logging severity set to info")
logging.SetLevel(logrus.InfoLevel)
case "debug":
logging.Logger().Info("Logging severity set to debug")
logging.SetLevel(logrus.DebugLevel)
case "trace":
logging.Logger().Info("Logging severity set to trace")
logging.SetLevel(logrus.TraceLevel)
}
var userProvider authentication.UserProvider
if config.AuthenticationBackend.File != nil {
userProvider = authentication.NewFileUserProvider(config.AuthenticationBackend.File.Path)
} else if config.AuthenticationBackend.Ldap != nil {
userProvider = authentication.NewLDAPUserProvider(*config.AuthenticationBackend.Ldap)
} else {
log.Fatalf("Unrecognized authentication backend")
}
var storageProvider storage.Provider
if config.Storage.PostgreSQL != nil {
storageProvider = storage.NewPostgreSQLProvider(*config.Storage.PostgreSQL)
} else if config.Storage.MySQL != nil {
storageProvider = storage.NewMySQLProvider(*config.Storage.MySQL)
} else if config.Storage.Local != nil {
storageProvider = storage.NewSQLiteProvider(config.Storage.Local.Path)
} else {
log.Fatalf("Unrecognized storage backend")
}
var notifier notification.Notifier
if config.Notifier.SMTP != nil {
notifier = notification.NewSMTPNotifier(*config.Notifier.SMTP)
} else if config.Notifier.FileSystem != nil {
notifier = notification.NewFileNotifier(*config.Notifier.FileSystem)
} else {
log.Fatalf("Unrecognized notifier")
}
clock := utils.RealClock{}
authorizer := authorization.NewAuthorizer(*config.AccessControl)
sessionProvider := session.NewProvider(config.Session)
regulator := regulation.NewRegulator(config.Regulation, storageProvider, clock)
providers := middlewares.Providers{
Authorizer: authorizer,
UserProvider: userProvider,
Regulator: regulator,
StorageProvider: storageProvider,
Notifier: notifier,
SessionProvider: sessionProvider,
}
server.StartServer(*config, providers)
}
func main() {
rootCmd := &cobra.Command{
Use: "authelia",
Run: func(cmd *cobra.Command, args []string) {
startServer()
},
}
rootCmd.Flags().StringVar(&configPathFlag, "config", "", "Configuration file")
versionCmd := &cobra.Command{
Use: "version",
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("build git tag: %s\n", BuildTag)
fmt.Printf("build git commit: %s\n", BuildCommit)
fmt.Printf("build time: %s\n", BuildTime)
},
}
rootCmd.AddCommand(versionCmd)
rootCmd.Execute()
}
|
[
"\"ENVIRONMENT\""
] |
[] |
[
"ENVIRONMENT"
] |
[]
|
["ENVIRONMENT"]
|
go
| 1 | 0 | |
dandi/tests/test_utils.py
|
import inspect
import os
import os.path as op
import time
import pytest
import requests
import responses
from semantic_version import Version
from .. import __version__
from ..consts import DandiInstance, known_instances
from ..exceptions import BadCliVersionError, CliVersionTooOldError
from ..utils import (
ensure_datetime,
ensure_strtime,
find_files,
flatten,
flattened,
get_instance,
get_mime_type,
get_module_version,
get_utcnow_datetime,
is_same_time,
on_windows,
remap_dict,
)
def test_find_files():
tests_dir = op.dirname(__file__)
proj_dir = op.normpath(op.join(op.dirname(__file__), op.pardir))
ff = find_files(".*", proj_dir)
assert inspect.isgenerator(ff)
files = list(ff)
assert len(files) > 3 # we have more than 3 test files here
assert op.join(tests_dir, "test_utils.py") in files
# and no directories should be mentioned
assert tests_dir not in files
ff2 = find_files(".*", proj_dir, dirs=True)
files2 = list(ff2)
assert op.join(tests_dir, "test_utils.py") in files2
assert tests_dir in files2
# now actually matching the path
ff3 = find_files(
r".*\\test_.*\.py$" if on_windows else r".*/test_.*\.py$", proj_dir, dirs=True
)
files3 = list(ff3)
assert op.join(tests_dir, "test_utils.py") in files3
assert tests_dir not in files3
for f in files3:
assert op.basename(f).startswith("test_")
# TODO: more tests
def test_find_files_dotfiles(tmpdir):
tmpsubdir = tmpdir.mkdir("subdir")
for p in (".dot.nwb", "regular", ".git"):
for f in (tmpdir / p, tmpsubdir / p):
f.write_text("", "utf-8")
def relpaths(paths):
return sorted(op.relpath(p, tmpdir) for p in paths)
regular = ["regular", op.join("subdir", "regular")]
dotfiles = [".dot.nwb", op.join("subdir", ".dot.nwb")]
vcs = [".git", op.join("subdir", ".git")]
ff = find_files(".*", tmpdir)
assert relpaths(ff) == regular
ff = find_files(".*", tmpdir, exclude_dotfiles=False)
# we still exclude VCS
assert relpaths(ff) == sorted(regular + dotfiles)
# VCS paths are also dotfiles, so the default dotfile filter still excludes them
ff = find_files(".*", tmpdir, exclude_vcs=False)
assert relpaths(ff) == regular
# with both dotfile and VCS exclusion disabled, VCS paths show up as well
ff = find_files(".*", tmpdir, exclude_vcs=False, exclude_dotfiles=False)
assert relpaths(ff) == sorted(regular + dotfiles + vcs)
def test_times_manipulations():
t0 = get_utcnow_datetime()
t0_isoformat = ensure_strtime(t0)
t0_str = ensure_strtime(t0, isoformat=False)
assert t0 == ensure_datetime(t0)
assert isinstance(t0_isoformat, str)
# Test comparison and round-trips
assert is_same_time(t0, t0_isoformat, t0_str)
assert is_same_time(t0, t0_str)
assert is_same_time(t0, t0_str, tolerance=0) # exactly the same
assert t0_str != t0_isoformat # " " vs "T"
time.sleep(0.001) # so there is a definite notable delay, in particular for Windows
t1_epoch = time.time()
t1 = ensure_datetime(t1_epoch)
assert is_same_time(t1, t1_epoch)
# We must not consume more than half a second between start of this test
# and here
assert is_same_time(t0, t1, tolerance=0.5)
assert is_same_time(t1, t0, tolerance=0.5)
# but must not be exactly the same unless we are way too fast or disregard
# milliseconds
assert not is_same_time(t0, t1, tolerance=0)
assert is_same_time(t0, t1_epoch + 100, tolerance=101)
@pytest.mark.parametrize(
"t", ["2018-09-26 17:29:17.000000-07:00", "2018-09-26 17:29:17-07:00"]
)
def test_time_samples(t):
assert is_same_time(
ensure_datetime(t), "2018-09-27 00:29:17-00:00", tolerance=0
) # exactly the same
def test_flatten():
assert inspect.isgenerator(flatten([1]))
# flattened is just a list() around flatten
assert flattened([1, [2, 3, [4]], 5, (i for i in range(2))]) == [
1,
2,
3,
4,
5,
0,
1,
]
@pytest.mark.parametrize(
"from_,revmapping,to",
[
({"1": 2}, {"1": "1"}, {"1": 2}),
({1: 2}, {(1,): [1]}, {1: 2}), # if path must not be string, use list or tuple
(
{1: 2},
{"sub.key": (1,)},
{"sub": {"key": 2}},
), # if path must not be string, use list or tuple
(
{1: 2, "a": {"b": [1]}},
{"sub.key": (1,), "sub.key2.blah": "a.b"},
{"sub": {"key": 2, "key2": {"blah": [1]}}},
),
],
)
def test_remap_dict(from_, revmapping, to):
assert remap_dict(from_, revmapping) == to
redirector_base = known_instances["dandi"].redirector
@responses.activate
def test_get_instance_dandi_with_api():
responses.add(
responses.GET,
f"{redirector_base}/server-info",
json={
"version": "1.0.0",
"cli-minimal-version": "0.5.0",
"cli-bad-versions": [],
"services": {
"webui": {"url": "https://gui.dandi"},
"api": {"url": "https://api.dandi"},
"jupyterhub": {"url": "https://hub.dandi"},
},
},
)
assert get_instance("dandi") == DandiInstance(
gui="https://gui.dandi",
redirector=redirector_base,
api="https://api.dandi",
)
@responses.activate
def test_get_instance_url():
responses.add(
responses.GET,
"https://example.dandi/server-info",
json={
"version": "1.0.0",
"cli-minimal-version": "0.5.0",
"cli-bad-versions": [],
"services": {
"webui": {"url": "https://gui.dandi"},
"api": {"url": "https://api.dandi"},
"jupyterhub": {"url": "https://hub.dandi"},
},
},
)
assert get_instance("https://example.dandi/") == DandiInstance(
gui="https://gui.dandi",
redirector="https://example.dandi/",
api="https://api.dandi",
)
@responses.activate
def test_get_instance_cli_version_too_old():
responses.add(
responses.GET,
"https://example.dandi/server-info",
json={
"version": "1.0.0",
"cli-minimal-version": "99.99.99",
"cli-bad-versions": [],
"services": {
"webui": {"url": "https://gui.dandi"},
"api": {"url": "https://api.dandi"},
"jupyterhub": {"url": "https://hub.dandi"},
},
},
)
with pytest.raises(CliVersionTooOldError) as excinfo:
get_instance("https://example.dandi/")
assert str(excinfo.value) == (
f"Client version {__version__} is too old!"
" Server requires at least version 99.99.99"
)
@responses.activate
def test_get_instance_bad_cli_version():
responses.add(
responses.GET,
"https://example.dandi/server-info",
json={
"version": "1.0.0",
"cli-minimal-version": "0.5.0",
"cli-bad-versions": [__version__],
"services": {
"webui": {"url": "https://gui.dandi"},
"api": {"url": "https://api.dandi"},
"jupyterhub": {"url": "https://hub.dandi"},
},
},
)
with pytest.raises(BadCliVersionError) as excinfo:
get_instance("https://example.dandi/")
assert str(excinfo.value) == (
f"Client version {__version__} is rejected by server!"
f" Server requires at least version 0.5.0 (but not {__version__})"
)
@responses.activate
def test_get_instance_id_bad_response():
responses.add(
responses.GET,
f"{redirector_base}/server-info",
body="404 -- not found",
status=404,
)
assert get_instance("dandi") is known_instances["dandi"]
@responses.activate
def test_get_instance_known_url_bad_response():
responses.add(
responses.GET,
f"{redirector_base}/server-info",
body="404 -- not found",
status=404,
)
assert get_instance(redirector_base) is known_instances["dandi"]
@responses.activate
def test_get_instance_unknown_url_bad_response():
responses.add(
responses.GET,
"https://dandi.nil/server-info",
body="404 -- not found",
status=404,
)
with pytest.raises(RuntimeError) as excinfo:
get_instance("https://dandi.nil")
assert str(excinfo.value) == (
"Could not retrieve server info from https://dandi.nil,"
" and client does not recognize URL"
)
@responses.activate
def test_get_instance_bad_version_from_server():
responses.add(
responses.GET,
"https://example.dandi/server-info",
json={
"version": "1.0.0",
"cli-minimal-version": "foobar",
"cli-bad-versions": [],
"services": {
"webui": {"url": "https://gui.dandi"},
"api": {"url": "https://api.dandi"},
"jupyterhub": {"url": "https://hub.dandi"},
},
},
)
with pytest.raises(ValueError) as excinfo:
get_instance("https://example.dandi/")
assert str(excinfo.value).startswith(
"https://example.dandi/ returned an incorrectly formatted version;"
" please contact that server's administrators: "
)
assert "foobar" in str(excinfo.value)
def test_get_instance_actual_dandi():
inst = get_instance("dandi")
assert inst.api is not None
if "DANDI_REDIRECTOR_BASE" in os.environ:
using_docker = pytest.mark.usefixtures("local_dandi_api")
else:
def using_docker(f):
return f
@pytest.mark.redirector
@using_docker
def test_server_info():
r = requests.get(f"{redirector_base}/server-info")
r.raise_for_status()
data = r.json()
assert "version" in data
assert Version(data["version"]) >= Version("1.2.0")
assert "cli-minimal-version" in data
assert "cli-bad-versions" in data
assert "services" in data
def test_get_module_version():
import pynwb
import dandi
assert get_module_version(dandi) == __version__
assert get_module_version("dandi") == __version__
assert get_module_version("pynwb") == pynwb.__version__
assert get_module_version("abracadabra123") is None
@pytest.mark.parametrize(
"filename,mtype",
[
("foo.txt", "text/plain"),
("foo", "application/octet-stream"),
("foo.gz", "application/gzip"),
("foo.tar.gz", "application/gzip"),
("foo.tgz", "application/gzip"),
("foo.taz", "application/gzip"),
("foo.svg.gz", "application/gzip"),
("foo.svgz", "application/gzip"),
("foo.Z", "application/x-compress"),
("foo.tar.Z", "application/x-compress"),
("foo.bz2", "application/x-bzip2"),
("foo.tar.bz2", "application/x-bzip2"),
("foo.tbz2", "application/x-bzip2"),
("foo.xz", "application/x-xz"),
("foo.tar.xz", "application/x-xz"),
("foo.txz", "application/x-xz"),
],
)
def test_get_mime_type(filename, mtype):
assert get_mime_type(filename) == mtype
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
influxdb/tests/__init__.py
|
# -*- coding: utf-8 -*-
"""Configure the tests package for InfluxDBClient."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
import unittest
using_pypy = hasattr(sys, "pypy_version_info")
skip_if_pypy = unittest.skipIf(using_pypy, "Skipping this test on pypy.")
_skip_server_tests = os.environ.get(
'INFLUXDB_PYTHON_SKIP_SERVER_TESTS',
None) == 'True'
skip_server_tests = unittest.skipIf(_skip_server_tests,
"Skipping server tests...")
|
[] |
[] |
[
"INFLUXDB_PYTHON_SKIP_SERVER_TESTS"
] |
[]
|
["INFLUXDB_PYTHON_SKIP_SERVER_TESTS"]
|
python
| 1 | 0 | |
configure.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""configure script to get build parameters from user."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import errno
import os
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
_DEFAULT_CUDA_VERSION = '10.2'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_TENSORRT_VERSION = '6'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
_TF_OPENCL_VERSION = '1.2'
_DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp'
_DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include'
_SUPPORTED_ANDROID_NDK_VERSIONS = [10, 11, 12, 13, 14, 15, 16, 17, 18]
_DEFAULT_PROMPT_ASK_ATTEMPTS = 10
_TF_BAZELRC_FILENAME = '.tf_configure.bazelrc'
_TF_WORKSPACE_ROOT = ''
_TF_BAZELRC = ''
_TF_CURRENT_BAZEL_VERSION = None
_TF_MIN_BAZEL_VERSION = '0.27.1'
_TF_MAX_BAZEL_VERSION = '0.29.1'
NCCL_LIB_PATHS = [
'lib64/', 'lib/powerpc64le-linux-gnu/', 'lib/x86_64-linux-gnu/', ''
]
# List of files to configure when building Bazel on Apple platforms.
APPLE_BAZEL_FILES = [
'tensorflow/lite/experimental/ios/BUILD',
'tensorflow/lite/experimental/objc/BUILD',
'tensorflow/lite/experimental/swift/BUILD'
]
# List of files to move when building for iOS.
IOS_FILES = [
'tensorflow/lite/experimental/objc/TensorFlowLiteObjC.podspec',
'tensorflow/lite/experimental/swift/TensorFlowLiteSwift.podspec',
]
class UserInputError(Exception):
pass
def is_windows():
return platform.system() == 'Windows'
def is_linux():
return platform.system() == 'Linux'
def is_macos():
return platform.system() == 'Darwin'
def is_ppc64le():
return platform.machine() == 'ppc64le'
def is_cygwin():
return platform.system().startswith('CYGWIN_NT')
def get_input(question):
try:
try:
answer = raw_input(question)
except NameError:
answer = input(question) # pylint: disable=bad-builtin
except EOFError:
answer = ''
return answer
def symlink_force(target, link_name):
"""Force symlink, equivalent of 'ln -sf'.
Args:
target: items to link to.
link_name: name of the link.
"""
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def sed_in_place(filename, old, new):
"""Replace old string with new string in file.
Args:
filename: string for filename.
old: string to replace.
new: new string to replace to.
"""
with open(filename, 'r') as f:
filedata = f.read()
newdata = filedata.replace(old, new)
with open(filename, 'w') as f:
f.write(newdata)
def write_to_bazelrc(line):
with open(_TF_BAZELRC, 'a') as f:
f.write(line + '\n')
def write_action_env_to_bazelrc(var_name, var):
write_to_bazelrc('build --action_env %s="%s"' % (var_name, str(var)))
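# For example (illustrative path), write_action_env_to_bazelrc('PYTHON_BIN_PATH',
# '/usr/bin/python3') appends this line to the generated bazelrc:
#   build --action_env PYTHON_BIN_PATH="/usr/bin/python3"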
def run_shell(cmd, allow_non_zero=False):
if allow_non_zero:
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
output = e.output
else:
output = subprocess.check_output(cmd)
return output.decode('UTF-8').strip()
def cygpath(path):
"""Convert path from posix to windows."""
return os.path.abspath(path).replace('\\', '/')
def get_python_path(environ_cp, python_bin_path):
"""Get the python site package paths."""
python_paths = []
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
try:
library_paths = run_shell([
python_bin_path, '-c',
'import site; print("\\n".join(site.getsitepackages()))'
]).split('\n')
except subprocess.CalledProcessError:
library_paths = [
run_shell([
python_bin_path, '-c',
'from distutils.sysconfig import get_python_lib;'
'print(get_python_lib())'
])
]
all_paths = set(python_paths + library_paths)
paths = []
for path in all_paths:
if os.path.isdir(path):
paths.append(path)
return paths
def get_python_major_version(python_bin_path):
"""Get the python major version."""
return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
"""Setup python related env variables."""
# Get PYTHON_BIN_PATH, default is the current running python.
default_python_bin_path = sys.executable
ask_python_bin_path = ('Please specify the location of python. [Default is '
'%s]: ') % default_python_bin_path
while True:
python_bin_path = get_from_env_or_user_or_default(environ_cp,
'PYTHON_BIN_PATH',
ask_python_bin_path,
default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
break
elif not os.path.exists(python_bin_path):
print('Invalid python path: %s cannot be found.' % python_bin_path)
else:
print('%s is not executable. Is it the python binary?' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = ''
# Convert python path to Windows style before checking lib and version
if is_windows() or is_cygwin():
python_bin_path = cygpath(python_bin_path)
# Get PYTHON_LIB_PATH
python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
if not python_lib_path:
python_lib_paths = get_python_path(environ_cp, python_bin_path)
if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
python_lib_path = python_lib_paths[0]
else:
print('Found possible Python library paths:\n %s' %
'\n '.join(python_lib_paths))
default_python_lib_path = python_lib_paths[0]
python_lib_path = get_input(
'Please input the desired Python library path to use. '
'Default is [%s]\n' % python_lib_paths[0])
if not python_lib_path:
python_lib_path = default_python_lib_path
environ_cp['PYTHON_LIB_PATH'] = python_lib_path
python_major_version = get_python_major_version(python_bin_path)
if python_major_version == '2':
write_to_bazelrc('build --host_force_python=PY2')
# Convert python path to Windows style before writing into bazel.rc
if is_windows() or is_cygwin():
python_lib_path = cygpath(python_lib_path)
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
write_to_bazelrc('build --python_path="%s"' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
# If the chosen python_lib_path comes from a path listed in the PYTHONPATH
# variable, we need to tell bazel to include PYTHONPATH
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
if python_lib_path in python_paths:
write_action_env_to_bazelrc('PYTHONPATH', environ_cp.get('PYTHONPATH'))
# Write tools/python_bin_path.sh
with open(
os.path.join(_TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'),
'w') as f:
f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path)
def reset_tf_configure_bazelrc():
"""Reset file that contains customized config settings."""
open(_TF_BAZELRC, 'w').close()
def cleanup_makefile():
"""Delete any leftover BUILD files from the Makefile build.
These files could interfere with Bazel parsing.
"""
makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow',
'contrib', 'makefile', 'downloads')
if os.path.isdir(makefile_download_dir):
for root, _, filenames in os.walk(makefile_download_dir):
for f in filenames:
if f.endswith('BUILD'):
os.remove(os.path.join(root, f))
def get_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Get boolean input from user.
If var_name is not set in env, ask user to enable query_item or not. If the
response is empty, use the default.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
Returns:
boolean value of the variable.
Raises:
UserInputError: if an environment variable is set but cannot be
interpreted as a boolean indicator. In that case we assume the user has
made a scripting error and would keep providing invalid input, so the
error is raised instead of looping infinitely.
"""
if not question:
question = 'Do you wish to build TensorFlow with %s support?' % query_item
if not yes_reply:
yes_reply = '%s support will be enabled for TensorFlow.' % query_item
if not no_reply:
no_reply = 'No %s' % yes_reply
yes_reply += '\n'
no_reply += '\n'
if enabled_by_default:
question += ' [Y/n]: '
else:
question += ' [y/N]: '
var = environ_cp.get(var_name)
if var is not None:
var_content = var.strip().lower()
true_strings = ('1', 't', 'true', 'y', 'yes')
false_strings = ('0', 'f', 'false', 'n', 'no')
if var_content in true_strings:
var = True
elif var_content in false_strings:
var = False
else:
raise UserInputError(
'Environment variable %s must be set as a boolean indicator.\n'
'The following are accepted as TRUE : %s.\n'
'The following are accepted as FALSE: %s.\n'
'Current value is %s.' %
(var_name, ', '.join(true_strings), ', '.join(false_strings), var))
while var is None:
user_input_origin = get_input(question)
user_input = user_input_origin.strip().lower()
if user_input == 'y':
print(yes_reply)
var = True
elif user_input == 'n':
print(no_reply)
var = False
elif not user_input:
if enabled_by_default:
print(yes_reply)
var = True
else:
print(no_reply)
var = False
else:
print('Invalid selection: %s' % user_input_origin)
return var
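# Behavior sketch: with TF_NEED_CUDA='1' already present in environ_cp,
#   get_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)
# returns True without prompting; if the variable is unset, the interactive
# [y/N] question is asked and an empty answer falls back to
# enabled_by_default.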
def set_build_var(environ_cp,
var_name,
query_item,
option_name,
enabled_by_default,
bazel_config_name=None):
"""Set if query_item will be enabled for the build.
Ask user if query_item will be enabled. Default is used if no input is given.
Set subprocess environment variable and write to .bazelrc if enabled.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
option_name: string for option to define in .bazelrc.
enabled_by_default: boolean for default behavior.
bazel_config_name: Name for Bazel --config argument to enable build feature.
"""
var = str(int(get_var(environ_cp, var_name, query_item, enabled_by_default)))
environ_cp[var_name] = var
if var == '1':
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
write_to_bazelrc('build --config=%s' % bazel_config_name)
elif bazel_config_name is not None:
# TODO(mikecase): Migrate all users of configure.py to use --config Bazel
# options and not to set build configs through environment variables.
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
def set_action_env_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None,
bazel_config_name=None):
"""Set boolean action_env variable.
Ask user if query_item will be enabled. Default is used if no input is given.
Set environment variable and write to .bazelrc.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
bazel_config_name: adding config to .bazelrc instead of action_env.
"""
var = int(
get_var(environ_cp, var_name, query_item, enabled_by_default, question,
yes_reply, no_reply))
if not bazel_config_name:
write_action_env_to_bazelrc(var_name, var)
elif var:
write_to_bazelrc('build --config=%s' % bazel_config_name)
environ_cp[var_name] = str(var)
def convert_version_to_int(version):
"""Convert a version number to a integer that can be used to compare.
Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
Args:
version: a version to be converted
Returns:
An integer if converted successfully, otherwise return None.
"""
version = version.split('-')[0]
version_segments = version.split('.')
# Treat "0.24" as "0.24.0"
if len(version_segments) == 2:
version_segments.append('0')
for seg in version_segments:
if not seg.isdigit():
return None
version_str = ''.join(['%03d' % int(seg) for seg in version_segments])
return int(version_str)
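# Worked examples (derived from the zero-padded segments above):
#   convert_version_to_int('0.27.1')  -> int('000027001') == 27001
#   convert_version_to_int('0.24')    -> int('000024000') == 24000
#   convert_version_to_int('1.0rc2')  -> None ('0rc2' is not all digits)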
def check_bazel_version(min_version, max_version):
"""Check installed bazel version is between min_version and max_version.
Args:
min_version: string for minimum bazel version (must exist!).
max_version: string for maximum bazel version (must exist!).
Returns:
The bazel version detected.
"""
if which('bazel') is None:
print('Cannot find bazel. Please install bazel.')
sys.exit(0)
curr_version = run_shell(
['bazel', '--batch', '--bazelrc=/dev/null', 'version'])
for line in curr_version.split('\n'):
if 'Build label: ' in line:
curr_version = line.split('Build label: ')[1]
break
min_version_int = convert_version_to_int(min_version)
curr_version_int = convert_version_to_int(curr_version)
max_version_int = convert_version_to_int(max_version)
# Check if current bazel version can be detected properly.
if not curr_version_int:
print('WARNING: current bazel installation is not a release version.')
print('Make sure you are running at least bazel %s' % min_version)
return curr_version
print('You have bazel %s installed.' % curr_version)
if curr_version_int < min_version_int:
print('Please upgrade your bazel installation to version %s or higher to '
'build TensorFlow!' % min_version)
sys.exit(1)
if (curr_version_int > max_version_int and
'TF_IGNORE_MAX_BAZEL_VERSION' not in os.environ):
print('Please downgrade your bazel installation to version %s or lower to '
'build TensorFlow! To downgrade: download the installer for the old '
'version (from https://github.com/bazelbuild/bazel/releases) then '
'run the installer.' % max_version)
sys.exit(1)
return curr_version
def set_cc_opt_flags(environ_cp):
"""Set up architecture-dependent optimization flags.
Also append CC optimization flags to bazel.rc.
Args:
environ_cp: copy of the os.environ.
"""
if is_ppc64le():
# gcc on ppc64le does not support -march, use mcpu instead
default_cc_opt_flags = '-mcpu=native'
elif is_windows():
default_cc_opt_flags = '/arch:AVX'
else:
default_cc_opt_flags = '-march=native -Wno-sign-compare'
question = ('Please specify optimization flags to use during compilation when'
' bazel option "--config=opt" is specified [Default is %s]: '
) % default_cc_opt_flags
cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
question, default_cc_opt_flags)
for opt in cc_opt_flags.split():
write_to_bazelrc('build:opt --copt=%s' % opt)
# It should be safe on the same build host.
if not is_ppc64le() and not is_windows():
write_to_bazelrc('build:opt --host_copt=-march=native')
write_to_bazelrc('build:opt --define with_default_optimizations=true')
def set_tf_cuda_clang(environ_cp):
"""set TF_CUDA_CLANG action_env.
Args:
environ_cp: copy of the os.environ.
"""
question = 'Do you want to use clang as CUDA compiler?'
yes_reply = 'Clang will be used as CUDA compiler.'
no_reply = 'nvcc will be used as CUDA compiler.'
set_action_env_var(
environ_cp,
'TF_CUDA_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='cuda_clang')
def set_tf_download_clang(environ_cp):
"""Set TF_DOWNLOAD_CLANG action_env."""
question = 'Do you wish to download a fresh release of clang? (Experimental)'
yes_reply = 'Clang will be downloaded and used to compile tensorflow.'
no_reply = 'Clang will not be downloaded.'
set_action_env_var(
environ_cp,
'TF_DOWNLOAD_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='download_clang')
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
var_default):
"""Get var_name either from env, or user or default.
If var_name has been set as environment variable, use the preset value, else
ask for user input. If no input is provided, the default is used.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
ask_for_var: string for how to ask for user input.
var_default: default value string.
Returns:
string value for var_name
"""
var = environ_cp.get(var_name)
if not var:
var = get_input(ask_for_var)
print('\n')
if not var:
var = var_default
return var
def set_clang_cuda_compiler_path(environ_cp):
"""Set CLANG_CUDA_COMPILER_PATH."""
default_clang_path = which('clang') or ''
ask_clang_path = ('Please specify which clang should be used as device and '
'host compiler. [Default is %s]: ') % default_clang_path
while True:
clang_cuda_compiler_path = get_from_env_or_user_or_default(
environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path,
default_clang_path)
if os.path.exists(clang_cuda_compiler_path):
break
# Reset and retry
print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''
# Set CLANG_CUDA_COMPILER_PATH
environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
clang_cuda_compiler_path)
def prompt_loop_or_load_from_env(environ_cp,
var_name,
var_default,
ask_for_var,
check_success,
error_msg,
suppress_default_error=False,
resolve_symlinks=False,
n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
"""Loop over user prompts for an ENV param until receiving a valid response.
For the env param var_name, read from the environment or verify user input
until receiving valid input. When done, set var_name in the environ_cp to its
new value.
Args:
environ_cp: (Dict) copy of the os.environ.
var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
var_default: (String) default value string.
ask_for_var: (String) string for how to ask for user input.
check_success: (Function) function that takes one argument and returns a
boolean. Should return True if the value provided is considered valid. May
contain a complex error message if error_msg does not provide enough
information. In that case, set suppress_default_error to True.
error_msg: (String) String with one and only one '%s'. Formatted with each
invalid response upon check_success(input) failure.
suppress_default_error: (Bool) Suppress the above error message in favor of
one from the check_success function.
resolve_symlinks: (Bool) Translate symbolic links into the real filepath.
n_ask_attempts: (Integer) Number of times to query for valid input before
raising an error and quitting.
Returns:
[String] The value of var_name after querying for input.
Raises:
UserInputError: if a query has been attempted n_ask_attempts times without
success. In that case we assume the user has made a scripting error and
would keep providing invalid input, so the error is raised instead of
looping infinitely.
"""
default = environ_cp.get(var_name) or var_default
full_query = '%s [Default is %s]: ' % (
ask_for_var,
default,
)
for _ in range(n_ask_attempts):
val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
default)
if check_success(val):
break
if not suppress_default_error:
print(error_msg % val)
environ_cp[var_name] = ''
else:
raise UserInputError('Invalid %s setting was provided %d times in a row. '
'Assuming to be a scripting mistake.' %
(var_name, n_ask_attempts))
if resolve_symlinks and os.path.islink(val):
val = os.path.realpath(val)
environ_cp[var_name] = val
return val
def create_android_ndk_rule(environ_cp):
"""Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_ndk_path = cygpath('%s/Android/Sdk/ndk-bundle' %
environ_cp['APPDATA'])
elif is_macos():
default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
else:
default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']
def valid_ndk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'source.properties')))
android_ndk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_HOME',
var_default=default_ndk_path,
ask_for_var='Please specify the home path of the Android NDK to use.',
check_success=valid_ndk_path,
error_msg=('The path %s or its child file "source.properties" '
'does not exist.'))
write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
write_action_env_to_bazelrc(
'ANDROID_NDK_API_LEVEL',
get_ndk_api_level(environ_cp, android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
"""Set Android variables and write Android SDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
elif is_macos():
default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
else:
default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']
def valid_sdk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'platforms')) and
os.path.exists(os.path.join(path, 'build-tools')))
android_sdk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_SDK_HOME',
var_default=default_sdk_path,
ask_for_var='Please specify the home path of the Android SDK to use.',
check_success=valid_sdk_path,
error_msg=('Either %s does not exist, or it does not contain the '
'subdirectories "platforms" and "build-tools".'))
platforms = os.path.join(android_sdk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [x.replace('android-', '') for x in api_levels]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_sdk_home_path, 'platforms',
'android-' + api_level))
android_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_API_LEVEL',
var_default=api_levels[-1],
ask_for_var=('Please specify the Android SDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the SDK path.')
build_tools = os.path.join(android_sdk_home_path, 'build-tools')
versions = sorted(os.listdir(build_tools))
def valid_build_tools(version):
return os.path.exists(
os.path.join(android_sdk_home_path, 'build-tools', version))
android_build_tools_version = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_BUILD_TOOLS_VERSION',
var_default=versions[-1],
ask_for_var=('Please specify an Android build tools version to use. '
'[Available versions: %s]') % versions,
check_success=valid_build_tools,
error_msg=('The selected SDK does not have build-tools version %s '
'available.'))
write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
android_build_tools_version)
write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
def get_ndk_api_level(environ_cp, android_ndk_home_path):
"""Gets the appropriate NDK API level to use for the provided Android NDK path."""
# First check to see if we're using a blessed version of the NDK.
properties_path = '%s/source.properties' % android_ndk_home_path
if is_windows() or is_cygwin():
properties_path = cygpath(properties_path)
with open(properties_path, 'r') as f:
filedata = f.read()
revision = re.search(r'Pkg.Revision = (\d+)', filedata)
if revision:
ndk_version = revision.group(1)
else:
raise Exception('Unable to parse NDK revision.')
if int(ndk_version) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
print('WARNING: The NDK version in %s is %s, which is not '
'supported by Bazel (officially supported versions: %s). Please use '
'another version. Compiling Android targets may result in confusing '
'errors.\n' %
(android_ndk_home_path, ndk_version, _SUPPORTED_ANDROID_NDK_VERSIONS))
# Now grab the NDK API level to use. Note that this is different from the
# SDK API level, as the NDK API level is effectively the *min* target SDK
# version.
platforms = os.path.join(android_ndk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [
x.replace('android-', '') for x in api_levels if 'android-' in x
]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_ndk_home_path, 'platforms',
'android-' + api_level))
android_ndk_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_API_LEVEL',
var_default='21', # 21 is required for ARM64 support.
ask_for_var=('Please specify the (min) Android NDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the NDK path.')
return android_ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
"""Set GCC_HOST_COMPILER_PATH."""
default_gcc_host_compiler_path = which('gcc') or ''
cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
if os.path.islink(cuda_bin_symlink):
# os.readlink is only available in linux
default_gcc_host_compiler_path = os.path.realpath(cuda_bin_symlink)
gcc_host_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_HOST_COMPILER_PATH',
var_default=default_gcc_host_compiler_path,
ask_for_var='Please specify which gcc should be used by nvcc as the host compiler.',
check_success=os.path.exists,
resolve_symlinks=True,
error_msg='Invalid gcc path. %s cannot be found.',
)
write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
"""Reformat the version string to have the given number of sequences.
For example:
Given (7, 2) -> 7.0
(7.0.1, 2) -> 7.0
(5, 1) -> 5
(5.0.3.2, 1) -> 5
Args:
version_str: String, the version string.
sequence_count: int, an integer.
Returns:
string, reformatted version string.
"""
v = version_str.split('.')
if len(v) < sequence_count:
v = v + (['0'] * (sequence_count - len(v)))
return '.'.join(v[:sequence_count])
def set_tf_cuda_paths(environ_cp):
"""Set TF_CUDA_PATHS."""
ask_cuda_paths = (
'Please specify the comma-separated list of base paths to look for CUDA '
'libraries and headers. [Leave empty to use the default]: ')
tf_cuda_paths = get_from_env_or_user_or_default(environ_cp, 'TF_CUDA_PATHS',
ask_cuda_paths, '')
if tf_cuda_paths:
environ_cp['TF_CUDA_PATHS'] = tf_cuda_paths
def set_tf_cuda_version(environ_cp):
"""Set TF_CUDA_VERSION."""
ask_cuda_version = (
'Please specify the CUDA SDK version you want to use. '
'[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION
tf_cuda_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDA_VERSION',
ask_cuda_version,
_DEFAULT_CUDA_VERSION)
environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
def set_tf_cudnn_version(environ_cp):
"""Set TF_CUDNN_VERSION."""
ask_cudnn_version = (
'Please specify the cuDNN version you want to use. '
'[Leave empty to default to cuDNN %s]: ') % _DEFAULT_CUDNN_VERSION
tf_cudnn_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDNN_VERSION',
ask_cudnn_version,
_DEFAULT_CUDNN_VERSION)
environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
"""Check compatibility between given library and cudnn/cudart libraries."""
ldd_bin = which('ldd') or '/usr/bin/ldd'
ldd_out = run_shell([ldd_bin, lib], True)
ldd_out = ldd_out.split(os.linesep)
cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')
cudnn = None
cudart = None
cudnn_ok = True # assume no cudnn dependency by default
cuda_ok = True # assume no cuda dependency by default
for line in ldd_out:
if 'libcudnn.so' in line:
cudnn = cudnn_pattern.search(line)
cudnn_ok = False
elif 'libcudart.so' in line:
cudart = cuda_pattern.search(line)
cuda_ok = False
if cudnn and len(cudnn.group(1)):
cudnn = convert_version_to_int(cudnn.group(1))
if cudart and len(cudart.group(1)):
cudart = convert_version_to_int(cudart.group(1))
if cudnn is not None:
cudnn_ok = (cudnn == cudnn_ver)
if cudart is not None:
cuda_ok = (cudart == cuda_ver)
return cudnn_ok and cuda_ok
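# Parsing sketch: an ldd line such as (illustrative output)
#   libcudnn.so.7 => /usr/lib/x86_64-linux-gnu/libcudnn.so.7 (0x00007f...)
# makes cudnn_pattern capture '7', which convert_version_to_int turns into 7
# for comparison with cudnn_ver; libraries that link neither cudnn nor cudart
# are treated as compatible by default.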
def set_tf_tensorrt_version(environ_cp):
"""Set TF_TENSORRT_VERSION."""
if not is_linux():
raise ValueError('Currently TensorRT is only supported on Linux platform.')
if not int(environ_cp.get('TF_NEED_TENSORRT', False)):
return
ask_tensorrt_version = (
'Please specify the TensorRT version you want to use. '
'[Leave empty to default to TensorRT %s]: ') % _DEFAULT_TENSORRT_VERSION
tf_tensorrt_version = get_from_env_or_user_or_default(
environ_cp, 'TF_TENSORRT_VERSION', ask_tensorrt_version,
_DEFAULT_TENSORRT_VERSION)
environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
def set_tf_nccl_version(environ_cp):
"""Set TF_NCCL_VERSION."""
if not is_linux():
raise ValueError('Currently NCCL is only supported on Linux platform.')
if 'TF_NCCL_VERSION' in environ_cp:
return
ask_nccl_version = (
'Please specify the locally installed NCCL version you want to use. '
'[Leave empty to use http://github.com/nvidia/nccl]: ')
tf_nccl_version = get_from_env_or_user_or_default(environ_cp,
'TF_NCCL_VERSION',
ask_nccl_version, '')
environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
def get_native_cuda_compute_capabilities(environ_cp):
"""Get native cuda compute capabilities.
Args:
environ_cp: copy of the os.environ.
Returns:
string of native cuda compute capabilities, separated by comma.
"""
device_query_bin = os.path.join(
environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
if os.path.isfile(device_query_bin) and os.access(device_query_bin, os.X_OK):
try:
output = run_shell(device_query_bin).split('\n')
pattern = re.compile('[0-9]*\\.[0-9]*')
output = [pattern.search(x) for x in output if 'Capability' in x]
output = ','.join(x.group() for x in output if x is not None)
except subprocess.CalledProcessError:
output = ''
else:
output = ''
return output
def set_tf_cuda_compute_capabilities(environ_cp):
"""Set TF_CUDA_COMPUTE_CAPABILITIES."""
while True:
native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
environ_cp)
if not native_cuda_compute_capabilities:
default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
else:
default_cuda_compute_capabilities = native_cuda_compute_capabilities
ask_cuda_compute_capabilities = (
'Please specify a list of comma-separated '
'CUDA compute capabilities you want to '
'build with.\nYou can find the compute '
'capability of your device at: '
'https://developer.nvidia.com/cuda-gpus.\nPlease'
' note that each additional compute '
'capability significantly increases your '
'build time and binary size, and that '
'TensorFlow only supports compute '
'capabilities >= 3.5 [Default is: %s]: ' %
default_cuda_compute_capabilities)
tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
# Check whether all capabilities from the input is valid
all_valid = True
# Remove any whitespace characters that users may have inserted by accident
# before splitting the string, since stray whitespace would cause an error
tf_cuda_compute_capabilities = ''.join(tf_cuda_compute_capabilities.split())
for compute_capability in tf_cuda_compute_capabilities.split(','):
m = re.match(r'[0-9]+\.[0-9]+', compute_capability)
if not m:
print('Invalid compute capability: %s' % compute_capability)
all_valid = False
else:
ver = float(m.group(0))
if ver < 3.0:
print('ERROR: TensorFlow only supports CUDA compute capabilities 3.0 '
'and higher. Please re-specify the list of compute '
'capabilities excluding version %s.' % ver)
all_valid = False
if ver < 3.5:
print('WARNING: XLA does not support CUDA compute capabilities '
'lower than 3.5. Disable XLA when running on older GPUs.')
if all_valid:
break
# Reset and Retry
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''
# Set TF_CUDA_COMPUTE_CAPABILITIES
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
tf_cuda_compute_capabilities)
def set_other_cuda_vars(environ_cp):
"""Set other CUDA related variables."""
# If CUDA is enabled, always use GPU during build and test.
if environ_cp.get('TF_CUDA_CLANG') == '1':
write_to_bazelrc('build --config=cuda_clang')
else:
write_to_bazelrc('build --config=cuda')
def set_host_cxx_compiler(environ_cp):
"""Set HOST_CXX_COMPILER."""
default_cxx_host_compiler = which('g++') or ''
host_cxx_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_CXX_COMPILER',
var_default=default_cxx_host_compiler,
ask_for_var=('Please specify which C++ compiler should be used as the '
'host C++ compiler.'),
check_success=os.path.exists,
error_msg='Invalid C++ compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_CXX_COMPILER', host_cxx_compiler)
def set_host_c_compiler(environ_cp):
"""Set HOST_C_COMPILER."""
default_c_host_compiler = which('gcc') or ''
host_c_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_C_COMPILER',
var_default=default_c_host_compiler,
ask_for_var=('Please specify which C compiler should be used as the host '
'C compiler.'),
check_success=os.path.exists,
error_msg='Invalid C compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_C_COMPILER', host_c_compiler)
def set_computecpp_toolkit_path(environ_cp):
"""Set COMPUTECPP_TOOLKIT_PATH."""
def toolkit_exists(toolkit_path):
"""Check if a computecpp toolkit path is valid."""
if is_linux():
sycl_rt_lib_path = 'lib/libComputeCpp.so'
else:
sycl_rt_lib_path = ''
sycl_rt_lib_path_full = os.path.join(toolkit_path, sycl_rt_lib_path)
exists = os.path.exists(sycl_rt_lib_path_full)
if not exists:
print('Invalid SYCL %s library path. %s cannot be found' %
(_TF_OPENCL_VERSION, sycl_rt_lib_path_full))
return exists
computecpp_toolkit_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='COMPUTECPP_TOOLKIT_PATH',
var_default=_DEFAULT_COMPUTECPP_TOOLKIT_PATH,
ask_for_var=(
'Please specify the location where ComputeCpp for SYCL %s is '
'installed.' % _TF_OPENCL_VERSION),
check_success=toolkit_exists,
error_msg='Invalid SYCL compiler path. %s cannot be found.',
suppress_default_error=True)
write_action_env_to_bazelrc('COMPUTECPP_TOOLKIT_PATH',
computecpp_toolkit_path)
def set_trisycl_include_dir(environ_cp):
"""Set TRISYCL_INCLUDE_DIR."""
ask_trisycl_include_dir = ('Please specify the location of the triSYCL '
'include directory. (Use --config=sycl_trisycl '
'when building with Bazel) '
'[Default is %s]: ') % (
_DEFAULT_TRISYCL_INCLUDE_DIR)
while True:
trisycl_include_dir = get_from_env_or_user_or_default(
environ_cp, 'TRISYCL_INCLUDE_DIR', ask_trisycl_include_dir,
_DEFAULT_TRISYCL_INCLUDE_DIR)
if os.path.exists(trisycl_include_dir):
break
print('Invalid triSYCL include directory, %s cannot be found' %
(trisycl_include_dir))
# Set TRISYCL_INCLUDE_DIR
environ_cp['TRISYCL_INCLUDE_DIR'] = trisycl_include_dir
write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', trisycl_include_dir)
def system_specific_test_config(env):
"""Add default build and test flags required for TF tests to bazelrc."""
write_to_bazelrc('test --flaky_test_attempts=3')
write_to_bazelrc('test --test_size_filters=small,medium')
write_to_bazelrc(
'test --test_tag_filters=-benchmark-test,-no_oss,-oss_serial')
write_to_bazelrc('test --build_tag_filters=-benchmark-test,-no_oss')
if is_windows():
if env.get('TF_NEED_CUDA', None) == '1':
write_to_bazelrc(
'test --test_tag_filters=-no_windows,-no_windows_gpu,-no_gpu')
write_to_bazelrc(
'test --build_tag_filters=-no_windows,-no_windows_gpu,-no_gpu')
else:
write_to_bazelrc('test --test_tag_filters=-no_windows,-gpu')
write_to_bazelrc('test --build_tag_filters=-no_windows,-gpu')
elif is_macos():
write_to_bazelrc('test --test_tag_filters=-gpu,-nomac,-no_mac')
write_to_bazelrc('test --build_tag_filters=-gpu,-nomac,-no_mac')
elif is_linux():
if ((env.get('TF_NEED_CUDA', None) == '1') or
(env.get('TF_NEED_ROCM', None) == '1')):
write_to_bazelrc('test --test_tag_filters=-no_gpu')
write_to_bazelrc('test --build_tag_filters=-no_gpu')
write_to_bazelrc('test --test_env=LD_LIBRARY_PATH')
else:
write_to_bazelrc('test --test_tag_filters=-gpu')
write_to_bazelrc('test --build_tag_filters=-gpu')
def set_system_libs_flag(environ_cp):
syslibs = environ_cp.get('TF_SYSTEM_LIBS', '')
if syslibs:
if ',' in syslibs:
syslibs = ','.join(sorted(syslibs.split(',')))
else:
syslibs = ','.join(sorted(syslibs.split()))
write_action_env_to_bazelrc('TF_SYSTEM_LIBS', syslibs)
if 'PREFIX' in environ_cp:
write_to_bazelrc('build --define=PREFIX=%s' % environ_cp['PREFIX'])
if 'LIBDIR' in environ_cp:
write_to_bazelrc('build --define=LIBDIR=%s' % environ_cp['LIBDIR'])
if 'INCLUDEDIR' in environ_cp:
write_to_bazelrc('build --define=INCLUDEDIR=%s' % environ_cp['INCLUDEDIR'])
def is_reduced_optimize_huge_functions_available(environ_cp):
"""Check to see if the system supports /d2ReducedOptimizeHugeFunctions.
This compiler flag was introduced in version 16.4 of the Visual Studio
compiler (available in Visual Studio 2019, Preview edition only, as of
2019-11-19). TensorFlow needs it to massively reduce compile times, but
until 16.4 is officially released, we can't depend on it.
See also https://groups.google.com/a/tensorflow.org/g/build/c/SsW98Eo7l3o
Because checking this manually is awkward (finding the installed MSVC
versions requires the registry, and it's not clear whether Bazel will use
that installed version anyway), we expect environments that know they may
use this flag to export TF_VC_VERSION=16.4
TODO(angerson, gunan): Remove this function when TensorFlow's minimum VS
version is upgraded to 16.4.
Arguments:
environ_cp: Environment of the current execution
Returns:
boolean, whether or not /d2ReducedOptimizeHugeFunctions is available on this
machine.
"""
return float(environ_cp.get('TF_VC_VERSION', '0')) >= 16.4
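# For example, a CI job that knows it builds with MSVC 16.4 or newer can
# opt in by exporting TF_VC_VERSION=16.4 before running configure.py; when
# the variable is unset, the default of '0' keeps the flag disabled.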
def set_windows_build_flags(environ_cp):
"""Set Windows specific build options."""
# The non-monolithic build is not supported yet
write_to_bazelrc('build --config monolithic')
# Suppress warning messages
write_to_bazelrc('build --copt=-w --host_copt=-w')
# Fix winsock2.h conflicts
write_to_bazelrc(
'build --copt=-DWIN32_LEAN_AND_MEAN --host_copt=-DWIN32_LEAN_AND_MEAN '
'--copt=-DNOGDI --host_copt=-DNOGDI --copt=-D_USE_MATH_DEFINES')
# Output more verbose information when something goes wrong
write_to_bazelrc('build --verbose_failures')
# The host and target platforms are the same in a Windows build, so we don't
# have to distinguish them. This avoids building the same targets twice.
write_to_bazelrc('build --distinct_host_configuration=false')
if is_reduced_optimize_huge_functions_available(environ_cp):
write_to_bazelrc(
'build --copt=/d2ReducedOptimizeHugeFunctions --host_copt=/d2ReducedOptimizeHugeFunctions'
)
if get_var(
environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
True, ('Would you like to override eigen strong inline for some C++ '
'compilation to reduce the compilation time?'),
'Eigen strong inline overridden.', 'Not overriding eigen strong inline, '
'some compilations could take more than 20 mins.'):
# Due to a known MSVC compiler issue
# https://github.com/tensorflow/tensorflow/issues/10521
# Overriding eigen strong inline speeds up the compiling of
# conv_grad_ops_3d.cc and conv_ops_3d.cc by 20 minutes,
# but this also hurts the performance. Let users decide what they want.
write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
"""Helper function to print formatted help text for Bazel config options."""
print('\t--config=%-12s\t# %s' % (name, help_text))
def configure_ios():
"""Configures TensorFlow for iOS builds.
This function will only be executed if `is_macos()` is true.
"""
if not is_macos():
return
for filepath in APPLE_BAZEL_FILES:
existing_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath + '.apple')
renamed_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath)
symlink_force(existing_filepath, renamed_filepath)
for filepath in IOS_FILES:
filename = os.path.basename(filepath)
new_filepath = os.path.join(_TF_WORKSPACE_ROOT, filename)
symlink_force(filepath, new_filepath)
def validate_cuda_config(environ_cp):
"""Run find_cuda_config.py and return cuda_toolkit_path, or None."""
def maybe_encode_env(env):
"""Encodes unicode in env to str on Windows python 2.x."""
if not is_windows() or sys.version_info[0] != 2:
return env
for k, v in env.items():
if isinstance(k, unicode):
k = k.encode('ascii')
if isinstance(v, unicode):
v = v.encode('ascii')
env[k] = v
return env
cuda_libraries = ['cuda', 'cudnn']
if is_linux():
if int(environ_cp.get('TF_NEED_TENSORRT', False)):
cuda_libraries.append('tensorrt')
if environ_cp.get('TF_NCCL_VERSION', None):
cuda_libraries.append('nccl')
proc = subprocess.Popen(
[environ_cp['PYTHON_BIN_PATH'], 'third_party/gpus/find_cuda_config.py'] +
cuda_libraries,
stdout=subprocess.PIPE,
env=maybe_encode_env(environ_cp))
if proc.wait():
# Errors from find_cuda_config.py were sent to stderr.
print('Asking for detailed CUDA configuration...\n')
return False
config = dict(
tuple(line.decode('ascii').rstrip().split(': ')) for line in proc.stdout)
print('Found CUDA %s in:' % config['cuda_version'])
print(' %s' % config['cuda_library_dir'])
print(' %s' % config['cuda_include_dir'])
print('Found cuDNN %s in:' % config['cudnn_version'])
print(' %s' % config['cudnn_library_dir'])
print(' %s' % config['cudnn_include_dir'])
if 'tensorrt_version' in config:
print('Found TensorRT %s in:' % config['tensorrt_version'])
print(' %s' % config['tensorrt_library_dir'])
print(' %s' % config['tensorrt_include_dir'])
if config.get('nccl_version', None):
print('Found NCCL %s in:' % config['nccl_version'])
print(' %s' % config['nccl_library_dir'])
print(' %s' % config['nccl_include_dir'])
print('\n')
environ_cp['CUDA_TOOLKIT_PATH'] = config['cuda_toolkit_path']
return True
def main():
global _TF_WORKSPACE_ROOT
global _TF_BAZELRC
global _TF_CURRENT_BAZEL_VERSION
parser = argparse.ArgumentParser()
parser.add_argument(
'--workspace',
type=str,
default=os.path.abspath(os.path.dirname(__file__)),
help='The absolute path to your active Bazel workspace.')
args = parser.parse_args()
_TF_WORKSPACE_ROOT = args.workspace
_TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME)
  # Make a copy of os.environ to make it clear when functions are getting and
  # setting environment variables.
environ_cp = dict(os.environ)
current_bazel_version = check_bazel_version(_TF_MIN_BAZEL_VERSION,
_TF_MAX_BAZEL_VERSION)
_TF_CURRENT_BAZEL_VERSION = convert_version_to_int(current_bazel_version)
reset_tf_configure_bazelrc()
cleanup_makefile()
setup_python(environ_cp)
if is_windows():
environ_cp['TF_NEED_OPENCL_SYCL'] = '0'
environ_cp['TF_NEED_COMPUTECPP'] = '0'
environ_cp['TF_NEED_OPENCL'] = '0'
environ_cp['TF_CUDA_CLANG'] = '0'
environ_cp['TF_NEED_TENSORRT'] = '0'
# TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler on
# Windows.
environ_cp['TF_DOWNLOAD_CLANG'] = '0'
environ_cp['TF_NEED_MPI'] = '0'
environ_cp['TF_SET_ANDROID_WORKSPACE'] = '0'
if is_macos():
environ_cp['TF_NEED_TENSORRT'] = '0'
else:
environ_cp['TF_CONFIGURE_IOS'] = '0'
xla_enabled_by_default = is_linux() or is_macos()
set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support',
xla_enabled_by_default, 'xla')
set_action_env_var(
environ_cp,
'TF_NEED_OPENCL_SYCL',
'OpenCL SYCL',
False,
bazel_config_name='sycl')
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
set_host_cxx_compiler(environ_cp)
set_host_c_compiler(environ_cp)
set_action_env_var(environ_cp, 'TF_NEED_COMPUTECPP', 'ComputeCPP', True)
if environ_cp.get('TF_NEED_COMPUTECPP') == '1':
set_computecpp_toolkit_path(environ_cp)
else:
set_trisycl_include_dir(environ_cp)
set_action_env_var(
environ_cp, 'TF_NEED_ROCM', 'ROCm', False, bazel_config_name='rocm')
if (environ_cp.get('TF_NEED_ROCM') == '1' and
'LD_LIBRARY_PATH' in environ_cp and
environ_cp.get('LD_LIBRARY_PATH') != '1'):
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
if (environ_cp.get('TF_NEED_ROCM') == '1' and environ_cp.get('ROCM_PATH')):
write_action_env_to_bazelrc('ROCM_PATH', environ_cp.get('ROCM_PATH'))
write_action_env_to_bazelrc('ROCM_ROOT', environ_cp.get('ROCM_PATH'))
environ_cp['TF_NEED_CUDA'] = str(
int(get_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)))
if (environ_cp.get('TF_NEED_CUDA') == '1' and
'TF_CUDA_CONFIG_REPO' not in environ_cp):
set_action_env_var(
environ_cp,
'TF_NEED_TENSORRT',
'TensorRT',
False,
bazel_config_name='tensorrt')
environ_save = dict(environ_cp)
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
if validate_cuda_config(environ_cp):
cuda_env_names = [
'TF_CUDA_VERSION',
'TF_CUBLAS_VERSION',
'TF_CUDNN_VERSION',
'TF_TENSORRT_VERSION',
'TF_NCCL_VERSION',
'TF_CUDA_PATHS',
# Items below are for backwards compatibility when not using
# TF_CUDA_PATHS.
'CUDA_TOOLKIT_PATH',
'CUDNN_INSTALL_PATH',
'NCCL_INSTALL_PATH',
'NCCL_HDR_PATH',
'TENSORRT_INSTALL_PATH'
]
# Note: set_action_env_var above already writes to bazelrc.
for name in cuda_env_names:
if name in environ_cp:
write_action_env_to_bazelrc(name, environ_cp[name])
break
# Restore settings changed below if CUDA config could not be validated.
environ_cp = dict(environ_save)
set_tf_cuda_version(environ_cp)
set_tf_cudnn_version(environ_cp)
if is_linux():
set_tf_tensorrt_version(environ_cp)
set_tf_nccl_version(environ_cp)
set_tf_cuda_paths(environ_cp)
else:
raise UserInputError(
          'Invalid CUDA settings were provided %d '
          'times in a row. Assuming this to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
set_tf_cuda_compute_capabilities(environ_cp)
if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
'LD_LIBRARY_PATH') != '1':
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
set_tf_cuda_clang(environ_cp)
if environ_cp.get('TF_CUDA_CLANG') == '1':
# Ask whether we should download the clang toolchain.
set_tf_download_clang(environ_cp)
if environ_cp.get('TF_DOWNLOAD_CLANG') != '1':
# Set up which clang we should use as the cuda / host compiler.
set_clang_cuda_compiler_path(environ_cp)
else:
# Use downloaded LLD for linking.
write_to_bazelrc('build:cuda_clang --config=download_clang_use_lld')
else:
# Set up which gcc nvcc should use as the host compiler
# No need to set this on Windows
if not is_windows():
set_gcc_host_compiler_path(environ_cp)
set_other_cuda_vars(environ_cp)
else:
# CUDA not required. Ask whether we should download the clang toolchain and
# use it for the CPU build.
set_tf_download_clang(environ_cp)
# SYCL / ROCm / CUDA are mutually exclusive.
# At most 1 GPU platform can be configured.
gpu_platform_count = 0
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
gpu_platform_count += 1
if environ_cp.get('TF_NEED_ROCM') == '1':
gpu_platform_count += 1
if environ_cp.get('TF_NEED_CUDA') == '1':
gpu_platform_count += 1
if gpu_platform_count >= 2:
    raise UserInputError('SYCL / CUDA / ROCm are mutually exclusive. '
'At most 1 GPU platform can be configured.')
set_cc_opt_flags(environ_cp)
set_system_libs_flag(environ_cp)
if is_windows():
set_windows_build_flags(environ_cp)
if get_var(environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', False,
('Would you like to interactively configure ./WORKSPACE for '
'Android builds?'), 'Searching for NDK and SDK installations.',
'Not configuring the WORKSPACE for Android builds.'):
create_android_ndk_rule(environ_cp)
create_android_sdk_rule(environ_cp)
system_specific_test_config(os.environ)
set_action_env_var(environ_cp, 'TF_CONFIGURE_IOS', 'iOS', False)
if environ_cp.get('TF_CONFIGURE_IOS') == '1':
configure_ios()
print('Preconfigured Bazel build configs. You can use any of the below by '
'adding "--config=<>" to your build command. See .bazelrc for more '
'details.')
config_info_line('mkl', 'Build with MKL support.')
config_info_line('monolithic', 'Config for mostly static monolithic build.')
config_info_line('ngraph', 'Build with Intel nGraph support.')
config_info_line('numa', 'Build with NUMA support.')
config_info_line(
'dynamic_kernels',
'(Experimental) Build kernels into separate shared objects.')
config_info_line('v2', 'Build TensorFlow 2.x instead of 1.x.')
  print('Preconfigured Bazel build configs to DISABLE default-on features:')
config_info_line('noaws', 'Disable AWS S3 filesystem support.')
config_info_line('nogcp', 'Disable GCP support.')
config_info_line('nohdfs', 'Disable HDFS support.')
config_info_line('nonccl', 'Disable NVIDIA NCCL support.')
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
connexion/decorators/security.py
|
# Authentication and authorization related decorators
import base64
import functools
import logging
import os
import textwrap
import requests
from six.moves import http_cookies
from connexion.utils import get_function_from_name
from ..exceptions import (ConnexionException, OAuthProblem,
OAuthResponseProblem, OAuthScopeProblem)
logger = logging.getLogger('connexion.api.security')
# use connection pool for OAuth tokeninfo
adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
session = requests.Session()
session.mount('http://', adapter)
session.mount('https://', adapter)
def get_tokeninfo_func(security_definition):
"""
:type security_definition: dict
:rtype: function
    >>> get_tokeninfo_func({'x-tokenInfoFunc': 'foo.bar'})
'<function foo.bar>'
"""
token_info_func = (security_definition.get("x-tokenInfoFunc") or
os.environ.get('TOKENINFO_FUNC'))
if token_info_func:
return get_function_from_name(token_info_func)
token_info_url = (security_definition.get('x-tokenInfoUrl') or
os.environ.get('TOKENINFO_URL'))
if token_info_url:
return functools.partial(get_tokeninfo_remote, token_info_url)
return None
def get_scope_validate_func(security_definition):
"""
:type security_definition: dict
:rtype: function
>>> get_scope_validate_func({'x-scopeValidateFunc': 'foo.bar'})
'<function foo.bar>'
"""
func = (security_definition.get("x-scopeValidateFunc") or
os.environ.get('SCOPEVALIDATE_FUNC'))
if func:
return get_function_from_name(func)
return validate_scope
def get_basicinfo_func(security_definition):
"""
:type security_definition: dict
:rtype: function
>>> get_basicinfo_func({'x-basicInfoFunc': 'foo.bar'})
'<function foo.bar>'
"""
func = (security_definition.get("x-basicInfoFunc") or
os.environ.get('BASICINFO_FUNC'))
if func:
return get_function_from_name(func)
return None
def get_apikeyinfo_func(security_definition):
"""
:type security_definition: dict
:rtype: function
>>> get_apikeyinfo_func({'x-apikeyInfoFunc': 'foo.bar'})
'<function foo.bar>'
"""
func = (security_definition.get("x-apikeyInfoFunc") or
os.environ.get('APIKEYINFO_FUNC'))
if func:
return get_function_from_name(func)
return None
def get_bearerinfo_func(security_definition):
"""
:type security_definition: dict
:rtype: function
>>> get_bearerinfo_func({'x-bearerInfoFunc': 'foo.bar'})
'<function foo.bar>'
"""
func = (security_definition.get("x-bearerInfoFunc") or
os.environ.get('BEARERINFO_FUNC'))
if func:
return get_function_from_name(func)
return None
def security_passthrough(function):
"""
:type function: types.FunctionType
:rtype: types.FunctionType
"""
return function
def security_deny(function):
"""
:type function: types.FunctionType
:rtype: types.FunctionType
"""
def deny(*args, **kwargs):
raise ConnexionException("Error in security definitions")
return deny
def get_authorization_info(auth_funcs, request, required_scopes):
for func in auth_funcs:
token_info = func(request, required_scopes)
if token_info is not None:
return token_info
logger.info("... No auth provided. Aborting with 401.")
raise OAuthProblem(description='No authorization token provided')
def validate_scope(required_scopes, token_scopes):
"""
:param required_scopes: Scopes required to access operation
:param token_scopes: Scopes granted by authorization server
:rtype: bool
"""
required_scopes = set(required_scopes)
if isinstance(token_scopes, list):
token_scopes = set(token_scopes)
else:
token_scopes = set(token_scopes.split())
logger.debug("... Scopes required: %s", required_scopes)
logger.debug("... Token scopes: %s", token_scopes)
if not required_scopes <= token_scopes:
logger.info(textwrap.dedent("""
... Token scopes (%s) do not match the scopes necessary to call endpoint (%s).
Aborting with 403.""").replace('\n', ''),
token_scopes, required_scopes)
return False
return True
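# Minimal illustration of the subset check above, with made-up scope names:
#
#   >>> validate_scope(['uid'], 'uid email')        # token grants a superset
#   True
#   >>> validate_scope(['uid', 'admin'], ['uid'])   # 'admin' is missing
#   False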
def verify_authorization_token(request, token_info_func):
"""
:param request: ConnexionRequest
:param token_info_func: types.FunctionType
:rtype: dict
"""
authorization = request.headers.get('Authorization')
if not authorization:
return None
try:
auth_type, token = authorization.split(None, 1)
except ValueError:
raise OAuthProblem(description='Invalid authorization header')
if auth_type.lower() != 'bearer':
return None
token_info = token_info_func(token)
if token_info is None:
raise OAuthResponseProblem(
description='Provided token is not valid',
token_response=None
)
return token_info
def verify_oauth(token_info_func, scope_validate_func):
def wrapper(request, required_scopes):
token_info = verify_authorization_token(request, token_info_func)
if token_info is None:
return None
        # Fallback to 'scopes' for backward compatibility
token_scopes = token_info.get('scope', token_info.get('scopes', ''))
if not scope_validate_func(required_scopes, token_scopes):
raise OAuthScopeProblem(
description='Provided token doesn\'t have the required scope',
required_scopes=required_scopes,
token_scopes=token_scopes
)
return token_info
return wrapper
def verify_basic(basic_info_func):
def wrapper(request, required_scopes):
authorization = request.headers.get('Authorization')
if not authorization:
return None
try:
auth_type, user_pass = authorization.split(None, 1)
except ValueError:
raise OAuthProblem(description='Invalid authorization header')
if auth_type.lower() != 'basic':
return None
try:
username, password = base64.b64decode(user_pass).decode('latin1').split(':', 1)
except Exception:
raise OAuthProblem(description='Invalid authorization header')
token_info = basic_info_func(username, password, required_scopes=required_scopes)
if token_info is None:
raise OAuthResponseProblem(
description='Provided authorization is not valid',
token_response=None
)
return token_info
return wrapper
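# Sketch of the Authorization header shape verify_basic() expects; the
# credentials below are made up and base64 is only used for transport:
#
#   import base64
#   creds = base64.b64encode(b"someuser:somepass").decode("latin1")
#   headers = {"Authorization": "Basic " + creds}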
def get_cookie_value(cookies, name):
'''
Returns cookie value by its name. None if no such value.
:param cookies: str: cookies raw data
:param name: str: cookies key
'''
cookie_parser = http_cookies.SimpleCookie()
cookie_parser.load(str(cookies))
try:
return cookie_parser[name].value
except KeyError:
return None
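# Example of pulling a single value out of a raw Cookie header (made-up data):
#
#   >>> get_cookie_value('session=abc123; theme=dark', 'session')
#   'abc123'
#   >>> get_cookie_value('session=abc123', 'missing') is None
#   True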
def verify_apikey(apikey_info_func, loc, name):
def wrapper(request, required_scopes):
if loc == 'query':
apikey = request.query.get(name)
elif loc == 'header':
apikey = request.headers.get(name)
elif loc == 'cookie':
cookieslist = request.headers.get('Cookie')
apikey = get_cookie_value(cookieslist, name)
else:
return None
if apikey is None:
return None
token_info = apikey_info_func(apikey, required_scopes=required_scopes)
if token_info is None:
raise OAuthResponseProblem(
description='Provided apikey is not valid',
token_response=None
)
return token_info
return wrapper
def verify_bearer(bearer_info_func):
"""
:param bearer_info_func: types.FunctionType
:rtype: types.FunctionType
"""
def wrapper(request, required_scopes):
return verify_authorization_token(request, bearer_info_func)
return wrapper
def verify_security(auth_funcs, required_scopes, function):
@functools.wraps(function)
def wrapper(request):
token_info = get_authorization_info(auth_funcs, request, required_scopes)
        # Fallback to 'uid' for backward compatibility
request.context['user'] = token_info.get('sub', token_info.get('uid'))
request.context['token_info'] = token_info
return function(request)
return wrapper
def get_tokeninfo_remote(token_info_url, token):
"""
Retrieve oauth token_info remotely using HTTP
:param token_info_url: Url to get information about the token
:type token_info_url: str
:param token: oauth token from authorization header
:type token: str
:rtype: dict
"""
token_request = session.get(token_info_url, headers={'Authorization': 'Bearer {}'.format(token)}, timeout=5)
if not token_request.ok:
return None
return token_request.json()
|
[] |
[] |
[
"BEARERINFO_FUNC",
"APIKEYINFO_FUNC",
"SCOPEVALIDATE_FUNC",
"TOKENINFO_FUNC",
"TOKENINFO_URL",
"BASICINFO_FUNC"
] |
[]
|
["BEARERINFO_FUNC", "APIKEYINFO_FUNC", "SCOPEVALIDATE_FUNC", "TOKENINFO_FUNC", "TOKENINFO_URL", "BASICINFO_FUNC"]
|
python
| 6 | 0 | |
distributed/utils.py
|
import asyncio
import functools
import importlib
import inspect
import json
import logging
import multiprocessing
import os
import pkgutil
import re
import shutil
import socket
import sys
import tempfile
import threading
import warnings
import weakref
import xml.etree.ElementTree
from asyncio import TimeoutError
from collections import OrderedDict, UserDict, deque
from concurrent.futures import CancelledError, ThreadPoolExecutor # noqa: F401
from contextlib import contextmanager, suppress
from hashlib import md5
from importlib.util import cache_from_source
from time import sleep
from typing import Any as AnyType
from typing import Dict, List
import click
import tblib.pickling_support
try:
import resource
except ImportError:
resource = None
import tlz as toolz
from tornado import gen
from tornado.ioloop import IOLoop
import dask
from dask import istask
from dask.utils import parse_timedelta as _parse_timedelta
from dask.widgets import get_template
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from .compatibility import PYPY, WINDOWS
from .metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
# For some reason this is required in python >= 3.9
if WINDOWS:
import multiprocessing.popen_spawn_win32
else:
import multiprocessing.popen_spawn_posix
logger = _logger = logging.getLogger(__name__)
no_default = "__no_default__"
def _initialize_mp_context():
if WINDOWS or PYPY:
return multiprocessing
else:
method = dask.config.get("distributed.worker.multiprocessing-method")
ctx = multiprocessing.get_context(method)
# Makes the test suite much faster
preload = ["distributed"]
if "pkg_resources" in sys.modules:
preload.append("pkg_resources")
from .versions import optional_packages, required_packages
for pkg, _ in required_packages + optional_packages:
try:
importlib.import_module(pkg)
except ImportError:
pass
else:
preload.append(pkg)
ctx.set_forkserver_preload(preload)
return ctx
mp_context = _initialize_mp_context()
def has_arg(func, argname):
"""
Whether the function takes an argument with the given name.
"""
while True:
try:
if argname in inspect.getfullargspec(func).args:
return True
except TypeError:
break
try:
# For Tornado coroutines and other decorated functions
func = func.__wrapped__
except AttributeError:
break
return False
def get_fileno_limit():
"""
Get the maximum number of open files per process.
"""
if resource is not None:
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
# Default ceiling for Windows when using the CRT, though it
# is settable using _setmaxstdio().
return 512
@toolz.memoize
def _get_ip(host, port, family):
# By using a UDP socket, we don't actually try to connect but
# simply select the local address through which *host* is reachable.
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect((host, port))
ip = sock.getsockname()[0]
return ip
except OSError as e:
warnings.warn(
"Couldn't detect a suitable IP address for "
"reaching %r, defaulting to hostname: %s" % (host, e),
RuntimeWarning,
)
addr_info = socket.getaddrinfo(
socket.gethostname(), port, family, socket.SOCK_DGRAM, socket.IPPROTO_UDP
)[0]
return addr_info[4][0]
finally:
sock.close()
def get_ip(host="8.8.8.8", port=80):
"""
Get the local IP address through which the *host* is reachable.
*host* defaults to a well-known Internet host (one of Google's public
DNS servers).
"""
return _get_ip(host, port, family=socket.AF_INET)
def get_ipv6(host="2001:4860:4860::8888", port=80):
"""
The same as get_ip(), but for IPv6.
"""
return _get_ip(host, port, family=socket.AF_INET6)
def get_ip_interface(ifname):
"""
Get the local IPv4 address of a network interface.
KeyError is raised if the interface doesn't exist.
    ValueError is raised if the interface does not have an IPv4 address
associated with it.
"""
import psutil
net_if_addrs = psutil.net_if_addrs()
if ifname not in net_if_addrs:
allowed_ifnames = list(net_if_addrs.keys())
raise ValueError(
"{!r} is not a valid network interface. "
"Valid network interfaces are: {}".format(ifname, allowed_ifnames)
)
for info in net_if_addrs[ifname]:
if info.family == socket.AF_INET:
return info.address
raise ValueError(f"interface {ifname!r} doesn't have an IPv4 address")
async def All(args, quiet_exceptions=()):
"""Wait on many tasks at the same time
Err once any of the tasks err.
See https://github.com/tornadoweb/tornado/issues/1546
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
return results
async def Any(args, quiet_exceptions=()):
"""Wait on many tasks at the same time and return when any is finished
Err once any of the tasks err.
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
break
return results
def sync(loop, func, *args, callback_timeout=None, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
callback_timeout = _parse_timedelta(callback_timeout, "s")
    # Tornado's PollIOLoop doesn't raise when it is closed, so check for that ourselves
if PollIOLoop and (
(isinstance(loop, PollIOLoop) and getattr(loop, "_closing", False))
or (hasattr(loop, "asyncio_loop") and loop.asyncio_loop._closed)
):
raise RuntimeError("IOLoop is closed")
try:
if loop.asyncio_loop.is_closed(): # tornado 6
raise RuntimeError("IOLoop is closed")
except AttributeError:
pass
e = threading.Event()
main_tid = threading.get_ident()
result = [None]
error = [False]
@gen.coroutine
def f():
        # We flag the thread state asynchronous, which will make sync() calls
        # within `func` use async semantics. In order to support concurrent
        # calls to sync(), `asynchronous` is used as a ref counter.
thread_state.asynchronous = getattr(thread_state, "asynchronous", 0)
thread_state.asynchronous += 1
try:
if main_tid == threading.get_ident():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
result[0] = yield future
except Exception:
error[0] = sys.exc_info()
finally:
assert thread_state.asynchronous > 0
thread_state.asynchronous -= 1
e.set()
loop.add_callback(f)
if callback_timeout is not None:
if not e.wait(callback_timeout):
raise TimeoutError(f"timed out after {callback_timeout} s.")
else:
while not e.is_set():
e.wait(10)
if error[0]:
typ, exc, tb = error[0]
raise exc.with_traceback(tb)
else:
return result[0]
class LoopRunner:
"""
A helper to start and stop an IO loop in a controlled way.
    Several loop runners can safely be associated with the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be re-used, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops = weakref.WeakKeyDictionary()
_lock = threading.Lock()
def __init__(self, loop=None, asynchronous=False):
current = IOLoop.current()
if loop is None:
if asynchronous:
self._loop = current
else:
# We're expecting the loop to run in another thread,
# avoid re-using this thread's assigned loop
self._loop = IOLoop()
else:
self._loop = loop
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
with self._lock:
self._all_loops.setdefault(self._loop, (0, None))
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self):
assert not self._started
count, real_runner = self._all_loops[self._loop]
if self._asynchronous or real_runner is not None or count > 0:
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
assert count == 0
loop_evt = threading.Event()
done_evt = threading.Event()
in_thread = [None]
start_exc = [None]
def loop_cb():
in_thread[0] = threading.current_thread()
loop_evt.set()
def run_loop(loop=self._loop):
loop.add_callback(loop_cb)
# run loop forever if it's not running already
try:
if (
getattr(loop, "asyncio_loop", None) is None
or not loop.asyncio_loop.is_running()
):
loop.start()
except Exception as e:
start_exc[0] = e
finally:
done_evt.set()
thread = threading.Thread(target=run_loop, name="IO loop")
thread.daemon = True
thread.start()
loop_evt.wait(timeout=10)
self._started = True
actual_thread = in_thread[0]
if actual_thread is not thread:
# Loop already running in other thread (user-launched)
done_evt.wait(5)
if start_exc[0] is not None and not isinstance(start_exc[0], RuntimeError):
if not isinstance(
start_exc[0], Exception
): # track down infrequent error
raise TypeError(
f"not an exception: {start_exc[0]!r}",
)
raise start_exc[0]
self._all_loops[self._loop] = count + 1, None
else:
assert start_exc[0] is None, start_exc
self._loop_thread = thread
self._all_loops[self._loop] = count + 1, self
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
count, real_runner = self._all_loops[self._loop]
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
else:
assert count == 1
del self._all_loops[self._loop]
if real_runner is not None:
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
if self._loop_thread is not None:
try:
self._loop.add_callback(self._loop.stop)
self._loop_thread.join(timeout=timeout)
with suppress(KeyError): # IOLoop can be missing
self._loop.close()
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
return self._loop
@contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
@contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, "w") as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
def is_kernel():
"""Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if "IPython" not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), "kernel", None) is not None
hex_pattern = re.compile("[a-f]+")
@functools.lru_cache(100000)
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split("('x', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split("-")
if not words[0][0].isalpha():
result = words[0].split(",")[0].strip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (
len(word) == 8 and hex_pattern.match(word) is not None
):
result += "-" + word
else:
break
if len(result) == 32 and re.match(r"[a-f0-9]{32}", result):
return "data"
else:
if result[0] == "<":
result = result.strip("<>").split()[0].split(".")[-1]
return result
except Exception:
return "Other"
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str:
if x[0] == "(":
return x.split(",", 1)[0].strip("()\"'")
elif len(x) == 32 and re.match(r"[a-f0-9]{32}", x):
return "data"
elif x[0] == "<":
return x.strip("<>").split()[0].split(".")[-1]
else:
return key_split(x)
elif typ is bytes:
return key_split_group(x.decode())
else:
return key_split(x)
@contextmanager
def log_errors(pdb=False):
from .comm import CommClosedError
try:
yield
except (CommClosedError, gen.Return):
raise
except Exception as e:
try:
logger.exception(e)
except TypeError: # logger becomes None during process cleanup
pass
if pdb:
import pdb
pdb.set_trace()
raise
def silence_logging(level, root="distributed"):
"""
Change all StreamHandlers for the given logger to the given level
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
old = None
logger = logging.getLogger(root)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
old = handler.level
handler.setLevel(level)
return old
@toolz.memoize
def ensure_ip(hostname):
"""Ensure that address is an IP address
Examples
--------
>>> ensure_ip('localhost')
'127.0.0.1'
>>> ensure_ip('') # Maps as localhost for binding e.g. 'tcp://:8811'
'127.0.0.1'
>>> ensure_ip('123.123.123.123') # pass through IP addresses
'123.123.123.123'
"""
if not hostname:
hostname = "localhost"
# Prefer IPv4 over IPv6, for compatibility
families = [socket.AF_INET, socket.AF_INET6]
for fam in families:
try:
results = socket.getaddrinfo(
hostname, 1234, fam, socket.SOCK_STREAM # dummy port number
)
except socket.gaierror as e:
exc = e
else:
return results[0][4][0]
raise exc
tblib.pickling_support.install()
def get_traceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
bad = [
os.path.join("distributed", "worker"),
os.path.join("distributed", "scheduler"),
os.path.join("tornado", "gen.py"),
os.path.join("concurrent", "futures"),
]
while exc_traceback and any(
b in exc_traceback.tb_frame.f_code.co_filename for b in bad
):
exc_traceback = exc_traceback.tb_next
return exc_traceback
def truncate_exception(e, n=10000):
"""Truncate exception to be about a certain length"""
if len(str(e)) > n:
try:
return type(e)("Long error message", str(e)[:n])
except Exception:
return Exception("Long error message", type(e), str(e)[:n])
else:
return e
def validate_key(k):
"""Validate a key as received on a stream."""
typ = type(k)
if typ is not str and typ is not bytes:
raise TypeError(f"Unexpected key type {typ} (value: {k!r})")
def _maybe_complex(task):
"""Possibly contains a nested task"""
return (
istask(task)
or type(task) is list
and any(map(_maybe_complex, task))
or type(task) is dict
and any(map(_maybe_complex, task.values()))
)
def seek_delimiter(file, delimiter, blocksize):
"""Seek current file to next byte after a delimiter bytestring
This seeks the file to the next byte following the delimiter. It does
not return anything. Use ``file.tell()`` to see location afterwards.
Parameters
----------
file: a file
delimiter: bytes
a delimiter like ``b'\n'`` or message sentinel
blocksize: int
Number of bytes to read from the file at once.
"""
if file.tell() == 0:
return
last = b""
while True:
current = file.read(blocksize)
if not current:
return
full = last + current
try:
i = full.index(delimiter)
file.seek(file.tell() - (len(full) - i) + len(delimiter))
return
except ValueError:
pass
last = full[-len(delimiter) :]
def read_block(f, offset, length, delimiter=None):
"""Read a block of bytes from a file
Parameters
----------
f: file
File-like object supporting seek, read, tell, etc..
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
starts and stops at delimiter boundaries that follow the locations
``offset`` and ``offset + length``. If ``offset`` is zero then we
start at zero. The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'Bob, 200\\nCharlie, 300'
"""
if delimiter:
f.seek(offset)
seek_delimiter(f, delimiter, 2 ** 16)
start = f.tell()
length -= start - offset
f.seek(start + length)
seek_delimiter(f, delimiter, 2 ** 16)
end = f.tell()
offset = start
length = end - start
f.seek(offset)
bytes = f.read(length)
return bytes
@contextmanager
def tmpfile(extension=""):
extension = "." + extension.lstrip(".")
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
yield filename
if os.path.exists(filename):
try:
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
except OSError: # sometimes we can't remove a generated temp file
pass
def ensure_bytes(s):
"""Attempt to turn `s` into bytes.
Parameters
----------
s : Any
        The object to be converted. Correctly handles
* str
* bytes
* objects implementing the buffer protocol (memoryview, ndarray, etc.)
Returns
-------
b : bytes
Raises
------
TypeError
When `s` cannot be converted
Examples
--------
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if isinstance(s, bytes):
return s
elif hasattr(s, "encode"):
return s.encode()
else:
try:
return bytes(s)
except Exception as e:
raise TypeError(
"Object %s is neither a bytes object nor has an encode method" % s
) from e
def open_port(host=""):
"""Return a probably-open port
There is a chance that this port will be taken by the operating system soon
after returning from this function.
"""
# http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def import_file(path):
"""Loads modules for a file (.py, .zip, .egg)"""
directory, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
names_to_import = []
tmp_python_path = None
if ext in (".py",): # , '.pyc'):
if directory not in sys.path:
tmp_python_path = directory
names_to_import.append(name)
if ext == ".py": # Ensure that no pyc file will be reused
cache_file = cache_from_source(path)
with suppress(OSError):
os.remove(cache_file)
if ext in (".egg", ".zip", ".pyz"):
if path not in sys.path:
sys.path.insert(0, path)
names = (mod_info.name for mod_info in pkgutil.iter_modules([path]))
names_to_import.extend(names)
loaded = []
if not names_to_import:
logger.warning("Found nothing to import from %s", filename)
else:
importlib.invalidate_caches()
if tmp_python_path is not None:
sys.path.insert(0, tmp_python_path)
try:
for name in names_to_import:
logger.info("Reload module %s from %s file", name, ext)
loaded.append(importlib.reload(importlib.import_module(name)))
finally:
if tmp_python_path is not None:
sys.path.remove(tmp_python_path)
return loaded
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
data = "\n".join(row_template % r for r in rows)
return "\n".join([bar, header, bar, data, bar])
def nbytes(frame, _bytes_like=(bytes, bytearray)):
"""Number of bytes of a frame or memoryview"""
if isinstance(frame, _bytes_like):
return len(frame)
else:
try:
return frame.nbytes
except AttributeError:
return len(frame)
def json_load_robust(fn, load=json.load):
"""Reads a JSON file from disk that may be being written as we read"""
while not os.path.exists(fn):
sleep(0.01)
for i in range(10):
try:
with open(fn) as f:
cfg = load(f)
if cfg:
return cfg
except (ValueError, KeyError): # race with writing process
pass
sleep(0.1)
class DequeHandler(logging.Handler):
"""A logging.Handler that records records into a deque"""
_instances = weakref.WeakSet()
def __init__(self, *args, n=10000, **kwargs):
self.deque = deque(maxlen=n)
super().__init__(*args, **kwargs)
self._instances.add(self)
def emit(self, record):
self.deque.append(record)
def clear(self):
"""
Clear internal storage.
"""
self.deque.clear()
@classmethod
def clear_all_instances(cls):
"""
Clear the internal storage of all live DequeHandlers.
"""
for inst in list(cls._instances):
inst.clear()
def reset_logger_locks():
"""Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
is_server_extension = False
if "notebook" in sys.modules:
import traitlets
from notebook.notebookapp import NotebookApp
is_server_extension = traitlets.config.Application.initialized() and isinstance(
traitlets.config.Application.instance(), NotebookApp
)
if not is_server_extension:
is_kernel_and_no_running_loop = False
if is_kernel():
try:
asyncio.get_running_loop()
except RuntimeError:
is_kernel_and_no_running_loop = True
if not is_kernel_and_no_running_loop:
# TODO: Use tornado's AnyThreadEventLoopPolicy, instead of class below,
# once tornado > 6.0.3 is available.
if WINDOWS and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
# https://github.com/tornadoweb/tornado/issues/2608
BaseEventLoopPolicy = asyncio.WindowsSelectorEventLoopPolicy
else:
BaseEventLoopPolicy = asyncio.DefaultEventLoopPolicy
class AnyThreadEventLoopPolicy(BaseEventLoopPolicy):
def get_event_loop(self):
try:
return super().get_event_loop()
except (RuntimeError, AssertionError):
loop = self.new_event_loop()
self.set_event_loop(loop)
return loop
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
@functools.lru_cache(1000)
def has_keyword(func, keyword):
return keyword in inspect.signature(func).parameters
@functools.lru_cache(1000)
def command_has_keyword(cmd, k):
if cmd is not None:
if isinstance(cmd, str):
try:
from importlib import import_module
cmd = import_module(cmd)
except ImportError:
raise ImportError("Module for command %s is not available" % cmd)
if isinstance(getattr(cmd, "main"), click.core.Command):
cmd = cmd.main
if isinstance(cmd, click.core.Command):
cmd_params = {
p.human_readable_name
for p in cmd.params
if isinstance(p, click.core.Option)
}
return k in cmd_params
return False
# from bokeh.palettes import viridis
# palette = viridis(18)
palette = [
"#440154",
"#471669",
"#472A79",
"#433C84",
"#3C4D8A",
"#355D8C",
"#2E6C8E",
"#287A8E",
"#23898D",
"#1E978A",
"#20A585",
"#2EB27C",
"#45BF6F",
"#64CB5D",
"#88D547",
"#AFDC2E",
"#D7E219",
"#FDE724",
]
@toolz.memoize
def color_of(x, palette=palette):
h = md5(str(x).encode())
n = int(h.hexdigest()[:8], 16)
return palette[n % len(palette)]
def _iscoroutinefunction(f):
return inspect.iscoroutinefunction(f) or gen.is_coroutine_function(f)
@functools.lru_cache(None)
def _iscoroutinefunction_cached(f):
return _iscoroutinefunction(f)
def iscoroutinefunction(f):
# Attempt to use lru_cache version and fall back to non-cached version if needed
try:
return _iscoroutinefunction_cached(f)
except TypeError: # unhashable type
return _iscoroutinefunction(f)
@contextmanager
def warn_on_duration(duration, msg):
start = time()
yield
stop = time()
if stop - start > _parse_timedelta(duration):
warnings.warn(msg, stacklevel=2)
def format_dashboard_link(host, port):
template = dask.config.get("distributed.dashboard.link")
if dask.config.get("distributed.scheduler.dashboard.tls.cert"):
scheme = "https"
else:
scheme = "http"
return template.format(
**toolz.merge(os.environ, dict(scheme=scheme, host=host, port=port))
)
def parse_ports(port):
"""Parse input port information into list of ports
Parameters
----------
port : int, str, None
Input port or ports. Can be an integer like 8787, a string for a
single port like "8787", a string for a sequential range of ports like
"8000:8200", or None.
Returns
-------
ports : list
List of ports
Examples
--------
A single port can be specified using an integer:
>>> parse_ports(8787)
[8787]
or a string:
>>> parse_ports("8787")
[8787]
A sequential range of ports can be specified by a string which indicates
the first and last ports which should be included in the sequence of ports:
>>> parse_ports("8787:8790")
[8787, 8788, 8789, 8790]
An input of ``None`` is also valid and can be used to indicate that no port
has been specified:
>>> parse_ports(None)
[None]
"""
if isinstance(port, str) and ":" not in port:
port = int(port)
if isinstance(port, (int, type(None))):
ports = [port]
else:
port_start, port_stop = map(int, port.split(":"))
if port_stop <= port_start:
raise ValueError(
"When specifying a range of ports like port_start:port_stop, "
"port_stop must be greater than port_start, but got "
f"port_start={port_start} and port_stop={port_stop}"
)
ports = list(range(port_start, port_stop + 1))
return ports
is_coroutine_function = iscoroutinefunction
class Log(str):
"""A container for newline-delimited string of log entries"""
def _repr_html_(self):
return get_template("log.html.j2").render(log=self)
class Logs(dict):
"""A container for a dict mapping names to strings of log entries"""
def _repr_html_(self):
return get_template("logs.html.j2").render(logs=self)
def cli_keywords(d: dict, cls=None, cmd=None):
"""Convert a kwargs dictionary into a list of CLI keywords
Parameters
----------
d : dict
The keywords to convert
cls : callable
The callable that consumes these terms to check them for validity
cmd : string or object
A string with the name of a module, or the module containing a
click-generated command with a "main" function, or the function itself.
It may be used to parse a module's custom arguments (i.e., arguments that
are not part of Worker class), such as nprocs from dask-worker CLI or
enable_nvlink from dask-cuda-worker CLI.
Examples
--------
>>> cli_keywords({"x": 123, "save_file": "foo.txt"})
['--x', '123', '--save-file', 'foo.txt']
>>> from dask.distributed import Worker
>>> cli_keywords({"x": 123}, Worker)
Traceback (most recent call last):
...
ValueError: Class distributed.worker.Worker does not support keyword x
"""
from dask.utils import typename
if cls or cmd:
for k in d:
if not has_keyword(cls, k) and not command_has_keyword(cmd, k):
if cls and cmd:
raise ValueError(
"Neither class %s or module %s support keyword %s"
% (typename(cls), typename(cmd), k)
)
elif cls:
raise ValueError(
f"Class {typename(cls)} does not support keyword {k}"
)
else:
raise ValueError(
f"Module {typename(cmd)} does not support keyword {k}"
)
def convert_value(v):
out = str(v)
if " " in out and "'" not in out and '"' not in out:
out = '"' + out + '"'
return out
return sum(
(["--" + k.replace("_", "-"), convert_value(v)] for k, v in d.items()), []
)
def is_valid_xml(text):
return xml.etree.ElementTree.fromstring(text) is not None
_offload_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="Dask-Offload")
weakref.finalize(_offload_executor, _offload_executor.shutdown)
def import_term(name: str):
"""Return the fully qualified term
Examples
--------
>>> import_term("math.sin") # doctest: +SKIP
<function math.sin(x, /)>
"""
try:
module_name, attr_name = name.rsplit(".", 1)
except ValueError:
return importlib.import_module(name)
module = importlib.import_module(module_name)
return getattr(module, attr_name)
async def offload(fn, *args, **kwargs):
loop = asyncio.get_event_loop()
return await loop.run_in_executor(_offload_executor, lambda: fn(*args, **kwargs))
class EmptyContext:
def __enter__(self):
pass
def __exit__(self, *args):
pass
async def __aenter__(self):
pass
async def __aexit__(self, *args):
pass
empty_context = EmptyContext()
class LRU(UserDict):
"""Limited size mapping, evicting the least recently looked-up key when full"""
def __init__(self, maxsize):
super().__init__()
self.data = OrderedDict()
self.maxsize = maxsize
def __getitem__(self, key):
value = super().__getitem__(key)
self.data.move_to_end(key)
return value
def __setitem__(self, key, value):
if len(self) >= self.maxsize:
self.data.popitem(last=False)
super().__setitem__(key, value)
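# Minimal sketch of the eviction behaviour with toy keys (not project data):
#
#   >>> cache = LRU(maxsize=2)
#   >>> cache["a"], cache["b"] = 1, 2
#   >>> _ = cache["a"]       # touching "a" makes "b" the least recently used
#   >>> cache["c"] = 3       # inserting "c" evicts "b"
#   >>> sorted(cache)
#   ['a', 'c']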
def clean_dashboard_address(addrs: AnyType, default_listen_ip: str = "") -> List[Dict]:
"""
Examples
--------
>>> clean_dashboard_address(8787)
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address(":8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("foo:8787")
[{'address': 'foo', 'port': 8787}]
>>> clean_dashboard_address([8787, 8887])
[{'address': '', 'port': 8787}, {'address': '', 'port': 8887}]
>>> clean_dashboard_address(":8787,:8887")
[{'address': '', 'port': 8787}, {'address': '', 'port': 8887}]
"""
if default_listen_ip == "0.0.0.0":
default_listen_ip = "" # for IPV6
if isinstance(addrs, str):
addrs = addrs.split(",")
if not isinstance(addrs, list):
addrs = [addrs]
addresses = []
for addr in addrs:
try:
addr = int(addr)
except (TypeError, ValueError):
pass
if isinstance(addr, str):
addr = addr.split(":")
if isinstance(addr, (tuple, list)):
if len(addr) == 2:
host, port = (addr[0], int(addr[1]))
elif len(addr) == 1:
[host], port = addr, 0
else:
raise ValueError(addr)
elif isinstance(addr, int):
host = default_listen_ip
port = addr
addresses.append({"address": host, "port": port})
return addresses
_deprecations = {
"deserialize_for_cli": "dask.config.deserialize",
"serialize_for_cli": "dask.config.serialize",
"format_bytes": "dask.utils.format_bytes",
"format_time": "dask.utils.format_time",
"funcname": "dask.utils.funcname",
"parse_bytes": "dask.utils.parse_bytes",
"parse_timedelta": "dask.utils.parse_timedelta",
"typename": "dask.utils.typename",
}
def __getattr__(name):
if name in _deprecations:
use_instead = _deprecations[name]
warnings.warn(
f"{name} is deprecated and will be removed in a future release. "
f"Please use {use_instead} instead.",
category=FutureWarning,
stacklevel=2,
)
return import_term(use_instead)
else:
raise AttributeError(f"module {__name__} has no attribute {name}")
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
wireless/sims/instance-get-example-1/instance-get-example-1.7.x.java
|
import com.twilio.Twilio;
import com.twilio.rest.wireless.v1.Sim;
public class Example {
// Get your Account SID and Auth Token from https://twilio.com/console
// To set up environment variables, see http://twil.io/secure
public static final String ACCOUNT_SID = System.getenv("TWILIO_ACCOUNT_SID");
public static final String AUTH_TOKEN = System.getenv("TWILIO_AUTH_TOKEN");
public static void main(String[] args) {
// Initialize the client
Twilio.init(ACCOUNT_SID, AUTH_TOKEN);
Sim sim = Sim.fetcher("DEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").fetch();
System.out.println(sim);
}
}
|
[
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\""
] |
[] |
[
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
] |
[]
|
["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
|
java
| 2 | 0 | |
pkg/kubectl/main.go
|
package kubectl
import (
goflag "flag"
"fmt"
"math/rand"
"os"
"time"
"github.com/rancher/k3s/pkg/server"
"github.com/spf13/pflag"
utilflag "k8s.io/apiserver/pkg/util/flag"
"k8s.io/apiserver/pkg/util/logs"
"k8s.io/kubernetes/pkg/kubectl/cmd"
)
func Main() {
kubenv := os.Getenv("KUBECONFIG")
if kubenv == "" {
config, err := server.HomeKubeConfig(false)
if _, serr := os.Stat(config); err == nil && serr == nil {
os.Setenv("KUBECONFIG", config)
}
}
main()
}
func main() {
rand.Seed(time.Now().UnixNano())
command := cmd.NewDefaultKubectlCommand()
// TODO: once we switch everything over to Cobra commands, we can go back to calling
// utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the
// normalize func and add the go flag set by hand.
pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc)
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
// utilflag.InitFlags()
logs.InitLogs()
defer logs.FlushLogs()
if err := command.Execute(); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
}
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
emissionsapi/db.py
|
# Copyright 2019, The Emissions API Developers
# https://emissions-api.org
# This software is available under the terms of an MIT license.
# See LICENSE for more information.
"""Database layer for the Emissions API.
"""
from functools import wraps
import logging
import sqlalchemy
from sqlalchemy import and_, or_, create_engine, Column, DateTime, Float, \
String, PickleType
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import pandas
import psycopg2.extensions
import geoalchemy2
from emissionsapi.config import config
# Logger
logger = logging.getLogger(__name__)
# Database uri as described in
# https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls
# Retrieved as environment variable.
database = config('database') or 'postgresql://user:user@localhost/db'
# Products
products = config('products') or {
'carbonmonoxide': {
'storage': 'data',
'product': 'carbonmonoxide_total_column',
'product_key': 'L2__CO____',
'description': 'Average atmosphere mole content of carbon monoxide in '
'`mol m¯²`'
}
}
# H3 Resolution
resolution = config('resolution') or 4
# Global session variable. Set on initialization.
__session__ = None
# Base Class of all ORM objects.
Base = declarative_base()
# Add psycopg2 adapter for pandas Series
psycopg2.extensions.register_adapter(
pandas.core.series.Series,
lambda arr: psycopg2.extensions.adapt(list(arr)))
class AlembicVersion(Base):
__tablename__ = 'alembic_version'
version_num = Column(String(32), primary_key=True)
class File(Base):
"""ORM object for the nc files.
"""
__tablename__ = 'file'
filename = Column(String, primary_key=True)
"""Name of processed data file"""
class Cache(Base):
"""ORM object for the request cache
"""
__tablename__ = 'cache'
request = Column(String, primary_key=True)
"""Primary key identifying the request"""
begin = Column(DateTime)
"""Begin of the time interval involved in this request (used for
efficiently invalidating caches)
"""
end = Column(DateTime)
"""End of the time interval involved in this request (used for efficiently
invalidating caches)
"""
response = Column(PickleType)
"""Cached response"""
@classmethod
def invalidate(cache, session, earliest, latest):
"""Invalidates/deletes all cached responses in the given interval to
        ensure this data is generated anew. This is meant to be run when the
        underlying data for this interval changes, for instance after new data
has been imported.
:param session: SQLAlchemy Session
:type session: sqlalchemy.orm.session.Session
:param earliest: Earliest time of the interval to invalidate
:type earliest: datetime.datetime
:param latest: Latest time of the interval to invalidate
:type latest: datetime.datetime
"""
logger.debug('Invalidating cache in interval %s..%s',
earliest.isoformat(), latest.isoformat())
session.query(cache)\
.filter(and_(or_(cache.begin.is_(None),
cache.begin <= latest),
or_(cache.end.is_(None),
cache.end > earliest)))\
.delete()
session.commit()
for name, attributes in products.items():
attributes['table'] = sqlalchemy.Table(
name, Base.metadata,
Column('value', Float),
Column('timestamp', DateTime, index=True),
Column('geom', geoalchemy2.Geometry(geometry_type='POINT')))
def with_session(f):
"""Wrapper for f to make a SQLAlchemy session present within the function
:param f: Function to call
:type f: Function
:raises e: Possible exception of f
:return: Result of f
"""
@wraps(f)
def decorated(*args, **kwargs):
# Get new session
session = get_session()
try:
# Call f with the session and all the other arguments
result = f(session, *args, **kwargs)
except Exception as e:
            # Rollback session, something bad happened.
session.rollback()
session.close()
raise e
# Close session and return the result of f
session.close()
return result
return decorated
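# Hypothetical usage sketch: the decorated function receives the session as its
# first argument; callers do not pass it themselves.
#
#   @with_session
#   def count_files(session):
#       return session.query(File).count()
#
#   total = count_files()  # opens a session, runs the query, closes the session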
def get_session():
"""Get a new session.
Lazy load the database connection and create the tables.
Returns:
sqlalchemy.orm.session.Session -- SQLAlchemy Session object
"""
global __session__
    # Create database connection, tables and Sessionmaker if necessary.
if not __session__:
Engine = create_engine(
database, echo=logger.getEffectiveLevel() == logging.DEBUG)
__session__ = sessionmaker(bind=Engine)
Base.metadata.create_all(Engine)
# Return new session object
return __session__()
def insert_dataset(session, data, tbl):
'''Batch insert data into the database using PostGIS specific functions.
:param session: SQLAlchemy Session
:type session: sqlalchemy.orm.session.Session
:param data: DataFrame containing value, timestamp, longitude and latitude
:type data: pandas.core.frame.DataFrame
:param tbl: Base class representing the database table for the data
:type tbl: sqlalchemy.ext.declarative.api.DeclarativeMeta
'''
values = sqlalchemy.select([sqlalchemy.func.unnest(data.value),
sqlalchemy.func.unnest(data.timestamp),
sqlalchemy.func.ST_MakePoint(
sqlalchemy.func.unnest(data.longitude),
sqlalchemy.func.unnest(data.latitude))])
query = sqlalchemy.insert(tbl).from_select(tbl.columns, values)
session.execute(query)
def get_points(session, tbl):
"""Get all points.
:param session: SQLAlchemy Session
:type session: sqlalchemy.orm.session.Session
:param tbl: Table to get data from
:type tbl: sqlalchemy.sql.schema.Table
:return: SQLAlchemy Query returning tuples of value, timestamp, longitude,
and latitude.
:rtype: sqlalchemy.orm.query.Query
"""
return session.query(
tbl.c.value,
tbl.c.timestamp,
tbl.c.geom.ST_X(),
tbl.c.geom.ST_Y())
def get_averages(session, tbl):
"""Get daily averages of all points.
:param session: SQLAlchemy Session
:type session: sqlalchemy.orm.session.Session
:param tbl: Table to get data from
:type tbl: sqlalchemy.sql.schema.Table
:return: SQLAlchemy Query with tuple of the daily carbon monoxide average,
the maximal timestamp the minimal timestamp and the timestamp
truncated by day.
:rtype: sqlalchemy.orm.query.Query
"""
day = sqlalchemy.func.date(tbl.c.timestamp)
return session.query(
sqlalchemy.func.avg(tbl.c.value),
sqlalchemy.func.max(tbl.c.timestamp),
sqlalchemy.func.min(tbl.c.timestamp),
day).group_by(day)
def get_statistics(session, tbl, interval_length='day'):
"""Get statistical data like amount, average, min, or max values for a
specified time interval. Optionally, time and location filters can be
applied.
:param session: SQLAlchemy Session
:type session: sqlalchemy.orm.session.Session
:param tbl: Table to get data from
:type tbl: sqlalchemy.sql.schema.Table
:param interval_length: Length of the time interval over which data is
aggregated, as accepted by PostgreSQL's date_trunc_
function, e.g. ``day`` or ``week``.
:type interval_length: str
:return: SQLAlchemy Query requesting the following statistical values for
the specified time interval:
- number of considered measurements
- average product value
- minimum product value
- maximum product value
- time of the first measurement
- time of the last measurement
- start of the interval
:rtype: sqlalchemy.orm.query.Query
.. _date_trunc: https://postgresql.org/docs/9.1/functions-datetime.html
"""
interval = sqlalchemy.func.date_trunc(interval_length,
tbl.c.timestamp)
return session.query(
sqlalchemy.func.count(tbl.c.value),
sqlalchemy.func.avg(tbl.c.value),
sqlalchemy.func.stddev(tbl.c.value),
sqlalchemy.func.min(tbl.c.value),
sqlalchemy.func.max(tbl.c.value),
sqlalchemy.func.min(tbl.c.timestamp),
sqlalchemy.func.max(tbl.c.timestamp),
interval).group_by(interval)
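# Illustrative usage sketch (not part of the original module): the result rows
# follow the column order of the query above.
#
# for count, avg, stddev, vmin, vmax, first, last, start in \
#         get_statistics(session, tbl, interval_length='week'):
#     print(start, count, avg)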
def filter_query(query, tbl, wkt=None, distance=None, begin=None, end=None):
"""Filter query by time and location.
:param query: SQLAlchemy Query
:type query: sqlalchemy.orm.Query
:param tbl: Table to get data from
:type tbl: sqlalchemy.sql.schema.Table
:param wkt: WKT Element specifying an area in which to search for points,
defaults to None.
:type wkt: geoalchemy2.WKTElement, optional
:param distance: Distance as defined in PostGIS' ST_DWithin_ function.
:type distance: float, optional
:param begin: Get only points after this timestamp, defaults to None
:type begin: datetime.datetime, optional
:param end: Get only points before this timestamp, defaults to None
:type end: datetime.datetime, optional
:return: SQLAlchemy Query filtered by time and location.
:rtype: sqlalchemy.orm.query.Query
.. _ST_DWithin: https://postgis.net/docs/ST_DWithin.html
"""
# Filter by WKT
if wkt is not None:
if distance is not None:
query = query.filter(geoalchemy2.func.ST_DWITHIN(
tbl.c.geom, wkt, distance))
else:
query = query.filter(geoalchemy2.func.ST_WITHIN(
tbl.c.geom, wkt))
# Filter for points after the time specified as begin
if begin is not None:
query = query.filter(begin <= tbl.c.timestamp)
# Filter for points before the time specified as end
if end is not None:
query = query.filter(end > tbl.c.timestamp)
return query
def limit_offset_query(query, limit=None, offset=None):
"""Apply limit and offset to the query.
:param query: SQLAlchemy Query
:type query: sqlalchemy.orm.Query
:param limit: Limit number of Items returned, defaults to None
:type limit: int, optional
:param offset: Specify the offset of the first hit to return,
defaults to None
:type offset: int, optional
:return: SQLAlchemy Query with limit and offset applied.
:rtype: sqlalchemy.orm.query.Query
"""
# Apply limit
if limit is not None:
query = query.limit(limit)
# Apply offset
if offset is not None:
query = query.offset(offset)
return query
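# Illustrative usage sketch (not part of the original module): the query
# helpers compose, e.g. restrict points to an area and a time window and then
# page the result. The WKT polygon and variable names below are placeholders.
#
# area = geoalchemy2.WKTElement('POLYGON((8 49, 9 49, 9 50, 8 50, 8 49))')
# query = get_points(session, tbl)
# query = filter_query(query, tbl, wkt=area, begin=earliest, end=latest)
# rows = limit_offset_query(query, limit=100, offset=0).all()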
def get_data_range(session, tbl):
"""Get the range of data currently available from the API.
:param session: SQLAlchemy Session
:type session: sqlalchemy.orm.session.Session
:param tbl: Table to get data from
:type tbl: sqlalchemy.sql.schema.Table
:return: SQLAlchemy Query requesting the minimum and maximum measurement
time from all values.
:rtype: sqlalchemy.orm.query.Query
"""
return session.query(
sqlalchemy.func.min(tbl.c.timestamp),
sqlalchemy.func.max(tbl.c.timestamp))
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
examples/github-api/Server.py
|
import sys
# add uplink directory to path
sys.path.insert(0, "../../")
import uplink
from uplink import *
import asyncio
import json
import os
from datetime import datetime, timedelta
from flask import Flask, request, jsonify
app = Flask(__name__)
BASE_URL = "https://api.github.com"
CLIENT_ID = os.environ["CLIENT_ID"]
CLIENT_SECRET = os.environ["CLIENT_SECRET"]
@headers({"Accept": "application/vnd.github.v3+json"})
class Github(Consumer):
@get("/search/repositories?q={keyword} in:name,description,readme")
def repos_for_keyword(
self,
keyword,
client_id: Query = CLIENT_ID,
client_secret: Query = CLIENT_SECRET,
):
""" Get a list of repositories which have a given keyword in the name, description or readme """
pass
@get("/repos/{user}/{repo_name}/commits")
def commits_for_repo(
self,
user,
repo_name,
since: Query,
client_id: Query = CLIENT_ID,
client_secret: Query = CLIENT_SECRET,
):
""" Get a list of commits in a repo since some start date """
pass
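# Illustrative note (not part of the original example): with the decorators
# above, a call like
#
#     await github.repos_for_keyword("flask")
#
# is expected to issue roughly
#     GET https://api.github.com/search/repositories?q=flask in:name,description,readme
# (URL-encoded) with client_id/client_secret appended as query parameters and
# the Accept header set by the class decorator.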
github = Github(BASE_URL, client=uplink.AiohttpClient())
loop = asyncio.get_event_loop()
# Helpers
async def _repos_for_keyword(keyword):
""" Get repos which match the keyword search """
r = await github.repos_for_keyword(keyword)
r_json = await r.json()
return [item["full_name"] for item in r_json["items"]]
async def _users_for_repo(user, repo_name, oldest_age=55):
""" Returns users that have commited in a repo in the last N weeks """
since = (datetime.now() - timedelta(weeks=oldest_age)).isoformat()
r = await github.commits_for_repo(user, repo_name, since=since)
r_json = await r.json()
users = set()
for commit in r_json:
if "author" in commit and commit["author"] is not None:
user = (
commit["author"]["login"],
commit["commit"]["author"]["email"],
commit["commit"]["author"]["name"],
)
users.add(user)
return list(users)
# Flask routes
@app.route("/repos", methods=["GET"])
def repos_for_keyword():
"""
/repos?keyword=<keyword>
Finds all repos which contain the given keyword in the name, readme, or description """
if "keyword" not in request.args:
return "", 400
keyword = request.args["keyword"]
future = _repos_for_keyword(keyword)
repos = loop.run_until_complete(future)
return jsonify(repos)
@app.route("/users/<user>/repo/<repo_name>", methods=["GET"])
def users_for_repo(user, repo_name):
"""
/users/<user>/repo/<repo_name>[?oldest-age=<age in weeks>]
Returns list of users who have committed in the resource user/repo in the last given number of
weeks """
oldest_age = (
55 if "oldest-age" not in request.args else int(request.args["oldest-age"])
)
future = _users_for_repo(user, repo_name, oldest_age=oldest_age)
users = loop.run_until_complete(future)
return jsonify(users)
@app.route("/users", methods=["GET"])
def users_for_keyword():
"""
/users?keyword=<keyword>[?oldest-age=<age in weeks>]
Find the top users who have committed in repositories matching the keyword within the given time window """
if "keyword" not in request.args:
return "", 400
keyword = request.args["keyword"]
oldest_age = (
55 if "oldest-age" not in request.args else int(request.args["oldest-age"])
)
repos_future = _repos_for_keyword(keyword)
repos = loop.run_until_complete(repos_future)
# gather futures for getting users from each repo
users_futures = []
users = set()
for repo in repos:
user, repo_name = repo.split("/")
users_futures.append(
_users_for_repo(user, repo_name, oldest_age=oldest_age)
)
# barrier on all the users futures
users_results = loop.run_until_complete(asyncio.wait(users_futures))
# gather the results
for users_result in users_results:
for task in users_result:
if task.result():
users.update(set(task.result()))
return jsonify(list(users))
app.run("0.0.0.0")
|
[] |
[] |
[
"CLIENT_SECRET",
"CLIENT_ID"
] |
[]
|
["CLIENT_SECRET", "CLIENT_ID"]
|
python
| 2 | 0 | |
backend/TravelManagementSystem/asgi.py
|
"""
ASGI config for TravelManagementSystem project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TravelManagementSystem.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cmd/server/main.go
|
package main
import (
"os"
"os/signal"
"syscall"
"github.com/NeowayLabs/logger"
"github.com/ricardolonga/goteca"
"github.com/ricardolonga/goteca/http"
"github.com/ricardolonga/goteca/mongo"
"gopkg.in/mgo.v2"
)
func main() {
session, err := mgo.Dial(os.Getenv("MONGO_URL"))
if err != nil {
logger.Fatal("error on Mongo connection: %q", err)
}
repository := mongo.NewDao(session)
service := goteca.NewService(repository)
handler := http.NewHandler(service)
server := http.NewServer("8080", handler)
server.ListenAndServe()
stopChan := make(chan os.Signal, 1) // buffered, as required by signal.Notify
signal.Notify(stopChan, syscall.SIGTERM, syscall.SIGINT)
<-stopChan
server.Shutdown()
}
|
[
"\"MONGO_URL\""
] |
[] |
[
"MONGO_URL"
] |
[]
|
["MONGO_URL"]
|
go
| 1 | 0 | |
components/automate-cli/cmd/chef-automate/deploy.go
|
// Copyright © 2017 Chef Software
package main
import (
"fmt"
"os"
"path"
"github.com/spf13/cobra"
dc "github.com/chef/automate/api/config/deployment"
api "github.com/chef/automate/api/interservice/deployment"
"github.com/chef/automate/components/automate-cli/pkg/status"
"github.com/chef/automate/components/automate-deployment/pkg/airgap"
"github.com/chef/automate/components/automate-deployment/pkg/client"
"github.com/chef/automate/components/automate-deployment/pkg/manifest"
mc "github.com/chef/automate/components/automate-deployment/pkg/manifest/client"
"github.com/chef/automate/lib/version"
)
var deployLong = `Deploy a new Chef Automate instance using the supplied configuration.
- <CONFIG_FILE> must be a valid path to a TOML formatted configuration file`
var promptMLSA = `
To continue, you'll need to accept our terms of service:
Terms of Service
https://www.chef.io/terms-of-service
Master License and Services Agreement
https://www.chef.io/online-master-agreement
I agree to the Terms of Service and the Master License and Services Agreement
`
var errMLSA = "Chef Software Terms of Service and Master License and Services Agreement were not accepted"
var errProvisonInfra = `Architecture does not match the requested one.
If you want to provision the cluster, you must first run the provision command:
chef-automate provision-infra
After that you can run this command`
var invalidConfig = "Invalid toml config file, please check your toml file."
var invalidChannelName = "Invalid channel; permitted channels are dev and stable"
var deployCmdFlags = struct {
channel string
upgradeStrategy string
keyPath string
certPath string
adminPassword string
hartifactsPath string
overrideOrigin string
manifestDir string
fqdn string
airgap string
skipPreflight bool
acceptMLSA bool
enableChefServer bool
enableDeploymentOrderStressMode bool
enableWorkflow bool
products []string
bootstrapBundlePath string
userAuth bool
}{}
// deployCmd represents the new command
var deployCmd = newDeployCmd()
func init() {
RootCmd.AddCommand(deployCmd)
}
func newDeployCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "deploy [/path/to/config.toml]",
Short: "Deploy Chef Automate",
Long: deployLong,
Annotations: map[string]string{
NoCheckVersionAnnotation: NoCheckVersionAnnotation,
},
Args: cobra.RangeArgs(0, 1),
RunE: runDeployCmd,
}
// flags for Deploy Command
cmd.PersistentFlags().BoolVar(
&deployCmdFlags.skipPreflight,
"skip-preflight",
false,
"Deploy regardless of pre-flight conditions")
cmd.PersistentFlags().StringVar(
&deployCmdFlags.overrideOrigin,
"override-origin",
"",
"Optional origin to install local .hart packages from")
cmd.PersistentFlags().StringVar(
&deployCmdFlags.hartifactsPath,
"hartifacts",
"",
"Optional path to cache of local .hart packages")
cmd.PersistentFlags().StringVar(
&deployCmdFlags.manifestDir,
"manifest-dir",
"",
"Optional path to local automate manifest files")
cmd.PersistentFlags().StringVar(
&deployCmdFlags.channel,
"channel",
"",
"Release channel to deploy all services from")
cmd.PersistentFlags().StringVar(
&deployCmdFlags.upgradeStrategy,
"upgrade-strategy",
"at-once",
"Upgrade strategy to use for this deployment.")
cmd.PersistentFlags().StringVar(
&deployCmdFlags.certPath,
"certificate",
"",
"The path to a certificate that should be used for external TLS connections (web and API).")
cmd.PersistentFlags().StringVar(
&deployCmdFlags.keyPath,
"private-key",
"",
"The path to a private key corresponding to the TLS certificate.")
cmd.PersistentFlags().StringVar(
&deployCmdFlags.adminPassword,
"admin-password",
"",
"The password for the initial admin user. Auto-generated by default.")
cmd.PersistentFlags().BoolVar(
&deployCmdFlags.acceptMLSA,
"accept-terms-and-mlsa",
false,
"Agree to the Chef Software Terms of Service and the Master License and Services Agreement")
cmd.PersistentFlags().StringVar(
&deployCmdFlags.fqdn,
"fqdn",
"",
"The fully-qualified domain name that Chef Automate can be accessed at. (default: hostname of this machine)")
cmd.PersistentFlags().StringVar(
&deployCmdFlags.airgap,
"airgap-bundle",
"",
"Path to an airgap install bundle")
cmd.PersistentFlags().BoolVar(
&deployCmdFlags.enableChefServer,
"enable-chef-server",
false,
"Deploy Chef Server services along with Chef Automate")
cmd.PersistentFlags().BoolVar(
&deployCmdFlags.enableDeploymentOrderStressMode,
"enable-deploy-order-stress-mode",
false,
"Deploy services in the order that stresses hab the most")
cmd.PersistentFlags().BoolVar(
&deployCmdFlags.enableWorkflow,
"enable-workflow",
false,
"Deploy Workflow services along with Chef Automate")
cmd.PersistentFlags().StringSliceVar(
&deployCmdFlags.products,
"product",
nil,
"Product to deploy")
cmd.PersistentFlags().StringVar(
&deployCmdFlags.bootstrapBundlePath,
"bootstrap-bundle",
"",
"Path to bootstrap bundle")
cmd.PersistentFlags().BoolVarP(
&deployCmdFlags.userAuth,
"yes",
"y",
false,
"Do not prompt for confirmation; accept defaults and continue")
if !isDevMode() {
for _, flagName := range []string{
"override-origin",
"hartifacts",
"manifest-dir",
// passwords are not validated until the end of the deploy, which makes this
// feature dangerous. But we still want to have it in CI, so we mark it as
// hidden
"admin-password",
"enable-chef-server",
"enable-deploy-order-stress-mode",
"enable-workflow",
"bootstrap-bundle",
} {
err := cmd.PersistentFlags().MarkHidden(flagName)
if err != nil {
fmt.Printf("failed configuring cobra: %s\n", err.Error())
panic(":(")
}
}
}
return cmd
}
func runDeployCmd(cmd *cobra.Command, args []string) error {
var configPath = ""
if len(args) > 0 {
configPath = args[0]
}
var deployer, derr = getDeployer(configPath)
if derr != nil {
return status.Wrap(derr, status.ConfigError, invalidConfig)
}
if deployer != nil {
conf := new(dc.AutomateConfig)
if err := mergeFlagOverrides(conf); err != nil {
return status.Wrap(
err,
status.ConfigError,
"Merging command flag overrides into Chef Automate config failed",
)
}
if deployCmdFlags.userAuth {
deployCmdFlags.acceptMLSA = deployCmdFlags.userAuth
}
if len(deployCmdFlags.channel) > 0 && (deployCmdFlags.channel == "dev" || deployCmdFlags.channel == "current") {
writer.Printf("deploying with channel : %s \n", deployCmdFlags.channel)
args = append(args, "--"+deployCmdFlags.channel)
return deployer.doDeployWork(args)
} else if len(deployCmdFlags.channel) == 0 {
writer.Printf("deploying with default channel \n")
return deployer.doDeployWork(args)
} else {
return status.Wrap(derr, status.ConfigError, invalidChannelName)
}
}
writer.Printf("Automate deployment in non-HA mode proceeding...")
if !deployCmdFlags.acceptMLSA {
agree, err := writer.Confirm(promptMLSA)
if err != nil {
return status.Wrap(err, status.InvalidCommandArgsError, errMLSA)
}
if !agree {
return status.New(status.InvalidCommandArgsError, errMLSA)
}
}
if deployCmdFlags.keyPath != "" && deployCmdFlags.certPath == "" {
msg := "Cannot provide --private-key without also providing --certificate."
return status.New(status.InvalidCommandArgsError, msg)
}
if deployCmdFlags.certPath != "" && deployCmdFlags.keyPath == "" {
msg := "cannot provide --certificate without also providing --private-key."
return status.New(status.InvalidCommandArgsError, msg)
}
conf := new(dc.AutomateConfig)
var err error
if len(args) == 0 {
// Use default configuration if no configuration file was provided
conf, err = generatedConfig()
if err != nil {
return status.Annotate(err, status.ConfigError)
}
} else {
conf, err = dc.LoadUserOverrideConfigFile(args[0])
if err != nil {
return status.Wrapf(
err,
status.ConfigError,
"Loading configuration file %s failed",
args[0],
)
}
}
if err = mergeFlagOverrides(conf); err != nil {
return status.Wrap(
err,
status.ConfigError,
"Merging command flag overrides into Chef Automate config failed",
)
}
adminPassword := deployCmdFlags.adminPassword
if adminPassword == "" {
adminPassword, err = dc.GeneratePassword()
if err != nil {
return status.Wrap(err, status.ConfigError, "Generating the admin user password failed")
}
}
err = conf.AddCredentials("Local Administrator", "admin", adminPassword)
if err != nil {
return status.Wrap(err, status.ConfigError, "Applying the admin user password to configuration failed")
}
offlineMode := deployCmdFlags.airgap != ""
manifestPath := ""
if offlineMode {
writer.Title("Installing artifact")
metadata, err := airgap.Unpack(deployCmdFlags.airgap)
if err != nil {
return status.Annotate(err, status.AirgapUnpackInstallBundleError)
}
manifestPath = api.AirgapManifestPath
// We need to set the path for the hab binary so that the deployer does not
// try to go to the internet to get it
pathEnv := os.Getenv("PATH")
err = os.Setenv("PATH", fmt.Sprintf("%s:%s", path.Dir(metadata.HabBinPath), pathEnv))
if err != nil {
return err
}
} else {
manifestPath = conf.Deployment.GetV1().GetSvc().GetManifestDirectory().GetValue()
}
manifestProvider := manifest.NewLocalHartManifestProvider(
mc.NewDefaultClient(manifestPath),
conf.Deployment.GetV1().GetSvc().GetHartifactsPath().GetValue(),
conf.Deployment.GetV1().GetSvc().GetOverrideOrigin().GetValue())
err = client.Deploy(writer, conf, deployCmdFlags.skipPreflight, manifestProvider, version.BuildTime, offlineMode, deployCmdFlags.bootstrapBundlePath)
if err != nil && !status.IsStatusError(err) {
return status.Annotate(err, status.DeployError)
}
return err
}
func generatedConfig() (*dc.AutomateConfig, error) {
cfg, err := dc.GenerateInitConfig(
deployCmdFlags.channel,
deployCmdFlags.upgradeStrategy,
dc.InitialTLSCerts(deployCmdFlags.keyPath, deployCmdFlags.certPath),
dc.InitialFQDN(deployCmdFlags.fqdn),
)
if err != nil {
return nil, status.Wrap(err, status.ConfigError, "Generating initial default configuration failed")
}
return cfg.AutomateConfig(), nil
}
// mergeFlagOverrides merges the flag provided configuration options into the
// user override config. Because the override configuration will be persisted
// we only want to add overrides for flags that have been specifically set so
// that we don't accidentally set an override with a default value.
func mergeFlagOverrides(conf *dc.AutomateConfig) error {
overrideOpts := []dc.AutomateConfigOpt{}
if deployCmdFlags.manifestDir != "" {
overrideOpts = append(overrideOpts, dc.WithManifestDir(deployCmdFlags.manifestDir))
}
if deployCmdFlags.channel != "" {
overrideOpts = append(overrideOpts, dc.WithChannel(deployCmdFlags.channel))
}
if deployCmdFlags.hartifactsPath != "" {
overrideOpts = append(overrideOpts, dc.WithHartifacts(deployCmdFlags.hartifactsPath))
}
if deployCmdFlags.overrideOrigin != "" {
overrideOpts = append(overrideOpts, dc.WithOrigin(deployCmdFlags.overrideOrigin))
}
if deployCmdFlags.enableChefServer {
overrideOpts = append(overrideOpts, dc.WithChefServerEnabled(true))
}
if deployCmdFlags.enableDeploymentOrderStressMode {
overrideOpts = append(overrideOpts, dc.WithDeploymentOrderStressMode(true))
}
if deployCmdFlags.enableWorkflow {
overrideOpts = append(overrideOpts, dc.WithWorkflowEnabled(true))
}
if len(deployCmdFlags.products) > 0 {
overrideOpts = append(overrideOpts, dc.WithProducts(deployCmdFlags.products))
}
return dc.WithConfigOptions(conf, overrideOpts...)
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
tasks/dogstatsd.py
|
"""
Dogstatsd tasks
"""
from __future__ import print_function, absolute_import
import os
import shutil
from distutils.dir_util import copy_tree
import invoke
from invoke import task
from invoke.exceptions import Exit
from .build_tags import get_build_tags, get_default_build_tags
from .utils import get_build_flags, bin_name, get_root
from .utils import REPO_PATH
from .go import deps
# constants
DOGSTATSD_BIN_PATH = os.path.join(".", "bin", "dogstatsd")
STATIC_BIN_PATH = os.path.join(".", "bin", "static")
MAX_BINARY_SIZE = 15 * 1024
DOGSTATSD_TAG = "datadog/dogstatsd:master"
DEFAULT_BUILD_TAGS = [
"zlib",
"docker",
"kubelet",
]
@task
def build(ctx, rebuild=False, race=False, static=False, build_include=None,
build_exclude=None, use_embedded_libs=False):
"""
Build Dogstatsd
"""
build_include = DEFAULT_BUILD_TAGS if build_include is None else build_include.split(",")
build_exclude = [] if build_exclude is None else build_exclude.split(",")
build_tags = get_build_tags(build_include, build_exclude)
ldflags, gcflags, env = get_build_flags(ctx, static=static, use_embedded_libs=use_embedded_libs)
bin_path = DOGSTATSD_BIN_PATH
if static:
bin_path = STATIC_BIN_PATH
cmd = "go build {race_opt} {build_type} -tags '{build_tags}' -o {bin_name} "
cmd += "-gcflags=\"{gcflags}\" -ldflags=\"{ldflags}\" {REPO_PATH}/cmd/dogstatsd/"
args = {
"race_opt": "-race" if race else "",
"build_type": "-a" if rebuild else "",
"build_tags": " ".join(build_tags),
"bin_name": os.path.join(bin_path, bin_name("dogstatsd")),
"gcflags": gcflags,
"ldflags": ldflags,
"REPO_PATH": REPO_PATH,
}
ctx.run(cmd.format(**args), env=env)
# Render the configuration file template
#
# We need to remove cross compiling bits if any because go generate must
# build and execute in the native platform
env = {
"GOOS": "",
"GOARCH": "",
}
cmd = "go generate {}/cmd/dogstatsd"
ctx.run(cmd.format(REPO_PATH), env=env)
refresh_assets(ctx)
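# Illustrative note (not part of the original file): with default options the
# go build invocation rendered by build() looks roughly like
#
#   go build  -tags 'zlib docker kubelet' -o ./bin/dogstatsd/dogstatsd \
#       -gcflags="..." -ldflags="..." <REPO_PATH>/cmd/dogstatsd/
#
# where the gcflags/ldflags values come from get_build_flags().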
@task
def refresh_assets(ctx):
"""
Clean up and refresh Collector's assets and config files
"""
# ensure DOGSTATSD_BIN_PATH exists
if not os.path.exists(DOGSTATSD_BIN_PATH):
os.mkdir(DOGSTATSD_BIN_PATH)
dist_folder = os.path.join(DOGSTATSD_BIN_PATH, "dist")
if os.path.exists(dist_folder):
shutil.rmtree(dist_folder)
copy_tree("./cmd/dogstatsd/dist/", dist_folder)
@task
def run(ctx, rebuild=False, race=False, build_include=None, build_exclude=None,
skip_build=False):
"""
Run Dogstatsd binary. Build the binary before executing, unless
--skip-build was passed.
"""
if not skip_build:
print("Building dogstatsd...")
build(ctx, rebuild=rebuild, race=race, build_include=build_include,
build_exclude=build_exclude)
target = os.path.join(DOGSTATSD_BIN_PATH, bin_name("dogstatsd"))
ctx.run("{} start".format(target))
@task
def system_tests(ctx, skip_build=False):
"""
Run the system testsuite.
"""
if not skip_build:
print("Building dogstatsd...")
build(ctx)
env = {
"DOGSTATSD_BIN": os.path.join(get_root(), DOGSTATSD_BIN_PATH, bin_name("dogstatsd")),
}
cmd = "go test -tags '{build_tags}' -v {REPO_PATH}/test/system/dogstatsd/"
args = {
"build_tags": " ".join(get_default_build_tags()),
"REPO_PATH": REPO_PATH,
}
ctx.run(cmd.format(**args), env=env)
@task
def size_test(ctx, skip_build=False):
"""
Run the size test for the static binary
"""
if not skip_build:
print("Building dogstatsd...")
build(ctx, static=True)
bin_path = os.path.join(STATIC_BIN_PATH, bin_name("dogstatsd"))
stat_info = os.stat(bin_path)
size = stat_info.st_size / 1024
if size > MAX_BINARY_SIZE:
print("DogStatsD static build size too big: {} kB".format(size))
print("This means your PR added big classes or dependencies in the packages dogstatsd uses")
raise Exit(1)
print("DogStatsD static build size OK: {} kB".format(size))
@task
def omnibus_build(ctx, log_level="info", base_dir=None, gem_path=None,
skip_deps=False):
"""
Build the Dogstatsd packages with Omnibus Installer.
"""
if not skip_deps:
deps(ctx)
# omnibus config overrides
overrides = []
# base dir (can be overridden through env vars, command line takes precedence)
base_dir = base_dir or os.environ.get("DSD_OMNIBUS_BASE_DIR")
if base_dir:
overrides.append("base_dir:{}".format(base_dir))
overrides_cmd = ""
if overrides:
overrides_cmd = "--override=" + " ".join(overrides)
with ctx.cd("omnibus"):
cmd = "bundle install"
if gem_path:
cmd += " --path {}".format(gem_path)
ctx.run(cmd)
omnibus = "bundle exec omnibus.bat" if invoke.platform.WINDOWS else "bundle exec omnibus"
cmd = "{omnibus} build dogstatsd --log-level={log_level} {overrides}"
args = {
"omnibus": omnibus,
"log_level": log_level,
"overrides": overrides_cmd
}
ctx.run(cmd.format(**args))
@task
def integration_tests(ctx, install_deps=False, race=False, remote_docker=False):
"""
Run integration tests for dogstatsd
"""
if install_deps:
deps(ctx)
test_args = {
"go_build_tags": " ".join(get_default_build_tags()),
"race_opt": "-race" if race else "",
"exec_opts": "",
}
if remote_docker:
test_args["exec_opts"] = "-exec \"inv docker.dockerize-test\""
go_cmd = 'go test {race_opt} -tags "{go_build_tags}" {exec_opts}'.format(**test_args)
prefixes = [
"./test/integration/dogstatsd/...",
]
for prefix in prefixes:
ctx.run("{} {}".format(go_cmd, prefix))
@task
def image_build(ctx, skip_build=False):
"""
Build the docker image
"""
import docker
client = docker.from_env()
src = os.path.join(STATIC_BIN_PATH, bin_name("dogstatsd"))
dst = os.path.join("Dockerfiles", "dogstatsd", "alpine", "static")
if not skip_build:
build(ctx, rebuild=True, static=True)
if not os.path.exists(src):
raise Exit(1)
if not os.path.exists(dst):
os.makedirs(dst)
shutil.copy(src, dst)
client.images.build(path="Dockerfiles/dogstatsd/alpine/", rm=True, tag=DOGSTATSD_TAG)
ctx.run("rm -rf Dockerfiles/dogstatsd/alpine/static")
@task
def clean(ctx):
"""
Remove temporary objects and binary artifacts
"""
# go clean
print("Executing go clean")
ctx.run("go clean")
# remove the bin/dogstatsd folder
print("Remove agent binary folder")
ctx.run("rm -rf ./bin/dogstatsd")
|
[] |
[] |
[
"DSD_OMNIBUS_BASE_DIR"
] |
[]
|
["DSD_OMNIBUS_BASE_DIR"]
|
python
| 1 | 0 | |
testing/framework/TestCmd.py
|
"""
A testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing
of executable commands and scripts (in any language, not just Python),
especially commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd
module manages and cleans up one or more temporary workspace
directories, and provides methods for creating files and directories in
those workspace directories from in-line data (here-documents), allowing
tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
import TestCmd
test = TestCmd.TestCmd()
There are a bunch of keyword arguments available at instantiation:
test = TestCmd.TestCmd(
description='string',
program='program_or_script_to_test',
interpreter='script_interpreter',
workdir='prefix',
subdir='subdir',
verbose=Boolean,
match=default_match_function,
match_stdout=default_match_stdout_function,
match_stderr=default_match_stderr_function,
diff=default_diff_function,
diff_stdout=default_diff_stdout_function,
diff_stderr=default_diff_stderr_function,
combine=Boolean,
)
There are a bunch of methods that let you do different things:
test.verbose_set(1)
test.description_set('string')
test.program_set('program_or_script_to_test')
test.interpreter_set('script_interpreter')
test.interpreter_set(['script_interpreter', 'arg'])
test.workdir_set('prefix')
test.workdir_set('')
test.workpath('file')
test.workpath('subdir', 'file')
test.subdir('subdir', ...)
test.rmdir('subdir', ...)
test.write('file', "contents\n")
test.write(['subdir', 'file'], "contents\n")
test.read('file')
test.read(['subdir', 'file'])
test.read('file', mode)
test.read(['subdir', 'file'], mode)
test.writable('dir', 1)
test.writable('dir', None)
test.preserve(condition, ...)
test.cleanup(condition)
test.command_args(
program='program_or_script_to_run',
interpreter='script_interpreter',
arguments='arguments to pass to program',
)
test.run(
program='program_or_script_to_run',
interpreter='script_interpreter',
arguments='arguments to pass to program',
chdir='directory_to_chdir_to',
stdin='input to feed to the program\n',
universal_newlines=True,
)
p = test.start(
program='program_or_script_to_run',
interpreter='script_interpreter',
arguments='arguments to pass to program',
universal_newlines=None,
)
test.finish(self, p)
test.pass_test()
test.pass_test(condition)
test.pass_test(condition, function)
test.fail_test()
test.fail_test(condition)
test.fail_test(condition, function)
test.fail_test(condition, function, skip)
test.fail_test(condition, function, skip, message)
test.no_result()
test.no_result(condition)
test.no_result(condition, function)
test.no_result(condition, function, skip)
test.stdout()
test.stdout(run)
test.stderr()
test.stderr(run)
test.symlink(target, link)
test.banner(string)
test.banner(string, width)
test.diff(actual, expected)
test.diff_stderr(actual, expected)
test.diff_stdout(actual, expected)
test.match(actual, expected)
test.match_stderr(actual, expected)
test.match_stdout(actual, expected)
test.set_match_function(match, stdout, stderr)
test.match_exact("actual 1\nactual 2\n", "expected 1\nexpected 2\n")
test.match_exact(["actual 1\n", "actual 2\n"],
["expected 1\n", "expected 2\n"])
test.match_caseinsensitive("Actual 1\nACTUAL 2\n", "expected 1\nEXPECTED 2\n")
test.match_re("actual 1\nactual 2\n", regex_string)
test.match_re(["actual 1\n", "actual 2\n"], list_of_regexes)
test.match_re_dotall("actual 1\nactual 2\n", regex_string)
test.match_re_dotall(["actual 1\n", "actual 2\n"], list_of_regexes)
test.tempdir()
test.tempdir('temporary-directory')
test.sleep()
test.sleep(seconds)
test.where_is('foo')
test.where_is('foo', 'PATH1:PATH2')
test.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
test.unlink('file')
test.unlink('subdir', 'file')
The TestCmd module provides pass_test(), fail_test(), and no_result()
unbound functions that report test results for use with the Aegis change
management system. These methods terminate the test immediately,
reporting PASSED, FAILED, or NO RESULT respectively, and exiting with
status 0 (success), 1 or 2 respectively. This allows for a distinction
between an actual failed test and a test that could not be properly
evaluated because of an external condition (such as a full file system
or incorrect permissions).
import TestCmd
TestCmd.pass_test()
TestCmd.pass_test(condition)
TestCmd.pass_test(condition, function)
TestCmd.fail_test()
TestCmd.fail_test(condition)
TestCmd.fail_test(condition, function)
TestCmd.fail_test(condition, function, skip)
TestCmd.fail_test(condition, function, skip, message)
TestCmd.no_result()
TestCmd.no_result(condition)
TestCmd.no_result(condition, function)
TestCmd.no_result(condition, function, skip)
The TestCmd module also provides unbound global functions that handle
matching in the same way as the match_*() methods described above.
import TestCmd
test = TestCmd.TestCmd(match=TestCmd.match_exact)
test = TestCmd.TestCmd(match=TestCmd.match_caseinsensitive)
test = TestCmd.TestCmd(match=TestCmd.match_re)
test = TestCmd.TestCmd(match=TestCmd.match_re_dotall)
These functions are also available as static methods:
import TestCmd
test = TestCmd.TestCmd(match=TestCmd.TestCmd.match_exact)
test = TestCmd.TestCmd(match=TestCmd.TestCmd.match_caseinsensitive)
test = TestCmd.TestCmd(match=TestCmd.TestCmd.match_re)
test = TestCmd.TestCmd(match=TestCmd.TestCmd.match_re_dotall)
These static methods can be accessed by a string naming the method:
import TestCmd
test = TestCmd.TestCmd(match='match_exact')
test = TestCmd.TestCmd(match='match_caseinsensitive')
test = TestCmd.TestCmd(match='match_re')
test = TestCmd.TestCmd(match='match_re_dotall')
The TestCmd module provides unbound global functions that can be used
for the "diff" argument to TestCmd.TestCmd instantiation:
import TestCmd
test = TestCmd.TestCmd(match=TestCmd.match_re, diff=TestCmd.diff_re)
test = TestCmd.TestCmd(diff=TestCmd.simple_diff)
test = TestCmd.TestCmd(diff=TestCmd.context_diff)
test = TestCmd.TestCmd(diff=TestCmd.unified_diff)
These functions are also available as static methods:
import TestCmd
test = TestCmd.TestCmd(match=TestCmd.TestCmd.match_re, diff=TestCmd.TestCmd.diff_re)
test = TestCmd.TestCmd(diff=TestCmd.TestCmd.simple_diff)
test = TestCmd.TestCmd(diff=TestCmd.TestCmd.context_diff)
test = TestCmd.TestCmd(diff=TestCmd.TestCmd.unified_diff)
These static methods can be accessed by a string naming the method:
import TestCmd
test = TestCmd.TestCmd(match='match_re', diff='diff_re')
test = TestCmd.TestCmd(diff='simple_diff')
test = TestCmd.TestCmd(diff='context_diff')
test = TestCmd.TestCmd(diff='unified_diff')
The "diff" argument can also be used with standard difflib functions:
import difflib
test = TestCmd.TestCmd(diff=difflib.context_diff)
test = TestCmd.TestCmd(diff=difflib.unified_diff)
Lastly, the where_is() method also exists in an unbound function
version.
import TestCmd
TestCmd.where_is('foo')
TestCmd.where_is('foo', 'PATH1:PATH2')
TestCmd.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
"""
# Copyright 2000-2010 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
__author__ = "Steven Knight <knight at baldmt dot com>"
__revision__ = "TestCmd.py 1.3.D001 2010/06/03 12:58:27 knight"
__version__ = "1.3"
import atexit
import difflib
import errno
import os
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import threading
import time
import traceback
from collections import UserList, UserString
from subprocess import PIPE, STDOUT
IS_WINDOWS = sys.platform == 'win32'
IS_MACOS = sys.platform == 'darwin'
IS_64_BIT = sys.maxsize > 2**32
IS_PYPY = hasattr(sys, 'pypy_translation_info')
NEED_HELPER = os.environ.get('SCONS_NO_DIRECT_SCRIPT')
# sentinel for cases where None won't do
_Null = object()
__all__ = [
'diff_re',
'fail_test',
'no_result',
'pass_test',
'match_exact',
'match_caseinsensitive',
'match_re',
'match_re_dotall',
'python',
'_python_',
'TestCmd',
'to_bytes',
'to_str',
]
def is_List(e):
return isinstance(e, (list, UserList))
def to_bytes(s):
if isinstance(s, bytes):
return s
return bytes(s, 'utf-8')
def to_str(s):
if is_String(s):
return s
return str(s, 'utf-8')
def is_String(e):
return isinstance(e, (str, UserString))
testprefix = 'testcmd.'
if os.name in ('posix', 'nt'):
testprefix += "%s." % str(os.getpid())
re_space = re.compile(r'\s')
def _caller(tblist, skip):
string = ""
arr = []
for file, line, name, text in tblist:
if file[-10:] == "TestCmd.py":
break
arr = [(file, line, name, text)] + arr
atfrom = "at"
for file, line, name, text in arr[skip:]:
if name in ("?", "<module>"):
name = ""
else:
name = " (" + name + ")"
string = string + ("%s line %d of %s%s\n" % (atfrom, line, file, name))
atfrom = "\tfrom"
return string
def fail_test(self=None, condition=True, function=None, skip=0, message=None):
"""Causes a test to exit with a fail.
Reports that the test FAILED and exits with a status of 1, unless
a condition argument is supplied; if so the completion processing
takes place only if the condition is true.
Args:
self: a test class instance. Must be passed in explicitly
by the caller since this is an unbound method.
condition (optional): if false, return to let test continue.
function (optional): function to call before completion processing.
skip (optional): how many lines at the top of the traceback to skip.
message (optional): additional text to include in the fail message.
"""
if not condition:
return
if function is not None:
function()
of = ""
desc = ""
sep = " "
if self is not None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = _caller(traceback.extract_stack(), skip)
if message:
msg = "\t%s\n" % message
else:
msg = ""
sys.stderr.write("FAILED test" + of + desc + sep + at + msg)
sys.exit(1)
def no_result(self=None, condition=True, function=None, skip=0):
"""Causes a test to exit with a no result.
In testing parlance NO RESULT means the test could not be completed
for reasons that imply neither success nor failure - for example a
component needed to run the test could not be found. However, at this
point we still have an "outcome", so record the information and exit
with a status code of 2, unless a condition argument is supplied;
if so the completion processing takes place only if the condition is true.
The different exit code and message allows other logic to distinguish
from a fail and decide how to treat NO RESULT tests.
Args:
self: a test class instance. Must be passed in explicitly
by the caller since this is an unbound method.
condition (optional): if false, return to let test continue.
function (optional): function to call before completion processing.
skip (optional): how many lines at the top of the traceback to skip.
"""
if not condition:
return
if function is not None:
function()
of = ""
desc = ""
sep = " "
if self is not None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = _caller(traceback.extract_stack(), skip)
sys.stderr.write("NO RESULT for test" + of + desc + sep + at)
sys.exit(2)
def pass_test(self=None, condition=True, function=None):
"""Causes a test to exit with a pass.
Reports that the test PASSED and exits with a status of 0, unless
a condition argument is supplied; if so the completion processing
takes place, and the test passes, only if the condition is true.
Args:
self: a test class instance. Must be passed in explicitly
by the caller since this is an unbound method.
condition (optional): if false, return to let test continue.
function (optional): function to call before completion processing.
"""
if not condition:
return
if function is not None:
function()
sys.stderr.write("PASSED\n")
sys.exit(0)
def match_exact(lines=None, matches=None, newline=os.sep):
"""Match function using exact match.
:param lines: data lines
:type lines: str or list[str]
:param matches: expected lines to match
:type matches: str or list[str]
:param newline: line separator
:returns: None on failure, 1 on success.
"""
if isinstance(lines, bytes):
newline = to_bytes(newline)
if not is_List(lines):
lines = lines.split(newline)
if not is_List(matches):
matches = matches.split(newline)
if len(lines) != len(matches):
return None
for line, match in zip(lines, matches):
if line != match:
return None
return 1
def match_caseinsensitive(lines=None, matches=None):
"""Match function using case-insensitive matching.
Only a simplistic comparison is done, based on casefolding
the strings, which is what the Unicode Standard suggests for
caseless matching; this may still miss some matches.
:param lines: data lines
:type lines: str or list[str]
:param matches: expected lines to match
:type matches: str or list[str]
:returns: None on failure, 1 on success.
"""
if not is_List(lines):
lines = lines.split("\n")
if not is_List(matches):
matches = matches.split("\n")
if len(lines) != len(matches):
return None
for line, match in zip(lines, matches):
if line.casefold() != match.casefold():
return None
return 1
def match_re(lines=None, res=None):
"""Match function using line-by-line regular expression match.
:param lines: data lines
:type lines: str or list[str]
:param res: regular expression(s) for matching
:type res: str or list[str]
:returns: None on failure, 1 on success.
"""
if not is_List(lines):
# CRs mess up matching (Windows) so split carefully
lines = re.split('\r?\n', lines)
if not is_List(res):
res = res.split("\n")
if len(lines) != len(res):
print("match_re: expected %d lines, found %d" % (len(res), len(lines)))
return None
for i, (line, regex) in enumerate(zip(lines, res)):
s = r"^{}$".format(regex)
try:
expr = re.compile(s)
except re.error as e:
msg = "Regular expression error in %s: %s"
raise re.error(msg % (repr(s), e.args[0]))
if not expr.search(line):
miss_tmpl = "match_re: mismatch at line {}:\n search re='{}'\n line='{}'"
print(miss_tmpl.format(i, s, line))
return None
return 1
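# Illustrative examples (not part of the original module): each data line must
# fully match the regular expression at the same position.
#
#   match_re("abc 1\nabc 2", r"abc \d" + "\n" + r"abc \d")   # -> 1
#   match_re("abc 1\nxyz", r"abc \d" + "\n" + r"abc \d")     # -> None (prints the mismatch)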
def match_re_dotall(lines=None, res=None):
"""Match function using regular expression match.
Unlike match_re, the arguments are joined into single strings (if
necessary) and the whole text is matched against one regular
expression compiled with re.DOTALL.
:param lines: data lines
:type lines: str or list[str]
:param res: regular expression(s) for matching
:type res: str or list[str]
:returns: a match object on match, else None, like re.match
"""
if not isinstance(lines, str):
lines = "\n".join(lines)
if not isinstance(res, str):
res = "\n".join(res)
s = r"^{}$".format(res)
try:
expr = re.compile(s, re.DOTALL)
except re.error as e:
msg = "Regular expression error in %s: %s"
raise re.error(msg % (repr(s), e.args[0]))
return expr.match(lines)
def simple_diff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=0, lineterm=''):
r"""Compare two sequences of lines; generate the delta as a simple diff.
Similar to difflib.context_diff and difflib.unified_diff but
output is like from the "diff" command without arguments. The function
keeps the same signature as the difflib ones so they will be
interchangeable, but except for lineterm, the arguments beyond the
two sequences are ignored in this version. By default, the
diff is not created with trailing newlines; set the lineterm
argument to '\n' to do so.
Example:
>>> print(''.join(simple_diff('one\ntwo\nthree\nfour\n'.splitlines(True),
... 'zero\none\ntree\nfour\n'.splitlines(True), lineterm='\n')))
0a1
> zero
2,3c3
< two
< three
---
> tree
"""
a = [to_str(q) for q in a]
b = [to_str(q) for q in b]
sm = difflib.SequenceMatcher(None, a, b)
def comma(x1, x2):
return x1 + 1 == x2 and str(x2) or '%s,%s' % (x1 + 1, x2)
for op, a1, a2, b1, b2 in sm.get_opcodes():
if op == 'delete':
yield "{}d{}{}".format(comma(a1, a2), b1, lineterm)
for l in a[a1:a2]:
yield '< ' + l
elif op == 'insert':
yield "{}a{}{}".format(a1, comma(b1, b2), lineterm)
for l in b[b1:b2]:
yield '> ' + l
elif op == 'replace':
yield "{}c{}{}".format(comma(a1, a2), comma(b1, b2), lineterm)
for l in a[a1:a2]:
yield '< ' + l
yield '---{}'.format(lineterm)
for l in b[b1:b2]:
yield '> ' + l
def diff_re(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""Compare a and b (lists of strings) where a are regular expressions.
A simple "diff" of two sets of lines when the expected lines
are regular expressions. This is a really dumb thing that
just compares each line in turn, so it doesn't look for
chunks of matching lines and the like--but at least it lets
you know exactly which line first didn't compare correctly.
Raises:
re.error: if a regex fails to compile
"""
result = []
diff = len(a) - len(b)
if diff < 0:
a = a + [''] * (-diff)
elif diff > 0:
b = b + [''] * diff
for i, (aline, bline) in enumerate(zip(a, b)):
s = r"^{}$".format(aline)
try:
expr = re.compile(s)
except re.error as e:
msg = "Regular expression error in %s: %s"
raise re.error(msg % (repr(s), e.args[0]))
if not expr.search(bline):
result.append("%sc%s" % (i + 1, i + 1))
result.append('< ' + repr(a[i]))
result.append('---')
result.append('> ' + repr(b[i]))
return result
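# Illustrative example (not part of the original module): expected lines are
# regular expressions compared position by position, and differing positions
# are reported in a plain diff-like form.
#
#   diff_re([r"abc \d", r"def"], ["abc 1", "xyz"])
#   # -> ['2c2', "< 'def'", '---', "> 'xyz'"]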
if os.name == 'posix':
def escape(arg):
"""escape shell special characters"""
slash = '\\'
special = '"$'
arg = arg.replace(slash, slash + slash)
for c in special:
arg = arg.replace(c, slash + c)
if re_space.search(arg):
arg = '"' + arg + '"'
return arg
else:
# Windows does not allow special characters in file names
# anyway, so no need for an escape function, we will just quote
# the arg.
def escape(arg):
if re_space.search(arg):
arg = '"' + arg + '"'
return arg
if os.name == 'java':
python = os.path.join(sys.prefix, 'jython')
else:
python = os.environ.get('python_executable', sys.executable)
_python_ = escape(python)
if sys.platform == 'win32':
default_sleep_seconds = 2
def where_is(file, path=None, pathext=None):
if path is None:
path = os.environ['PATH']
if is_String(path):
path = path.split(os.pathsep)
if pathext is None:
pathext = os.environ['PATHEXT']
if is_String(pathext):
pathext = pathext.split(os.pathsep)
for ext in pathext:
if ext.casefold() == file[-len(ext):].casefold():
pathext = ['']
break
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
return fext
return None
else:
def where_is(file, path=None, pathext=None):
if path is None:
path = os.environ['PATH']
if is_String(path):
path = path.split(os.pathsep)
for dir in path:
f = os.path.join(dir, file)
if os.path.isfile(f):
try:
st = os.stat(f)
except OSError:
continue
if stat.S_IMODE(st[stat.ST_MODE]) & 0o111:
return f
return None
default_sleep_seconds = 1
# From Josiah Carlson,
# ASPN : Python Cookbook : Module to allow Asynchronous subprocess use on Windows and Posix platforms
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440554
if sys.platform == 'win32': # and subprocess.mswindows:
try:
from win32file import ReadFile, WriteFile
from win32pipe import PeekNamedPipe
except ImportError:
# If PyWin32 is not available, try ctypes instead
# XXX These replicate _just_enough_ PyWin32 behaviour for our purposes
import ctypes
from ctypes.wintypes import DWORD
def ReadFile(hFile, bufSize, ol=None):
assert ol is None
lpBuffer = ctypes.create_string_buffer(bufSize)
bytesRead = DWORD()
bErr = ctypes.windll.kernel32.ReadFile(
hFile, lpBuffer, bufSize, ctypes.byref(bytesRead), ol)
if not bErr:
raise ctypes.WinError()
return (0, ctypes.string_at(lpBuffer, bytesRead.value))
def WriteFile(hFile, data, ol=None):
assert ol is None
bytesWritten = DWORD()
bErr = ctypes.windll.kernel32.WriteFile(
hFile, data, len(data), ctypes.byref(bytesWritten), ol)
if not bErr:
raise ctypes.WinError()
return (0, bytesWritten.value)
def PeekNamedPipe(hPipe, size):
assert size == 0
bytesAvail = DWORD()
bErr = ctypes.windll.kernel32.PeekNamedPipe(
hPipe, None, size, None, ctypes.byref(bytesAvail), None)
if not bErr:
raise ctypes.WinError()
return ("", bytesAvail.value, None)
import msvcrt
else:
import select
import fcntl
try:
fcntl.F_GETFL
except AttributeError:
fcntl.F_GETFL = 3
try:
fcntl.F_SETFL
except AttributeError:
fcntl.F_SETFL = 4
class Popen(subprocess.Popen):
def recv(self, maxsize=None):
return self._recv('stdout', maxsize)
def recv_err(self, maxsize=None):
return self._recv('stderr', maxsize)
def send_recv(self, input='', maxsize=None):
return self.send(input), self.recv(maxsize), self.recv_err(maxsize)
def get_conn_maxsize(self, which, maxsize):
if maxsize is None:
maxsize = 1024
elif maxsize < 1:
maxsize = 1
return getattr(self, which), maxsize
def _close(self, which):
getattr(self, which).close()
setattr(self, which, None)
if sys.platform == 'win32': # and subprocess.mswindows:
def send(self, input):
input = to_bytes(input)
if not self.stdin:
return None
try:
x = msvcrt.get_osfhandle(self.stdin.fileno())
(errCode, written) = WriteFile(x, input)
except ValueError:
return self._close('stdin')
except (subprocess.pywintypes.error, Exception) as why:
if why.args[0] in (109, errno.ESHUTDOWN):
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
x = msvcrt.get_osfhandle(conn.fileno())
(read, nAvail, nMessage) = PeekNamedPipe(x, 0)
if maxsize < nAvail:
nAvail = maxsize
if nAvail > 0:
(errCode, read) = ReadFile(x, nAvail, None)
except ValueError:
return self._close(which)
except (subprocess.pywintypes.error, Exception) as why:
if why.args[0] in (109, errno.ESHUTDOWN):
return self._close(which)
raise
# if self.universal_newlines:
# read = self._translate_newlines(read)
return read
else:
def send(self, input):
if not self.stdin:
return None
if not select.select([], [self.stdin], [], 0)[1]:
return 0
try:
written = os.write(self.stdin.fileno(),
bytearray(input, 'utf-8'))
except OSError as why:
if why.args[0] == errno.EPIPE: # broken pipe
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
flags = fcntl.fcntl(conn, fcntl.F_GETFL)
except TypeError:
flags = None
else:
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags | os.O_NONBLOCK)
try:
if not select.select([conn], [], [], 0)[0]:
return ''
r = conn.read(maxsize)
if not r:
return self._close(which)
# if self.universal_newlines:
# r = self._translate_newlines(r)
return r
finally:
if not conn.closed and flags is not None:
fcntl.fcntl(conn, fcntl.F_SETFL, flags)
disconnect_message = "Other end disconnected!"
def recv_some(p, t=.1, e=1, tr=5, stderr=0):
if tr < 1:
tr = 1
x = time.time() + t
y = []
r = ''
pr = p.recv
if stderr:
pr = p.recv_err
while time.time() < x or r:
r = pr()
if r is None:
if e:
raise Exception(disconnect_message)
else:
break
elif r:
y.append(r)
else:
time.sleep(max((x - time.time()) / tr, 0))
return ''.join(y)
def send_all(p, data):
while len(data):
sent = p.send(data)
if sent is None:
raise Exception(disconnect_message)
data = memoryview(data)[sent:]
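# Illustrative usage sketch (not part of the original module): the Popen
# subclass above adds non-blocking pipe reads on top of subprocess.Popen;
# recv_some() keeps polling a pipe for up to ``t`` seconds and returns whatever
# arrived (pass e=0 so a closed pipe ends the poll instead of raising).
#
#   p = Popen([python, '-u', '-c', 'print("ready")'],
#             stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
#   out = recv_some(p, t=1, e=0)   # -> 'ready\n' once the child has written it
#   p.wait()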
_Cleanup = []
@atexit.register
def _clean():
global _Cleanup
cleanlist = [c for c in _Cleanup if c]
del _Cleanup[:]
cleanlist.reverse()
for test in cleanlist:
test.cleanup()
class TestCmd:
"""Class TestCmd
"""
def __init__(
self,
description=None,
program=None,
interpreter=None,
workdir=None,
subdir=None,
verbose=None,
match=None,
match_stdout=None,
match_stderr=None,
diff=None,
diff_stdout=None,
diff_stderr=None,
combine=0,
universal_newlines=True,
timeout=None,
):
self.external = os.environ.get('SCONS_EXTERNAL_TEST', 0)
self._cwd = os.getcwd()
self.description_set(description)
self.program_set(program)
self.interpreter_set(interpreter)
if verbose is None:
try:
verbose = max(0, int(os.environ.get('TESTCMD_VERBOSE', 0)))
except ValueError:
verbose = 0
self.verbose_set(verbose)
self.combine = combine
self.universal_newlines = universal_newlines
self.process = None
self.set_timeout(timeout)
self.set_match_function(match, match_stdout, match_stderr)
self.set_diff_function(diff, diff_stdout, diff_stderr)
self._dirlist = []
self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0}
preserve_value = os.environ.get('PRESERVE', False)
if preserve_value not in [0, '0', 'False']:
self._preserve['pass_test'] = os.environ['PRESERVE']
self._preserve['fail_test'] = os.environ['PRESERVE']
self._preserve['no_result'] = os.environ['PRESERVE']
else:
try:
self._preserve['pass_test'] = os.environ['PRESERVE_PASS']
except KeyError:
pass
try:
self._preserve['fail_test'] = os.environ['PRESERVE_FAIL']
except KeyError:
pass
try:
self._preserve['no_result'] = os.environ['PRESERVE_NO_RESULT']
except KeyError:
pass
self._stdout = []
self._stderr = []
self.status = None
self.condition = 'no_result'
self.workdir_set(workdir)
self.subdir(subdir)
try:
self.fixture_dirs = (os.environ['FIXTURE_DIRS']).split(os.pathsep)
except KeyError:
self.fixture_dirs = []
def __del__(self):
self.cleanup()
def __repr__(self):
return "%x" % id(self)
banner_char = '='
banner_width = 80
def banner(self, s, width=None):
if width is None:
width = self.banner_width
return s + self.banner_char * (width - len(s))
escape = staticmethod(escape)
def canonicalize(self, path):
if is_List(path):
path = os.path.join(*path)
if not os.path.isabs(path):
path = os.path.join(self.workdir, path)
return path
def chmod(self, path, mode):
"""Changes permissions on the specified file or directory."""
path = self.canonicalize(path)
os.chmod(path, mode)
def cleanup(self, condition=None):
"""Removes any temporary working directories.
Cleans the TestCmd instance. If the environment variable PRESERVE was
set when the TestCmd environment was created, temporary working
directories are not removed. If any of the environment variables
PRESERVE_PASS, PRESERVE_FAIL, or PRESERVE_NO_RESULT were set
when the TestCmd environment was created, then temporary working
directories are not removed if the test passed, failed, or had
no result, respectively. Temporary working directories are also
preserved for conditions specified via the preserve method.
Typically, this method is not called directly, but is used when
the script exits to clean up temporary working directories as
appropriate for the exit status.
"""
if not self._dirlist:
return
os.chdir(self._cwd)
self.workdir = None
if condition is None:
condition = self.condition
if self._preserve[condition]:
for dir in self._dirlist:
print("Preserved directory " + dir)
else:
list = self._dirlist[:]
list.reverse()
for dir in list:
self.writable(dir, 1)
shutil.rmtree(dir, ignore_errors=1)
self._dirlist = []
global _Cleanup
if self in _Cleanup:
_Cleanup.remove(self)
def command_args(self, program=None, interpreter=None, arguments=None):
if not self.external:
if program:
if isinstance(program, str) and not os.path.isabs(program):
program = os.path.join(self._cwd, program)
else:
program = self.program
if not interpreter:
interpreter = self.interpreter
else:
if not program:
program = self.program
if not interpreter:
interpreter = self.interpreter
if not isinstance(program, (list, tuple)):
program = [program]
cmd = list(program)
if interpreter:
if not isinstance(interpreter, (list, tuple)):
interpreter = [interpreter]
cmd = list(interpreter) + cmd
if arguments:
if isinstance(arguments, str):
arguments = arguments.split()
cmd.extend(arguments)
return cmd
def description_set(self, description):
"""Set the description of the functionality being tested. """
self.description = description
def set_diff_function(self, diff=_Null, stdout=_Null, stderr=_Null):
"""Sets the specified diff functions."""
if diff is not _Null:
self._diff_function = diff
if stdout is not _Null:
self._diff_stdout_function = stdout
if stderr is not _Null:
self._diff_stderr_function = stderr
def diff(self, a, b, name=None, diff_function=None, *args, **kw):
if diff_function is None:
try:
diff_function = getattr(self, self._diff_function)
except TypeError:
diff_function = self._diff_function
if diff_function is None:
diff_function = self.simple_diff
if name is not None:
print(self.banner(name))
if not is_List(a):
a=a.splitlines()
if not is_List(b):
b=b.splitlines()
args = (a, b) + args
for line in diff_function(*args, **kw):
print(line)
def diff_stderr(self, a, b, *args, **kw):
"""Compare actual and expected file contents."""
try:
diff_stderr_function = getattr(self, self._diff_stderr_function)
except TypeError:
diff_stderr_function = self._diff_stderr_function
return self.diff(a, b, diff_function=diff_stderr_function, *args, **kw)
def diff_stdout(self, a, b, *args, **kw):
"""Compare actual and expected file contents."""
try:
diff_stdout_function = getattr(self, self._diff_stdout_function)
except TypeError:
diff_stdout_function = self._diff_stdout_function
return self.diff(a, b, diff_function=diff_stdout_function, *args, **kw)
simple_diff = staticmethod(simple_diff)
diff_re = staticmethod(diff_re)
context_diff = staticmethod(difflib.context_diff)
unified_diff = staticmethod(difflib.unified_diff)
def fail_test(self, condition=True, function=None, skip=0, message=None):
"""Cause the test to fail."""
if not condition:
return
self.condition = 'fail_test'
fail_test(self=self,
condition=condition,
function=function,
skip=skip,
message=message)
def interpreter_set(self, interpreter):
"""Set the program to be used to interpret the program
under test as a script.
"""
self.interpreter = interpreter
def set_match_function(self, match=_Null, stdout=_Null, stderr=_Null):
"""Sets the specified match functions. """
if match is not _Null:
self._match_function = match
if stdout is not _Null:
self._match_stdout_function = stdout
if stderr is not _Null:
self._match_stderr_function = stderr
def match(self, lines, matches):
"""Compare actual and expected file contents."""
try:
match_function = getattr(self, self._match_function)
except TypeError:
match_function = self._match_function
if match_function is None:
# Default is regular expression matches.
match_function = self.match_re
return match_function(lines, matches)
def match_stderr(self, lines, matches):
"""Compare actual and expected file contents."""
try:
match_stderr_function = getattr(self, self._match_stderr_function)
except TypeError:
match_stderr_function = self._match_stderr_function
if match_stderr_function is None:
# Default is to use whatever match= is set to.
match_stderr_function = self.match
return match_stderr_function(lines, matches)
def match_stdout(self, lines, matches):
"""Compare actual and expected file contents."""
try:
match_stdout_function = getattr(self, self._match_stdout_function)
except TypeError:
match_stdout_function = self._match_stdout_function
if match_stdout_function is None:
# Default is to use whatever match= is set to.
match_stdout_function = self.match
return match_stdout_function(lines, matches)
match_exact = staticmethod(match_exact)
match_caseinsensitive = staticmethod(match_caseinsensitive)
match_re = staticmethod(match_re)
match_re_dotall = staticmethod(match_re_dotall)
def no_result(self, condition=True, function=None, skip=0):
"""Report that the test could not be run."""
if not condition:
return
self.condition = 'no_result'
no_result(self=self,
condition=condition,
function=function,
skip=skip)
def pass_test(self, condition=True, function=None):
"""Cause the test to pass."""
if not condition:
return
self.condition = 'pass_test'
pass_test(self=self, condition=condition, function=function)
def preserve(self, *conditions):
"""Preserves temporary working directories.
Arrange for the temporary working directories for the
specified TestCmd environment to be preserved for one or more
conditions. If no conditions are specified, arranges for
the temporary working directories to be preserved for all
conditions.
"""
if not conditions:
conditions = ('pass_test', 'fail_test', 'no_result')
for cond in conditions:
self._preserve[cond] = 1
def program_set(self, program):
"""Sets the executable program or script to be tested."""
if not self.external:
if program and not os.path.isabs(program):
program = os.path.join(self._cwd, program)
self.program = program
def read(self, file, mode='rb', newline=None):
"""Reads and returns the contents of the specified file name.
The file name may be a list, in which case the elements are
concatenated with the os.path.join() method. The file is
assumed to be under the temporary working directory unless it
is an absolute path name. The I/O mode for the file may
be specified; it must begin with an 'r'. The default is
'rb' (binary read).
"""
file = self.canonicalize(file)
if mode[0] != 'r':
raise ValueError("mode must begin with 'r'")
if 'b' not in mode:
with open(file, mode, newline=newline) as f:
return f.read()
else:
with open(file, mode) as f:
return f.read()
def rmdir(self, dir):
"""Removes the specified dir name.
The dir name may be a list, in which case the elements are
concatenated with the os.path.join() method. The dir is
assumed to be under the temporary working directory unless it
is an absolute path name.
The dir must be empty.
"""
dir = self.canonicalize(dir)
os.rmdir(dir)
def _timeout(self):
self.process.terminate()
self.timer.cancel()
self.timer = None
def set_timeout(self, timeout):
self.timeout = timeout
self.timer = None
def parse_path(self, path, suppress_current=False):
"""Return a list with the single path components of path."""
head, tail = os.path.split(path)
result = []
if not tail:
if head == path:
return [head]
else:
result.append(tail)
head, tail = os.path.split(head)
while head and tail:
result.append(tail)
head, tail = os.path.split(head)
result.append(head or tail)
result.reverse()
return result
def dir_fixture(self, srcdir, dstdir=None):
""" Copies the contents of the fixture directory to the test directory.
If srcdir is an absolute path, it is tried directly, else
the fixture_dirs are searched in order to find the named fixture
directory. To tightly control the search order, the harness may
be called with FIXTURE_DIRS set including the test source directory
in the desired position, else it will be tried last.
If dstdir is not an absolute path, it is taken as a destination under
the working dir (if omitted, the default None indicates '.',
i.e. the test dir). dstdir is created automatically if needed.
srcdir or dstdir may be a list, in which case the elements are first
joined into a pathname.
"""
if is_List(srcdir):
srcdir = os.path.join(*srcdir)
spath = srcdir
if srcdir and self.fixture_dirs and not os.path.isabs(srcdir):
for dir in self.fixture_dirs:
spath = os.path.join(dir, srcdir)
if os.path.isdir(spath):
break
else:
spath = srcdir
if not dstdir or dstdir == '.':
dstdir = self.workdir
else:
if is_List(dstdir):
dstdir = os.path.join(*dstdir)
if os.path.isabs(dstdir):
os.makedirs(dstdir, exist_ok=True)
else:
dstlist = self.parse_path(dstdir)
if dstlist and dstlist[0] == ".":
dstdir = os.path.join(*dstlist[1:])
self.subdir(dstdir)
for entry in os.listdir(spath):
epath = os.path.join(spath, entry)
dpath = os.path.join(dstdir, entry)
if os.path.isdir(epath):
# Copy the subfolder
shutil.copytree(epath, dpath)
else:
shutil.copy(epath, dpath)
def file_fixture(self, srcfile, dstfile=None):
""" Copies a fixture file to the test directory, optionally renaming.
If srcfile is an absolute path, it is tried directly, else
the fixture_dirs are searched in order to find the named fixture
file. To tightly control the search order, the harness may
be called with FIXTURE_DIRS also including the test source directory
in the desired place, it will otherwise be tried last.
dstfile is the name to give the copied file; if the argument
is omitted the basename of srcfile is used. If dstfile is not
an absolute path name, it is taken as relative to the temporary
working directory. Any directory components of dstfile are
created automatically if needed.
srcfile or dstfile may be a list, in which case the elements are first
joined into a pathname.
"""
if is_List(srcfile):
srcfile = os.path.join(*srcfile)
srcpath, srctail = os.path.split(srcfile)
spath = srcfile
if srcfile and self.fixture_dirs and not os.path.isabs(srcfile):
for dir in self.fixture_dirs:
spath = os.path.join(dir, srcfile)
if os.path.isfile(spath):
break
else:
spath = srcfile
if not dstfile:
if srctail:
dpath = os.path.join(self.workdir, srctail)
else:
return
else:
dstdir, dsttail = os.path.split(dstfile)
if dstdir:
# if dstfile has a dir part, and is not abspath, create
if os.path.isabs(dstdir):
os.makedirs(dstdir, exist_ok=True)
dpath = dstfile
else:
dstlist = self.parse_path(dstdir)
if dstlist and dstlist[0] == ".":
# strip leading ./ if present
dstdir = os.path.join(*dstlist[1:])
self.subdir(dstdir)
dpath = os.path.join(self.workdir, dstfile)
else:
dpath = os.path.join(self.workdir, dstfile)
shutil.copy(spath, dpath)
def start(self, program=None,
interpreter=None,
arguments=None,
universal_newlines=None,
timeout=_Null,
**kw):
""" Starts a program or script for the test environment.
The specified program will have the original directory
prepended unless it is enclosed in a [list].
"""
cmd = self.command_args(program, interpreter, arguments)
if self.verbose:
cmd_string = ' '.join([self.escape(c) for c in cmd])
sys.stderr.write(cmd_string + "\n")
if universal_newlines is None:
universal_newlines = self.universal_newlines
# On Windows, if we make stdin a pipe when we plan to send
# no input, and the test program exits before
# Popen calls msvcrt.open_osfhandle, that call will fail.
# So don't use a pipe for stdin if we don't need one.
stdin = kw.get('stdin', None)
if stdin is not None:
stdin = PIPE
combine = kw.get('combine', self.combine)
if combine:
stderr_value = STDOUT
else:
stderr_value = PIPE
if timeout is _Null:
timeout = self.timeout
if timeout:
self.timer = threading.Timer(float(timeout), self._timeout)
self.timer.start()
if sys.platform == 'win32':
# Set this, otherwise stdout/stderr pipes default to
# windows default locale cp1252 which will throw exception
# if using non-ascii characters.
# For example test/Install/non-ascii-name.py
os.environ['PYTHONIOENCODING'] = 'utf-8'
# It seems that all pythons up to py3.6 still set text mode if you set encoding.
# TODO: File enhancement request on python to propagate universal_newlines even
# if encoding is set.
p = Popen(cmd,
stdin=stdin,
stdout=PIPE,
stderr=stderr_value,
env=os.environ,
universal_newlines=False)
self.process = p
return p
@staticmethod
def fix_binary_stream(stream):
"""Handle stream from popen when we specify not universal_newlines
This will read from the pipes in binary mode, will not decode the
output, and will not convert line endings to \n.
We do this because in py3 (3.5) with universal_newlines=True, it will
choose the default system locale to decode the output, and this breaks unicode
output. Specifically test/option--tree.py which outputs a unicode char.
py 3.6 allows us to pass an encoding param to popen thus not requiring the decode
nor end of line handling, because we propagate universal_newlines as specified.
TODO: Do we need to pass universal newlines into this function?
"""
if not stream:
return stream
# It seems that py3.6 still sets text mode if you set encoding.
stream = stream.decode('utf-8', errors='replace')
return stream.replace('\r\n', '\n')
def finish(self, popen=None, **kw):
""" Finishes and waits for the process.
Process being run under control of the specified popen argument
is waited for, recording the exit status, output and error output.
"""
if popen is None:
popen = self.process
stdout, stderr = popen.communicate()
stdout = self.fix_binary_stream(stdout)
stderr = self.fix_binary_stream(stderr)
if self.timer:
self.timer.cancel()
self.timer = None
self.status = popen.returncode
self.process = None
self._stdout.append(stdout or '')
self._stderr.append(stderr or '')
def run(self, program=None,
interpreter=None,
arguments=None,
chdir=None,
stdin=None,
universal_newlines=None,
timeout=_Null):
"""Runs a test of the program or script for the test environment.
Output and error output are saved for future retrieval via
the stdout() and stderr() methods.
The specified program will have the original directory
prepended unless it is enclosed in a [list].
"""
if self.external:
if not program:
program = self.program
if not interpreter:
interpreter = self.interpreter
if universal_newlines is None:
universal_newlines = self.universal_newlines
if chdir:
oldcwd = os.getcwd()
if not os.path.isabs(chdir):
chdir = os.path.join(self.workpath(chdir))
if self.verbose:
sys.stderr.write("chdir(" + chdir + ")\n")
os.chdir(chdir)
p = self.start(program=program,
interpreter=interpreter,
arguments=arguments,
universal_newlines=universal_newlines,
timeout=timeout,
stdin=stdin)
if is_List(stdin):
stdin = ''.join(stdin)
if stdin:
stdin = to_bytes(stdin)
# TODO(sgk): figure out how to re-use the logic in the .finish()
# method above. Just calling it from here causes problems with
# subclasses that redefine .finish(). We could abstract this
# into Yet Another common method called both here and by .finish(),
# but that seems ill-thought-out.
stdout, stderr = p.communicate(input=stdin)
if self.timer:
self.timer.cancel()
self.timer = None
self.status = p.returncode
self.process = None
stdout = self.fix_binary_stream(stdout)
stderr = self.fix_binary_stream(stderr)
self._stdout.append(stdout or '')
self._stderr.append(stderr or '')
if chdir:
os.chdir(oldcwd)
if self.verbose >= 2:
write = sys.stdout.write
write('============ STATUS: %d\n' % self.status)
out = self.stdout()
if out or self.verbose >= 3:
write('============ BEGIN STDOUT (len=%d):\n' % len(out))
write(out)
write('============ END STDOUT\n')
err = self.stderr()
if err or self.verbose >= 3:
write('============ BEGIN STDERR (len=%d)\n' % len(err))
write(err)
write('============ END STDERR\n')
def sleep(self, seconds=default_sleep_seconds):
"""Sleeps at least the specified number of seconds.
If no number is specified, sleeps at least the minimum number of
seconds necessary to advance file time stamps on the current
system. Sleeping more seconds is all right.
"""
time.sleep(seconds)
def stderr(self, run=None):
"""Returns the error output from the specified run number.
If there is no specified run number, then returns the error
output of the last run. If the run number is less than zero,
then returns the error output from that many runs back from the
current run.
"""
if not run:
run = len(self._stderr)
elif run < 0:
run = len(self._stderr) + run
run = run - 1
return self._stderr[run]
def stdout(self, run=None):
"""Returns the stored standard output from a given run.
Args:
run: run number to select. If run number is omitted,
return the standard output of the most recent run.
If negative, use as a relative offset, so that -2
means the run two prior to the most recent.
Returns:
selected stdout string or None if there are no
stored runs.
"""
if not run:
run = len(self._stdout)
elif run < 0:
run = len(self._stdout) + run
run = run - 1
try:
return self._stdout[run]
except IndexError:
return None
def subdir(self, *subdirs):
"""Creates new subdirectories under the temporary working directory.
Creates a subdir for each argument. An argument may be a list,
in which case the list elements are joined into a path.
Returns the number of directories created, not including
intermediate directories, for historical reasons. A directory
which already existed is counted as "created".
"""
count = 0
for sub in subdirs:
if sub is None:
continue
if is_List(sub):
sub = os.path.join(*sub)
new = os.path.join(self.workdir, sub)
try:
# okay to exist, we just do this for counting
os.makedirs(new, exist_ok=True)
count = count + 1
except OSError as e:
pass
return count
def symlink(self, target, link):
"""Creates a symlink to the specified target.
The link name may be a list, in which case the elements are
concatenated with the os.path.join() method. The link is
assumed to be under the temporary working directory unless it
is an absolute path name. The target is *not* assumed to be
under the temporary working directory.
"""
if sys.platform == 'win32':
# Skip this on Windows: creating symlinks requires user permissions
# that aren't always present, and we don't have a good way to detect
# those permissions yet.
return
link = self.canonicalize(link)
try:
os.symlink(target, link)
except AttributeError:
pass # Windows has no symlink
def tempdir(self, path=None):
"""Creates a temporary directory.
A unique directory name is generated if no path name is specified.
The directory is created, and will be removed when the TestCmd
object is destroyed.
"""
if path is None:
try:
path = tempfile.mkdtemp(prefix=testprefix)
except TypeError:
path = tempfile.mkdtemp()
else:
os.mkdir(path)
# Symlinks in the path will report things
# differently from os.getcwd(), so chdir there
# and back to fetch the canonical path.
cwd = os.getcwd()
try:
os.chdir(path)
path = os.getcwd()
finally:
os.chdir(cwd)
# Uppercase the drive letter since the case of drive
# letters is pretty much random on win32:
drive, rest = os.path.splitdrive(path)
if drive:
path = drive.upper() + rest
#
self._dirlist.append(path)
global _Cleanup
if self not in _Cleanup:
_Cleanup.append(self)
return path
def touch(self, path, mtime=None):
"""Updates the modification time on the specified file or directory.
The default is to update to the
current time if no explicit modification time is specified.
"""
path = self.canonicalize(path)
atime = os.path.getatime(path)
if mtime is None:
mtime = time.time()
os.utime(path, (atime, mtime))
def unlink(self, file):
"""Unlinks the specified file name.
The file name may be a list, in which case the elements are
concatenated with the os.path.join() method. The file is
assumed to be under the temporary working directory unless it
is an absolute path name.
"""
file = self.canonicalize(file)
os.unlink(file)
def verbose_set(self, verbose):
"""Sets the verbose level."""
self.verbose = verbose
def where_is(self, file, path=None, pathext=None):
"""Finds an executable file."""
if is_List(file):
file = os.path.join(*file)
if not os.path.isabs(file):
file = where_is(file, path, pathext)
return file
def workdir_set(self, path):
"""Creates a temporary working directory with the specified path name.
If the path is a null string (''), a unique directory name is created.
"""
if path is not None:
if path == '':
path = None
path = self.tempdir(path)
self.workdir = path
def workpath(self, *args):
"""Returns the absolute path name to a subdirectory or file within the current temporary working directory.
Concatenates the temporary working directory name with the specified
arguments using the os.path.join() method.
"""
return os.path.join(self.workdir, *args)
def readable(self, top, read=True):
"""Makes the specified directory tree readable or unreadable.
Tree is made readable if `read` evaluates True (the default),
else it is made not readable.
This method has no effect on Windows systems, which use a
completely different mechanism to control file readability.
"""
if sys.platform == 'win32':
return
if read:
def do_chmod(fname):
try:
st = os.stat(fname)
except OSError:
pass
else:
os.chmod(fname, stat.S_IMODE(
st[stat.ST_MODE] | stat.S_IREAD))
else:
def do_chmod(fname):
try:
st = os.stat(fname)
except OSError:
pass
else:
os.chmod(fname, stat.S_IMODE(
st[stat.ST_MODE] & ~stat.S_IREAD))
if os.path.isfile(top):
# If it's a file, that's easy, just chmod it.
do_chmod(top)
elif read:
# It's a directory and we're trying to turn on read
# permission, so it's also pretty easy, just chmod the
# directory and then chmod every entry on our walk down the
# tree.
do_chmod(top)
for dirpath, dirnames, filenames in os.walk(top):
for name in dirnames + filenames:
do_chmod(os.path.join(dirpath, name))
else:
# It's a directory and we're trying to turn off read
# permission, which means we have to chmod the directories
# in the tree bottom-up, lest disabling read permission from
# the top down get in the way of being able to get at lower
# parts of the tree.
for dirpath, dirnames, filenames in os.walk(top, topdown=0):
for name in dirnames + filenames:
do_chmod(os.path.join(dirpath, name))
do_chmod(top)
def writable(self, top, write=True):
"""Make the specified directory tree writable or unwritable.
Tree is made writable if `write` evaluates True (the default),
else it is made not writable.
"""
if sys.platform == 'win32':
if write:
def do_chmod(fname):
try:
os.chmod(fname, stat.S_IWRITE)
except OSError:
pass
else:
def do_chmod(fname):
try:
os.chmod(fname, stat.S_IREAD)
except OSError:
pass
else:
if write:
def do_chmod(fname):
try:
st = os.stat(fname)
except OSError:
pass
else:
os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE] | 0o200))
else:
def do_chmod(fname):
try:
st = os.stat(fname)
except OSError:
pass
else:
os.chmod(fname, stat.S_IMODE(
st[stat.ST_MODE] & ~0o200))
if os.path.isfile(top):
do_chmod(top)
else:
do_chmod(top)
for dirpath, dirnames, filenames in os.walk(top, topdown=0):
for name in dirnames + filenames:
do_chmod(os.path.join(dirpath, name))
def executable(self, top, execute=True):
"""Make the specified directory tree executable or not executable.
Tree is made executable if `execute` evaluates True (the default),
else it is made not executable.
This method has no effect on Windows systems, which use a
completely different mechanism to control file executability.
"""
if sys.platform == 'win32':
return
if execute:
def do_chmod(fname):
try:
st = os.stat(fname)
except OSError:
pass
else:
os.chmod(fname, stat.S_IMODE(
st[stat.ST_MODE] | stat.S_IEXEC))
else:
def do_chmod(fname):
try:
st = os.stat(fname)
except OSError:
pass
else:
os.chmod(fname, stat.S_IMODE(
st[stat.ST_MODE] & ~stat.S_IEXEC))
if os.path.isfile(top):
# If it's a file, that's easy, just chmod it.
do_chmod(top)
elif execute:
# It's a directory and we're trying to turn on execute
# permission, so it's also pretty easy, just chmod the
# directory and then chmod every entry on our walk down the
# tree.
do_chmod(top)
for dirpath, dirnames, filenames in os.walk(top):
for name in dirnames + filenames:
do_chmod(os.path.join(dirpath, name))
else:
# It's a directory and we're trying to turn off execute
# permission, which means we have to chmod the directories
# in the tree bottom-up, lest disabling execute permission from
# the top down get in the way of being able to get at lower
# parts of the tree.
for dirpath, dirnames, filenames in os.walk(top, topdown=0):
for name in dirnames + filenames:
do_chmod(os.path.join(dirpath, name))
do_chmod(top)
def write(self, file, content, mode='wb'):
"""Writes data to file.
The file is created under the temporary working directory.
Any subdirectories in the path must already exist. The
content is converted to the required type rather than failing
if there is a str/bytes mismatch.
:param file: name of file to write to. If a list, treated
as components of a path and concatenated into a path.
:type file: str or list(str)
:param content: data to write.
:type content: str or bytes
:param mode: file mode, default is binary.
:type mode: str
"""
file = self.canonicalize(file)
if mode[0] != 'w':
raise ValueError("mode must begin with 'w'")
with open(file, mode) as f:
try:
f.write(content)
except TypeError as e:
f.write(bytes(content, 'utf-8'))
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
[] |
[] |
[
"PRESERVE_PASS",
"PRESERVE_FAIL",
"PRESERVE",
"SCONS_NO_DIRECT_SCRIPT",
"PYTHONIOENCODING",
"PATHEXT",
"python_executable",
"SCONS_EXTERNAL_TEST",
"FIXTURE_DIRS",
"PRESERVE_NO_RESULT",
"TESTCMD_VERBOSE",
"PATH"
] |
[]
|
["PRESERVE_PASS", "PRESERVE_FAIL", "PRESERVE", "SCONS_NO_DIRECT_SCRIPT", "PYTHONIOENCODING", "PATHEXT", "python_executable", "SCONS_EXTERNAL_TEST", "FIXTURE_DIRS", "PRESERVE_NO_RESULT", "TESTCMD_VERBOSE", "PATH"]
|
python
| 12 | 0 | |
tests/scripts/task_build.py
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import shutil
import os
import logging
import multiprocessing
from pathlib import Path
from cmd_utils import Sh, init_log, REPO_ROOT
if __name__ == "__main__":
init_log()
parser = argparse.ArgumentParser(description="Build the project with CMake (optionally using sccache)")
parser.add_argument("--sccache-bucket", required=False, help="sccache bucket name")
parser.add_argument("--build-dir", default="build", help="build folder")
parser.add_argument("--cmake-target", help="optional build target")
args = parser.parse_args()
env = {"VTA_HW_PATH": str(Path(os.getcwd()) / "3rdparty" / "vta-hw")}
sccache_exe = shutil.which("sccache")
use_sccache = sccache_exe is not None and args.sccache_bucket is not None
build_dir = Path(os.getcwd()) / args.build_dir
build_dir = build_dir.relative_to(REPO_ROOT)
if use_sccache:
env["SCCACHE_BUCKET"] = args.sccache_bucket
env["CXX"] = "/opt/sccache/c++"
env["CC"] = "/opt/sccache/cc"
logging.info(f"Using sccache bucket: {args.sccache_bucket}")
else:
if sccache_exe is None:
reason = "'sccache' executable not found"
elif args.sccache_bucket is None:
reason = "no --sccache-bucket argument was provided"
else:
reason = "<unknown>"
logging.info(f"Not using sccache, reason: {reason}")
sh = Sh(env)
if use_sccache:
sh.run("sccache --start-server", check=False)
logging.info("===== sccache stats =====")
sh.run("sccache --show-stats")
if "CI" in os.environ:
executors = int(os.environ["CI_NUM_EXECUTORS"])
else:
executors = int(os.environ.get("CI_NUM_EXECUTORS", 1))
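# Split this machine's CPUs across the CI executors that may share it,
# but always build with at least one core.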
nproc = multiprocessing.cpu_count()
available_cpus = nproc // executors
num_cpus = max(available_cpus, 1)
sh.run("cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo ..", cwd=build_dir)
target = ""
if args.cmake_target:
target = args.cmake_target
sh.run(f"cmake --build . -- {target} VERBOSE=1 -j{num_cpus}", cwd=build_dir)
if use_sccache:
logging.info("===== sccache stats =====")
sh.run("sccache --show-stats")
|
[] |
[] |
[
"CI_NUM_EXECUTORS"
] |
[]
|
["CI_NUM_EXECUTORS"]
|
python
| 1 | 0 | |
go/test/endtoend/cluster/topo_process.go
|
/*
Copyright 2019 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path"
"strings"
"syscall"
"time"
"vitess.io/vitess/go/vt/log"
)
// TopoProcess is a generic handle for a running topo service.
// It can be spawned manually.
type TopoProcess struct {
Name string
Binary string
DataDirectory string
LogDirectory string
ListenClientURL string
AdvertiseClientURL string
Port int
Host string
VerifyURL string
PeerURL string
ZKPorts string
proc *exec.Cmd
exit chan error
}
// Setup starts a new topo service
func (topo *TopoProcess) Setup(topoFlavor string, cluster *LocalProcessCluster) (err error) {
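// Illustrative usage (sketch, not from this file): create a handle with
// TopoProcessInstance(...) and call Setup with the desired flavor, e.g.
// topo.Setup("etcd2", cluster); any flavor other than "zk2" or "consul"
// falls through to the etcd setup.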
switch topoFlavor {
case "zk2":
return topo.SetupZookeeper(cluster)
case "consul":
return topo.SetupConsul(cluster)
default:
return topo.SetupEtcd()
}
}
// SetupEtcd spawns a new etcd service and initializes it with the defaults.
// The service is kept running in the background until TearDown() is called.
func (topo *TopoProcess) SetupEtcd() (err error) {
topo.proc = exec.Command(
topo.Binary,
"--name", topo.Name,
"--data-dir", topo.DataDirectory,
"--listen-client-urls", topo.ListenClientURL,
"--advertise-client-urls", topo.AdvertiseClientURL,
"--initial-advertise-peer-urls", topo.PeerURL,
"--listen-peer-urls", topo.PeerURL,
"--initial-cluster", fmt.Sprintf("%s=%s", topo.Name, topo.PeerURL),
)
errFile, _ := os.Create(path.Join(topo.DataDirectory, "topo-stderr.txt"))
topo.proc.Stderr = errFile
topo.proc.Env = append(topo.proc.Env, os.Environ()...)
log.Infof("%v %v", strings.Join(topo.proc.Args, " "))
println("Starting topo with args " + strings.Join(topo.proc.Args, " "))
err = topo.proc.Start()
if err != nil {
return
}
topo.exit = make(chan error)
go func() {
topo.exit <- topo.proc.Wait()
}()
timeout := time.Now().Add(60 * time.Second)
for time.Now().Before(timeout) {
if topo.IsHealthy() {
return
}
select {
case err := <-topo.exit:
return fmt.Errorf("process '%s' exited prematurely (err: %s)", topo.Binary, err)
default:
time.Sleep(300 * time.Millisecond)
}
}
return fmt.Errorf("process '%s' timed out after 60s (err: %s)", topo.Binary, <-topo.exit)
}
// SetupZookeeper spawns a new zookeeper topo service and initializes it with the defaults.
// The service is kept running in the background until TearDown() is called.
func (topo *TopoProcess) SetupZookeeper(cluster *LocalProcessCluster) (err error) {
topo.ZKPorts = fmt.Sprintf("%d:%d:%d", cluster.GetAndReservePort(), cluster.GetAndReservePort(), topo.Port)
topo.proc = exec.Command(
topo.Binary,
"-log_dir", topo.LogDirectory,
"-zk.cfg", fmt.Sprintf("1@%v:%s", topo.Host, topo.ZKPorts),
"init",
)
errFile, _ := os.Create(path.Join(topo.DataDirectory, "topo-stderr.txt"))
topo.proc.Stderr = errFile
topo.proc.Env = append(topo.proc.Env, os.Environ()...)
log.Infof("%v %v", strings.Join(topo.proc.Args, " "))
fmt.Println(strings.Join(topo.proc.Args, " "))
err = topo.proc.Run()
if err != nil {
return
}
return
}
// SetupConsul spawns a new consul service and initializes it with the defaults.
// The service is kept running in the background until TearDown() is called.
func (topo *TopoProcess) SetupConsul(cluster *LocalProcessCluster) (err error) {
topo.VerifyURL = fmt.Sprintf("http://%s:%d/v1/kv/?keys", topo.Host, topo.Port)
configFile := path.Join(os.Getenv("VTDATAROOT"), "consul.json")
config := fmt.Sprintf(`{"ports":{"dns":%d,"http":%d,"serf_lan":%d,"serf_wan":%d}}`,
cluster.GetAndReservePort(), topo.Port, cluster.GetAndReservePort(), cluster.GetAndReservePort())
err = ioutil.WriteFile(configFile, []byte(config), 0666)
if err != nil {
return
}
topo.proc = exec.Command(
topo.Binary, "agent",
"-dev",
"-config-file", configFile,
)
errFile, _ := os.Create(path.Join(topo.DataDirectory, "topo-stderr.txt"))
topo.proc.Stderr = errFile
topo.proc.Env = append(topo.proc.Env, os.Environ()...)
log.Infof("%v %v", strings.Join(topo.proc.Args, " "))
println("Starting consul with args " + strings.Join(topo.proc.Args, " "))
err = topo.proc.Start()
if err != nil {
return
}
topo.exit = make(chan error)
go func() {
topo.exit <- topo.proc.Wait()
}()
timeout := time.Now().Add(60 * time.Second)
for time.Now().Before(timeout) {
if topo.IsHealthy() {
return
}
select {
case err := <-topo.exit:
return fmt.Errorf("process '%s' exited prematurely (err: %s)", topo.Binary, err)
default:
time.Sleep(300 * time.Millisecond)
}
}
return fmt.Errorf("process '%s' timed out after 60s (err: %s)", topo.Binary, <-topo.exit)
}
// TearDown shuts down the running topo service
func (topo *TopoProcess) TearDown(Cell string, originalVtRoot string, currentRoot string, keepdata bool, topoFlavor string) error {
if topoFlavor == "zk2" {
cmd := "shutdown"
if keepdata {
cmd = "teardown"
}
topo.proc = exec.Command(
topo.Binary,
"-log_dir", topo.LogDirectory,
"-zk.cfg", fmt.Sprintf("1@%v:%s", topo.Host, topo.ZKPorts),
cmd,
)
err := topo.proc.Run()
if err != nil {
return err
}
} else {
if topo.proc == nil || topo.exit == nil {
return nil
}
topo.removeTopoDirectories(Cell)
// Attempt graceful shutdown with SIGTERM first
_ = topo.proc.Process.Signal(syscall.SIGTERM)
select {
case <-topo.exit:
topo.proc = nil
return nil
case <-time.After(10 * time.Second):
topo.proc.Process.Kill()
topo.proc = nil
return <-topo.exit
}
}
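// keepData here is presumably a package-level flag defined elsewhere in this
// package; it is distinct from the keepdata parameter handled above.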
if !*keepData {
_ = os.RemoveAll(topo.DataDirectory)
_ = os.RemoveAll(currentRoot)
}
_ = os.Setenv("VTDATAROOT", originalVtRoot)
return nil
}
// IsHealthy checks whether the topo server is up and running.
func (topo *TopoProcess) IsHealthy() bool {
resp, err := http.Get(topo.VerifyURL)
if err != nil {
return false
}
defer resp.Body.Close()
return resp.StatusCode == 200
}
func (topo *TopoProcess) removeTopoDirectories(Cell string) {
_ = topo.ManageTopoDir("rmdir", "/vitess/global")
_ = topo.ManageTopoDir("rmdir", "/vitess/"+Cell)
}
// ManageTopoDir creates or removes the global and cell directories in etcd2
func (topo *TopoProcess) ManageTopoDir(command string, directory string) (err error) {
url := topo.VerifyURL + directory
payload := strings.NewReader(`{"dir":"true"}`)
if command == "mkdir" {
req, _ := http.NewRequest("PUT", url, payload)
req.Header.Add("content-type", "application/json")
_, err = http.DefaultClient.Do(req)
return err
} else if command == "rmdir" {
req, _ := http.NewRequest("DELETE", url+"?dir=true", payload)
_, err = http.DefaultClient.Do(req)
return err
} else {
return nil
}
}
// TopoProcessInstance returns a TopoProcess handle for an etcd service,
// configured with the given parameters.
// The process must be started manually by calling Setup().
func TopoProcessInstance(port int, peerPort int, hostname string, flavor string, name string) *TopoProcess {
binary := "etcd"
if flavor == "zk2" {
binary = "zkctl"
}
if flavor == "consul" {
binary = "consul"
}
topo := &TopoProcess{
Name: name,
Binary: binary,
Port: port,
Host: hostname,
}
topo.AdvertiseClientURL = fmt.Sprintf("http://%s:%d", topo.Host, topo.Port)
topo.ListenClientURL = fmt.Sprintf("http://%s:%d", topo.Host, topo.Port)
topo.DataDirectory = path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("%s_%d", "topo", port))
topo.LogDirectory = path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("%s_%d", "topo", port), "logs")
topo.VerifyURL = fmt.Sprintf("http://%s:%d/v2/keys", topo.Host, topo.Port)
topo.PeerURL = fmt.Sprintf("http://%s:%d", hostname, peerPort)
return topo
}
|
[
"\"VTDATAROOT\"",
"\"VTDATAROOT\"",
"\"VTDATAROOT\""
] |
[] |
[
"VTDATAROOT"
] |
[]
|
["VTDATAROOT"]
|
go
| 1 | 0 | |
tests/end2end_test.go
|
package tests
import (
"bytes"
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"os"
"reflect"
"testing"
"time"
"github.com/amenzhinsky/iothub/iotdevice"
"github.com/amenzhinsky/iothub/iotdevice/transport"
"github.com/amenzhinsky/iothub/iotdevice/transport/mqtt"
"github.com/amenzhinsky/iothub/iotservice"
)
func TestEnd2End(t *testing.T) {
cs := os.Getenv("TEST_IOTHUB_SERVICE_CONNECTION_STRING")
if cs == "" {
t.Fatal("$TEST_IOTHUB_SERVICE_CONNECTION_STRING is empty")
}
sc, err := iotservice.NewFromConnectionString(cs) //iotservice.WithLogger(logger.New(logger.LevelDebug, nil)),
if err != nil {
t.Fatal(err)
}
defer sc.Close()
// create devices with all possible authentication types
_, err = sc.DeleteDevices(context.Background(), []*iotservice.Device{
{DeviceID: "golang-iothub-sas"},
{DeviceID: "golang-iothub-self-signed"},
{DeviceID: "golang-iothub-ca"},
}, true)
if err != nil {
t.Fatal(err)
}
result, err := sc.CreateDevices(context.Background(), []*iotservice.Device{{
DeviceID: "golang-iothub-sas",
Authentication: &iotservice.Authentication{
Type: iotservice.AuthSAS,
},
}, {
DeviceID: "golang-iothub-self-signed",
Authentication: &iotservice.Authentication{
Type: iotservice.AuthSelfSigned,
X509Thumbprint: &iotservice.X509Thumbprint{
PrimaryThumbprint: "443ABB6DEA8F93D5987D31D2607BE2931217752C",
SecondaryThumbprint: "443ABB6DEA8F93D5987D31D2607BE2931217752C",
},
},
}, {
DeviceID: "golang-iothub-ca",
Authentication: &iotservice.Authentication{
Type: iotservice.AuthCA,
},
}})
if err != nil {
t.Fatal(err)
}
if !result.IsSuccessful {
t.Fatalf("couldn't create devices: %v", result.Errors)
}
for name, mktransport := range map[string]func() transport.Transport{
"mqtt": func() transport.Transport { return mqtt.New() },
"mqtt-ws": func() transport.Transport { return mqtt.New(mqtt.WithWebSocket(true)) },
// TODO: "amqp": func() transport.Transport { return amqp.New() },
// TODO: "http": func() transport.Transport { return http.New() },
} {
mktransport := mktransport
t.Run(name, func(t *testing.T) {
for auth, suite := range map[string]struct {
init func(transport transport.Transport) (*iotdevice.Client, error)
only string
}{
// TODO: ca authentication
"x509": {
func(transport transport.Transport) (*iotdevice.Client, error) {
return iotdevice.NewFromX509FromFile(
transport,
"golang-iothub-self-signed",
sc.HostName(),
"testdata/device.crt",
"testdata/device.key",
)
},
"DeviceToCloud", // just need to check access
},
"sas": {
func(transport transport.Transport) (*iotdevice.Client, error) {
device, err := sc.GetDevice(context.Background(), "golang-iothub-sas")
if err != nil {
return nil, err
}
dcs, err := sc.DeviceConnectionString(device, false)
if err != nil {
t.Fatal(err)
}
return iotdevice.NewFromConnectionString(transport, dcs)
},
"*",
},
} {
for name, test := range map[string]func(*testing.T, *iotservice.Client, *iotdevice.Client){
"DeviceToCloud": testDeviceToCloud,
"CloudToDevice": testCloudToDevice,
"DirectMethod": testDirectMethod,
"UpdateDeviceTwin": testUpdateTwin,
"SubscribeTwin": testSubscribeTwin,
} {
if suite.only != "*" && suite.only != name {
continue
}
test, suite, mktransport := test, suite, mktransport
t.Run(auth+"/"+name, func(t *testing.T) {
dc, err := suite.init(mktransport())
if err != nil {
t.Fatal(err)
}
defer dc.Close()
if err := dc.Connect(context.Background()); err != nil {
t.Fatal(err)
}
test(t, sc, dc)
})
}
}
})
}
}
func testDeviceToCloud(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
evsc := make(chan *iotservice.Event, 1)
errc := make(chan error, 2)
go func() {
errc <- sc.SubscribeEvents(context.Background(), func(ev *iotservice.Event) error {
if ev.ConnectionDeviceID == dc.DeviceID() {
evsc <- ev
}
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
done := make(chan struct{})
defer close(done)
// send events until one of them is received
go func() {
for {
if err := dc.SendEvent(context.Background(), payload,
iotdevice.WithSendMessageID(genID()),
iotdevice.WithSendCorrelationID(genID()),
iotdevice.WithSendProperties(props),
); err != nil {
errc <- err
break
}
select {
case <-done:
case <-time.After(500 * time.Millisecond):
}
}
}()
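// Wait for a matching event, an error from the subscriber/sender, or a timeout.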
select {
case msg := <-evsc:
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ConnectionDeviceID != dc.DeviceID() {
t.Errorf("ConnectionDeviceID = %q, want %q", msg.ConnectionDeviceID, dc.DeviceID())
}
if msg.ConnectionDeviceGenerationID == "" {
t.Error("ConnectionDeviceGenerationID is empty")
}
if msg.ConnectionAuthMethod == nil {
t.Error("ConnectionAuthMethod is nil")
}
if msg.MessageSource == "" {
t.Error("MessageSource is empty")
}
if msg.EnqueuedTime.IsZero() {
t.Error("EnqueuedTime is zero")
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
testProperties(t, msg.Properties, props)
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("d2c timed out")
}
}
func testProperties(t *testing.T, got, want map[string]string) {
t.Helper()
for k, v := range want {
x, ok := got[k]
if !ok || x != v {
t.Errorf("Properties = %v, want %v", got, want)
return
}
}
}
func testCloudToDevice(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
fbsc := make(chan *iotservice.Feedback, 1)
errc := make(chan error, 3)
sub, err := dc.SubscribeEvents(context.Background())
if err != nil {
t.Fatal(err)
}
// subscribe to feedback and report first registered message id
go func() {
errc <- sc.SubscribeFeedback(context.Background(), func(fb *iotservice.Feedback) error {
fbsc <- fb
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
uid := "golang-iothub"
mid := genID()
if err := sc.SendEvent(context.Background(), dc.DeviceID(), payload,
iotservice.WithSendAck(iotservice.AckFull),
iotservice.WithSendProperties(props),
iotservice.WithSendUserID(uid),
iotservice.WithSendMessageID(mid),
iotservice.WithSendCorrelationID(genID()),
iotservice.WithSendExpiryTime(time.Now().Add(5*time.Second)),
); err != nil {
errc <- err
return
}
for {
select {
case msg := <-sub.C():
if msg.MessageID != mid {
continue
}
// validate event feedback
Outer:
for {
select {
case fb := <-fbsc:
if fb.OriginalMessageID != mid {
continue
}
if fb.StatusCode != "Success" {
t.Errorf("feedback status = %q, want %q", fb.StatusCode, "Success")
}
break Outer
case <-time.After(30 * time.Second):
t.Fatal("feedback timed out")
}
}
// validate message content
if msg.To == "" {
t.Error("To is empty")
}
if msg.UserID != uid {
t.Errorf("UserID = %q, want %q", msg.UserID, uid)
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ExpiryTime.IsZero() {
t.Error("ExpiryTime is zero")
}
testProperties(t, msg.Properties, props)
return
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("c2d timed out")
}
}
}
func testUpdateTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
// update state and keep track of version
s := fmt.Sprintf("%d", time.Now().UnixNano())
v, err := dc.UpdateTwinState(context.Background(), map[string]interface{}{
"ts": s,
})
if err != nil {
t.Fatal(err)
}
_, r, err := dc.RetrieveTwinState(context.Background())
if err != nil {
t.Fatal(err)
}
if v != r.Version() {
t.Errorf("update-twin version = %d, want %d", r.Version(), v)
}
if r["ts"] != s {
t.Errorf("update-twin parameter = %q, want %q", r["ts"], s)
}
}
// TODO: very flaky
func testSubscribeTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
sub, err := dc.SubscribeTwinUpdates(context.Background())
if err != nil {
t.Fatal(err)
}
defer dc.UnsubscribeTwinUpdates(sub)
// TODO: hacky, but reduces flakiness
time.Sleep(time.Second)
twin, err := sc.UpdateDeviceTwin(context.Background(), &iotservice.Twin{
DeviceID: dc.DeviceID(),
Tags: map[string]interface{}{
"test-device": true,
},
Properties: &iotservice.Properties{
Desired: map[string]interface{}{
"test-prop": time.Now().UnixNano() / 1000,
},
},
})
if err != nil {
t.Fatal(err)
}
select {
case state := <-sub.C():
if state["$version"] != twin.Properties.Desired["$version"] {
t.Errorf("version = %d, want %d", state["$version"], twin.Properties.Desired["$version"])
}
if state["test-prop"] != twin.Properties.Desired["test-prop"] {
t.Errorf("test-prop = %q, want %q", state["test-prop"], twin.Properties.Desired["test-prop"])
}
case <-time.After(10 * time.Second):
t.Fatal("SubscribeTwinUpdates timed out")
}
}
func testDirectMethod(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
if err := dc.RegisterMethod(
context.Background(),
time.Minute,
time.Minute,
"sum",
func(v map[string]interface{}) (map[string]interface{}, error) {
return map[string]interface{}{
"result": v["a"].(float64) + v["b"].(float64),
}, nil
},
); err != nil {
t.Fatal(err)
}
resc := make(chan *iotservice.MethodResult, 1)
errc := make(chan error, 2)
go func() {
v, err := sc.CallDeviceMethod(context.Background(), dc.DeviceID(), &iotservice.MethodCall{
MethodName: "sum",
ConnectTimeout: 5,
ResponseTimeout: 5,
Payload: map[string]interface{}{
"a": 1.5,
"b": 3,
},
})
if err != nil {
errc <- err
}
resc <- v
}()
select {
case v := <-resc:
w := &iotservice.MethodResult{
Status: 200,
Payload: map[string]interface{}{
"result": 4.5,
},
}
if !reflect.DeepEqual(v, w) {
t.Errorf("direct-method result = %v, want %v", v, w)
}
case err := <-errc:
t.Fatal(err)
}
}
func genID() string {
b := make([]byte, 16)
if _, err := rand.Read(b); err != nil {
panic(err)
}
return hex.EncodeToString(b)
}
|
[
"\"TEST_IOTHUB_SERVICE_CONNECTION_STRING\""
] |
[] |
[
"TEST_IOTHUB_SERVICE_CONNECTION_STRING"
] |
[]
|
["TEST_IOTHUB_SERVICE_CONNECTION_STRING"]
|
go
| 1 | 0 | |
waflyctl.go
|
/*
* WAF provisioning tool
*
* Copyright (c) 2018-2019 Fastly Inc.
* Author: Jose Enrique Hernandez
*/
package main
import (
"bytes"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/user"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/BurntSushi/toml"
"github.com/fastly/go-fastly/fastly"
"gopkg.in/alecthomas/kingpin.v2"
"gopkg.in/resty.v1"
)
var (
//logging variables
logFile string
//Info level logging
Info *log.Logger
//Warning level logging
Warning *log.Logger
//Error level logging
Error *log.Logger
// version number
version = "dev"
date = "unknown"
)
// TOMLConfig is the applications config file
type TOMLConfig struct {
Logpath string
APIEndpoint string
Tags []string
Publisher []string
Action string
Rules []int64
DisabledRules []int64
Owasp owaspSettings
Weblog WeblogSettings
Waflog WaflogSettings
Vclsnippet VCLSnippetSettings
AdditionalSnippets map[string]VCLSnippetSettings
Response ResponseSettings
Prefetch PrefetchSettings
}
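// A minimal illustrative waflyctl.toml sketch (keys mirror the fields above;
// the values shown are placeholders, not documented defaults):
//   logpath     = "waflyctl.log"
//   apiendpoint = "https://api.fastly.com"
//   tags        = ["OWASP"]
//   action      = "log"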
// Backup is a backup of the rule status for a WAF
type Backup struct {
ServiceID string
ID string
Updated time.Time
Disabled []int64
Block []int64
Log []int64
Owasp owaspSettings
}
type owaspSettings struct {
AllowedHTTPVersions string
AllowedMethods string
AllowedRequestContentType string
AllowedRequestContentTypeCharset string
ArgLength int
ArgNameLength int
CombinedFileSizes int
CriticalAnomalyScore int
CRSValidateUTF8Encoding bool
ErrorAnomalyScore int
HTTPViolationScoreThreshold int
InboundAnomalyScoreThreshold int
LFIScoreThreshold int
MaxFileSize int
MaxNumArgs int
NoticeAnomalyScore int
ParanoiaLevel int
PHPInjectionScoreThreshold int
RCEScoreThreshold int
RestrictedExtensions string
RestrictedHeaders string
RFIScoreThreshold int
SessionFixationScoreThreshold int
SQLInjectionScoreThreshold int
XSSScoreThreshold int
TotalArgLength int
WarningAnomalyScore int
}
// WeblogSettings parameters for logs in config file
type WeblogSettings struct {
Name string
Address string
Port uint
Tlscacert string
Tlshostname string
Format string
Condition string
Expiry uint
}
// VCLSnippetSettings parameters for snippets in config file
type VCLSnippetSettings struct {
Name string
Content string
Type fastly.SnippetType
Priority int
Dynamic int
}
// WaflogSettings parameters from config
type WaflogSettings struct {
Name string
Address string
Port uint
Tlscacert string
Tlshostname string
Format string
}
// ResponseSettings parameters from config
type ResponseSettings struct {
Name string
HTTPStatusCode uint
HTTPResponse string
ContentType string
Content string
}
// PrefetchSettings parameters from config
type PrefetchSettings struct {
Name string
Statement string
Type string
Priority int
}
// RuleList contains list of rules
type RuleList struct {
Data []Rule
Links struct {
Last string `json:"last"`
First string `json:"first"`
Next string `json:"next"`
} `json:"links"`
Meta struct {
CurrentPage int `json:"current_page"`
PerPage int `json:"per_page"`
RecordCount int `json:"record_count"`
TotalPages int `json:"total_pages"`
} `json:"meta"`
}
// Rule from Fastly API
type Rule struct {
ID string `json:"id"`
Type string `json:"type"`
Attributes struct {
Message string `json:"message"`
Status string `json:"status"`
Publisher string `json:"publisher"`
ParanoiaLevel int `json:"paranoia_level"`
Revision int `json:"revision"`
Severity interface{} `json:"severity"`
Version interface{} `json:"version"`
RuleID string `json:"rule_id"`
ModsecRuleID string `json:"modsec_rule_id"`
UniqueRuleID string `json:"unique_rule_id"`
Source interface{} `json:"source"`
Vcl interface{} `json:"vcl"`
} `json:"attributes"`
}
// PagesOfRules contains a list of rulelist
type PagesOfRules struct {
page []RuleList
}
// PagesOfConfigurationSets contains a list of ConfigSetList
type PagesOfConfigurationSets struct {
page []ConfigSetList
}
// ConfigSetList contains a list of configuration set and its metadata
type ConfigSetList struct {
Data []ConfigSet
Links struct {
Last string `json:"last"`
First string `json:"first"`
Next string `json:"next"`
} `json:"links"`
Meta struct {
CurrentPage int `json:"current_page"`
PerPage int `json:"per_page"`
RecordCount int `json:"record_count"`
TotalPages int `json:"total_pages"`
} `json:"meta"`
}
// ConfigSet defines details of a configuration set
type ConfigSet struct {
ID string `json:"id"`
Type string `json:"type"`
Attributes struct {
Active bool `json:"active"`
Name string `json:"name"`
} `json:"attributes"`
}
// Init loads the TOML config file and initializes the loggers
func Init(configFile string) TOMLConfig {
//load configs
var config TOMLConfig
if _, err := toml.DecodeFile(configFile, &config); err != nil {
fmt.Println("Could not read config file -", err)
os.Exit(1)
}
// assign the default log path if none is configured
if config.Logpath == "" {
fmt.Println("no log path defined using default waflyctl.log")
config.Logpath = "waflyctl.log"
}
/*
fmt.Println("config settings: ")
fmt.Println("- logpath",config.Logpath)
fmt.Println("- apiendpoint", config.APIEndpoint)
fmt.Println("- owasp", config.Owasp)
fmt.Println("- weblogs", config.Weblog.Port)
fmt.Println("- waflogs", config.Waflog.Port)
*/
//now lets create a logging object
file, err := os.OpenFile(config.Logpath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
if err != nil {
log.Fatalln("Failed to open log file", logFile, ":", err)
}
multi := io.MultiWriter(file, os.Stdout)
Info = log.New(multi,
"INFO: ",
log.Ldate|log.Ltime|log.Lshortfile)
Warning = log.New(multi,
"WARNING: ",
log.Ldate|log.Ltime|log.Lshortfile)
Error = log.New(multi,
"ERROR: ",
log.Ldate|log.Ltime|log.Lshortfile)
return config
}
func getActiveVersion(client fastly.Client, serviceID string) int {
service, err := client.GetService(&fastly.GetServiceInput{
ID: serviceID,
})
if err != nil {
Error.Fatalf("Cannot get service %q: GetService: %v\n", serviceID, err)
}
for _, version := range service.Versions {
if version.Active {
return version.Number
}
}
Error.Fatal("No active version found (wrong service id?). Aborting")
return 0
}
func cloneVersion(client fastly.Client, serviceID string, activeVersion int, comment string) int {
version, err := client.CloneVersion(&fastly.CloneVersionInput{
Service: serviceID,
Version: activeVersion,
})
if err != nil {
Error.Fatalf("Cannot clone version %d: CloneVersion: %v\n", activeVersion, err)
}
if comment == "" {
Info.Printf("New version %d created\n", version.Number)
} else {
client.UpdateVersion(&fastly.UpdateVersionInput{
Service: serviceID,
Version: version.Number,
Comment: comment,
})
Info.Printf("New version %d created. Comment: %s\n", version.Number, comment)
}
return version.Number
}
func prefetchCondition(client fastly.Client, serviceID string, config TOMLConfig, version int) {
conditions, err := client.ListConditions(&fastly.ListConditionsInput{
Service: serviceID,
Version: version,
})
if err != nil {
Error.Fatalf("Cannot create prefetch condition %q: ListConditions: %v\n", config.Prefetch.Name, err)
}
if !conditionExists(conditions, config.Prefetch.Name) {
_, err = client.CreateCondition(&fastly.CreateConditionInput{
Service: serviceID,
Version: version,
Name: config.Prefetch.Name,
Statement: config.Prefetch.Statement,
Type: config.Prefetch.Type,
Priority: 10,
})
if err != nil {
Error.Fatalf("Cannot create prefetch condition %q: CreateCondition: %v\n", config.Prefetch.Name, err)
}
Info.Printf("Prefetch condition %q created\n", config.Prefetch.Name)
} else {
Warning.Printf("Prefetch condition %q already exists, skipping\n", config.Prefetch.Name)
}
}
func responseObject(client fastly.Client, serviceID string, config TOMLConfig, version int) {
responses, err := client.ListResponseObjects(&fastly.ListResponseObjectsInput{
Service: serviceID,
Version: version,
})
if err != nil {
Error.Fatalf("Cannot create response object %q: ListResponseObjects: %v\n", config.Response.Name, err)
}
for _, response := range responses {
if strings.EqualFold(response.Name, config.Response.Name) {
Warning.Printf("Response object %q already exists, skipping\n", config.Response.Name)
return
}
}
_, err = client.CreateResponseObject(&fastly.CreateResponseObjectInput{
Service: serviceID,
Version: version,
Name: config.Response.Name,
Status: config.Response.HTTPStatusCode,
Response: config.Response.HTTPResponse,
Content: config.Response.Content,
ContentType: config.Response.ContentType,
})
if err != nil {
Error.Fatalf("Cannot create response object %q: CreateResponseObject: %v\n", config.Response.Name, err)
}
Info.Printf("Response object %q created\n", config.Response.Name)
}
func vclSnippet(client fastly.Client, serviceID string, vclSnippet VCLSnippetSettings, version int) {
snippets, err := client.ListSnippets(&fastly.ListSnippetsInput{
Service: serviceID,
Version: version,
})
if err != nil {
Error.Fatalf("Cannot create VCL snippet %q: ListSnippets: %v\n", vclSnippet.Name, err)
}
for _, snippet := range snippets {
if snippet.Name == vclSnippet.Name {
Warning.Printf("VCL snippet %q already exists, skipping\n", vclSnippet.Name)
return
}
}
_, err = client.CreateSnippet(&fastly.CreateSnippetInput{
Service: serviceID,
Version: version,
Name: vclSnippet.Name,
Priority: vclSnippet.Priority,
Dynamic: vclSnippet.Dynamic,
Content: vclSnippet.Content,
Type: vclSnippet.Type,
})
if err != nil {
Error.Fatalf("Cannot create VCL snippet %q: CreateSnippet: %v\n", vclSnippet.Name, err)
}
Info.Printf("VCL snippet %q created\n", vclSnippet.Name)
}
func fastlyLogging(client fastly.Client, serviceID string, config TOMLConfig, version int) {
if config.Weblog.Name != "" {
_, err := client.CreateSyslog(&fastly.CreateSyslogInput{
Service: serviceID,
Version: version,
Name: config.Weblog.Name,
Address: config.Weblog.Address,
Port: config.Weblog.Port,
UseTLS: fastly.CBool(true),
IPV4: config.Weblog.Address,
TLSCACert: config.Weblog.Tlscacert,
TLSHostname: config.Weblog.Tlshostname,
Format: config.Weblog.Format,
FormatVersion: 2,
MessageType: "blank",
})
switch {
case err == nil:
Info.Printf("Logging endpoint %q created\n", config.Weblog.Name)
case strings.Contains(err.Error(), "Duplicate record"):
Warning.Printf("Logging endpoint %q already exists, skipping\n", config.Weblog.Name)
default:
Error.Fatalf("Cannot create logging endpoint %q: CreateSyslog: %v\n", config.Weblog.Name, err)
}
} else {
Warning.Printf("Empty or invalid web log configuration, skipping\n")
}
if config.Waflog.Name != "" {
_, err := client.CreateSyslog(&fastly.CreateSyslogInput{
Service: serviceID,
Version: version,
Name: config.Waflog.Name,
Address: config.Waflog.Address,
Port: config.Waflog.Port,
UseTLS: fastly.CBool(true),
IPV4: config.Waflog.Address,
TLSCACert: config.Waflog.Tlscacert,
TLSHostname: config.Waflog.Tlshostname,
Format: config.Waflog.Format,
FormatVersion: 2,
MessageType: "blank",
Placement: "waf_debug",
})
switch {
case err == nil:
Info.Printf("Logging endpoint %q created\n", config.Waflog.Name)
case strings.Contains(err.Error(), "Duplicate record"):
Warning.Printf("Logging endpoint %q already exists, skipping\n", config.Waflog.Name)
default:
Error.Fatalf("Cannot create logging endpoint %q: CreateSyslog: %v\n", config.Waflog.Name, err)
}
} else {
Warning.Printf("Empty or invalid web log configuration, skipping\n")
}
}
func wafContainer(client fastly.Client, serviceID string, config TOMLConfig, version int) string {
waf, err := client.CreateWAF(&fastly.CreateWAFInput{
Service: serviceID,
Version: version,
PrefetchCondition: config.Prefetch.Name,
Response: config.Response.Name,
})
if err != nil {
Error.Fatalf("Cannot create WAF: CreateWAF: %v\n", err)
}
Info.Printf("WAF %q created\n", waf.ID)
return waf.ID
}
func createOWASP(client fastly.Client, serviceID string, config TOMLConfig, wafID string) {
var created bool
var err error
owasp, _ := client.GetOWASP(&fastly.GetOWASPInput{
Service: serviceID,
ID: wafID,
})
if owasp.ID == "" {
owasp, err = client.CreateOWASP(&fastly.CreateOWASPInput{
Service: serviceID,
ID: wafID,
})
if err != nil {
Error.Fatalf("%v\n", err)
}
created = true
}
owasp, err = client.UpdateOWASP(&fastly.UpdateOWASPInput{
Service: serviceID,
ID: wafID,
OWASPID: owasp.ID,
AllowedHTTPVersions: config.Owasp.AllowedHTTPVersions,
AllowedMethods: config.Owasp.AllowedMethods,
AllowedRequestContentType: config.Owasp.AllowedRequestContentType,
AllowedRequestContentTypeCharset: config.Owasp.AllowedRequestContentTypeCharset,
ArgLength: config.Owasp.ArgLength,
ArgNameLength: config.Owasp.ArgNameLength,
CombinedFileSizes: config.Owasp.CombinedFileSizes,
CriticalAnomalyScore: config.Owasp.CriticalAnomalyScore,
CRSValidateUTF8Encoding: config.Owasp.CRSValidateUTF8Encoding,
ErrorAnomalyScore: config.Owasp.ErrorAnomalyScore,
HTTPViolationScoreThreshold: config.Owasp.HTTPViolationScoreThreshold,
InboundAnomalyScoreThreshold: config.Owasp.InboundAnomalyScoreThreshold,
LFIScoreThreshold: config.Owasp.LFIScoreThreshold,
MaxFileSize: config.Owasp.MaxFileSize,
MaxNumArgs: config.Owasp.MaxNumArgs,
NoticeAnomalyScore: config.Owasp.NoticeAnomalyScore,
ParanoiaLevel: config.Owasp.ParanoiaLevel,
PHPInjectionScoreThreshold: config.Owasp.PHPInjectionScoreThreshold,
RCEScoreThreshold: config.Owasp.RCEScoreThreshold,
RestrictedExtensions: config.Owasp.RestrictedExtensions,
RestrictedHeaders: config.Owasp.RestrictedHeaders,
RFIScoreThreshold: config.Owasp.RFIScoreThreshold,
SessionFixationScoreThreshold: config.Owasp.SessionFixationScoreThreshold,
SQLInjectionScoreThreshold: config.Owasp.SQLInjectionScoreThreshold,
XSSScoreThreshold: config.Owasp.XSSScoreThreshold,
TotalArgLength: config.Owasp.TotalArgLength,
WarningAnomalyScore: config.Owasp.WarningAnomalyScore,
})
if err != nil {
Error.Fatalf("%v\n", err)
}
if created {
Info.Println("OWASP settings created with the following settings:")
} else {
Info.Println("OWASP settings updated with the following settings:")
}
Info.Println(" - AllowedHTTPVersions:", owasp.AllowedHTTPVersions)
Info.Println(" - AllowedMethods:", owasp.AllowedMethods)
Info.Println(" - AllowedRequestContentType:", owasp.AllowedRequestContentType)
Info.Println(" - AllowedRequestContentTypeCharset:", owasp.AllowedRequestContentTypeCharset)
Info.Println(" - ArgLength:", owasp.ArgLength)
Info.Println(" - ArgNameLength:", owasp.ArgNameLength)
Info.Println(" - CombinedFileSizes:", owasp.CombinedFileSizes)
Info.Println(" - CriticalAnomalyScore:", owasp.CriticalAnomalyScore)
Info.Println(" - CRSValidateUTF8Encoding:", owasp.CRSValidateUTF8Encoding)
Info.Println(" - ErrorAnomalyScore:", owasp.ErrorAnomalyScore)
Info.Println(" - HTTPViolationScoreThreshold:", owasp.HTTPViolationScoreThreshold)
Info.Println(" - InboundAnomalyScoreThreshold:", owasp.InboundAnomalyScoreThreshold)
Info.Println(" - LFIScoreThreshold:", owasp.LFIScoreThreshold)
Info.Println(" - MaxFileSize:", owasp.MaxFileSize)
Info.Println(" - MaxNumArgs:", owasp.MaxNumArgs)
Info.Println(" - NoticeAnomalyScore:", owasp.NoticeAnomalyScore)
Info.Println(" - ParanoiaLevel:", owasp.ParanoiaLevel)
Info.Println(" - PHPInjectionScoreThreshold:", owasp.PHPInjectionScoreThreshold)
Info.Println(" - RCEScoreThreshold:", owasp.RCEScoreThreshold)
Info.Println(" - RestrictedHeaders:", owasp.RestrictedHeaders)
Info.Println(" - RFIScoreThreshold:", owasp.RFIScoreThreshold)
Info.Println(" - SessionFixationScoreThreshold:", owasp.SessionFixationScoreThreshold)
Info.Println(" - SQLInjectionScoreThreshold:", owasp.SQLInjectionScoreThreshold)
Info.Println(" - XssScoreThreshold:", owasp.XSSScoreThreshold)
Info.Println(" - TotalArgLength:", owasp.TotalArgLength)
Info.Println(" - WarningAnomalyScore:", owasp.WarningAnomalyScore)
}
// DeleteLogsCall removes logging endpoints and any logging conditions.
func DeleteLogsCall(client fastly.Client, serviceID string, config TOMLConfig, version int) bool {
//Get a list of SysLogs
slogs, err := client.ListSyslogs(&fastly.ListSyslogsInput{
Service: serviceID,
Version: version,
})
if err != nil {
Error.Println(err)
return false
}
//drop syslogs if they exist
if sysLogExists(slogs, config.Weblog.Name) {
Info.Printf("Deleting Web logging endpoint: %q\n", config.Weblog.Name)
err = client.DeleteSyslog(&fastly.DeleteSyslogInput{
Service: serviceID,
Version: version,
Name: config.Weblog.Name,
})
if err != nil {
fmt.Println(err)
return false
}
}
if sysLogExists(slogs, config.Waflog.Name) {
Info.Printf("Deleting WAF logging endpoint: %q\n", config.Waflog.Name)
err = client.DeleteSyslog(&fastly.DeleteSyslogInput{
Service: serviceID,
Version: version,
Name: config.Waflog.Name,
})
if err != nil {
fmt.Println(err)
return false
}
}
//first find if we have any PX conditions
conditions, err := client.ListConditions(&fastly.ListConditionsInput{
Service: serviceID,
Version: version,
})
if err != nil {
Error.Println(err)
return false
}
//remove logging conditions (and expiry conditions)
if conditionExists(conditions, "waf-soc-logging") {
Info.Println("Deleting logging condition: 'waf-soc-logging'")
err = client.DeleteCondition(&fastly.DeleteConditionInput{
Service: serviceID,
Version: version,
Name: "waf-soc-logging",
})
if err != nil {
Error.Println(err)
return false
}
}
if conditionExists(conditions, "waf-soc-logging-with-expiry") {
Info.Println("Deleting logging condition: 'waf-soc-logging-with-expiry'")
err = client.DeleteCondition(&fastly.DeleteConditionInput{
Service: serviceID,
Version: version,
Name: "waf-soc-logging-with-expiry",
})
if err != nil {
Error.Println(err)
return false
}
}
//Legacy conditions
//remove PerimeterX logging condition (if exists)
if conditionExists(conditions, "waf-soc-with-px") {
Info.Println("Deleting Legacy PerimeterX logging condition: 'waf-soc-with-px'")
err = client.DeleteCondition(&fastly.DeleteConditionInput{
Service: serviceID,
Version: version,
Name: "waf-soc-with-px",
})
if err != nil {
Error.Println(err)
return false
}
}
//remove legacy shielding logging condition (if exists)
if conditionExists(conditions, "waf-soc-with-shielding") {
Info.Println("Deleting Legacy Shielding logging condition: 'waf-soc-with-shielding'")
err = client.DeleteCondition(&fastly.DeleteConditionInput{
Service: serviceID,
Version: version,
Name: "waf-soc-with-shielding",
})
if err != nil {
Error.Println(err)
return false
}
}
return true
}
// conditionExists iterates through the given slice of conditions and returns
// whether the given name exists in the collection
func conditionExists(conds []*fastly.Condition, name string) bool {
for _, c := range conds {
if strings.EqualFold(c.Name, name) {
return true
}
}
return false
}
// sysLogExists iterates through the given slice of syslogs and returns
// whether the given name exists in the collection
func sysLogExists(slogs []*fastly.Syslog, name string) bool {
for _, sl := range slogs {
if strings.EqualFold(sl.Name, name) {
return true
}
}
return false
}
// DeprovisionWAF removes a WAF from a service
func DeprovisionWAF(client fastly.Client, serviceID, apiKey string, config TOMLConfig, version int) bool {
/*
To remove:
1. Delete WAF logging endpoints and conditions
2. Delete the WAF container
3. Delete the WAF response object
4. Delete the WAF prefetch condition
5. Delete the WAF VCL snippet
*/
//get current waf objects
wafs, err := client.ListWAFs(&fastly.ListWAFsInput{
Service: serviceID,
Version: version,
})
if err != nil {
Error.Fatal(err)
return false
}
if len(wafs) == 0 {
Error.Printf("No WAF object exists in current service %s version #%v .. exiting\n", serviceID, version)
return false
}
//get list of conditions
//first find if we have any PX conditions
conditions, err := client.ListConditions(&fastly.ListConditionsInput{
Service: serviceID,
Version: version,
})
if err != nil {
Error.Fatal(err)
return false
}
for index, waf := range wafs {
//remove WAF Logging
result := DeleteLogsCall(client, serviceID, config, version)
Info.Printf("Deleting WAF #%v Logging\n", index+1)
if !result {
Error.Printf("Deleting WAF #%v Logging.\n", index+1)
}
Info.Printf("Deleting WAF #%v Container\n", index+1)
//remove WAF container
err = client.DeleteWAF(&fastly.DeleteWAFInput{
Service: serviceID,
Version: version,
ID: waf.ID,
})
if err != nil {
Error.Print(err)
return false
}
//remove WAF Response Object
Info.Printf("Deleting WAF #%v Response Object\n", index+1)
err = client.DeleteResponseObject(&fastly.DeleteResponseObjectInput{
Service: serviceID,
Version: version,
Name: "WAF_Response",
})
if err != nil {
Error.Print(err)
return false
}
//remove WAF Prefetch condition (if exists)
if conditionExists(conditions, "WAF_Prefetch") {
Info.Printf("Deleting WAF #%v Prefetch Condition\n", index+1)
err = client.DeleteCondition(&fastly.DeleteConditionInput{
Service: serviceID,
Version: version,
Name: "WAF_Prefetch",
})
if err != nil {
Error.Print(err)
return false
}
}
//remove VCL Snippet
Info.Printf("Deleting WAF #%v VCL Snippet\n", index+1)
apiCall := config.APIEndpoint + "/service/" + serviceID + "/version/" + strconv.Itoa(version) + "/snippet/" + config.Vclsnippet.Name
//delete the VCL snippet via the API
_, err := resty.R().
SetHeader("Accept", "application/json").
SetHeader("Fastly-Key", apiKey).
Delete(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Printf("Deleting WAF #%v VCL Snippet\n", index+1)
}
}
return true
}
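// provisionWAF creates the prefetch condition, response object, VCL snippets, WAF container and
// OWASP settings on the given service version, plus logging endpoints unless --no-logs is set,
// and returns the new WAF ID.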
func provisionWAF(client fastly.Client, serviceID string, config TOMLConfig, version int) string {
prefetchCondition(client, serviceID, config, version)
responseObject(client, serviceID, config, version)
vclSnippet(client, serviceID, config.Vclsnippet, version)
if len(config.AdditionalSnippets) > 0 {
for _, snippet := range config.AdditionalSnippets {
vclSnippet(client, serviceID, snippet, version)
}
}
wafID := wafContainer(client, serviceID, config, version)
createOWASP(client, serviceID, config, wafID)
if !*omitLogs {
fastlyLogging(client, serviceID, config, version)
}
return wafID
}
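// validateVersion asks the Fastly API to validate the given service version and reminds the user to activate it.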
func validateVersion(client fastly.Client, serviceID string, version int) bool {
valid, _, err := client.ValidateVersion(&fastly.ValidateVersionInput{
Service: serviceID,
Version: version,
})
if err != nil {
Error.Fatal(err)
return false
}
if !valid {
Error.Println("Version invalid")
return false
}
Info.Printf("Config Version %v validated. Remember to activate it\n", version)
return true
}
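// publisherConfig applies the configured rule action to every rule from the publishers listed in the configuration.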
func publisherConfig(apiEndpoint, apiKey, serviceID, wafID string, config TOMLConfig) bool {
for _, publisher := range config.Publisher {
if publisher == "" {
continue
}
//set our API call
apiCall := apiEndpoint + "/wafs/rules?filter[publisher]=" + publisher + "&page[number]=1"
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
Get(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
return false
}
//unmarshal the response and extract the rules
body := RuleList{}
json.Unmarshal([]byte(resp.String()), &body)
if len(body.Data) == 0 {
Error.Println("No Fastly Rules found")
return false
}
result := PagesOfRules{[]RuleList{}}
result.page = append(result.page, body)
currentpage := body.Meta.CurrentPage
totalpages := body.Meta.TotalPages
Info.Printf("Read Total Pages: %d with %d rules\n", body.Meta.TotalPages, body.Meta.RecordCount)
// iterate through pages collecting all rules
for currentpage := currentpage + 1; currentpage <= totalpages; currentpage++ {
Info.Printf("Reading page: %d out of %d\n", currentpage, totalpages)
//set our API call
apiCall := apiEndpoint + "/wafs/rules?filter[publisher]=" + publisher + "&page[number]=" + strconv.Itoa(currentpage)
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
Get(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
return false
}
//unmarshal the response and extract the rules
body := RuleList{}
json.Unmarshal([]byte(resp.String()), &body)
result.page = append(result.page, body)
}
Info.Println("- Publisher ", publisher)
for _, p := range result.page {
for _, r := range p.Data {
//set rule action on our tags
apiCall := apiEndpoint + "/service/" + serviceID + "/wafs/" + wafID + "/rules/" + r.ID + "/rule_status"
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
SetBody(`{"data": {"attributes": {"status": "` + config.Action + `"},"id": "` + wafID + `-` + r.ID + `","type": "rule_status"}}`).
Patch(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
os.Exit(1)
}
//check if our response was ok
if resp.Status() == "200 OK" {
Info.Printf("Rule %s was configured in the WAF with action %s\n", r.ID, config.Action)
} else {
Error.Printf("Could not set status: %s on rule tag: %s the response was: %s\n", config.Action, r.ID, resp.String())
}
}
}
}
return true
}
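// tagsConfig applies the configured rule action to all rules matching the tags listed in the
// configuration and reports how many unique rules were affected.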
func tagsConfig(apiEndpoint, apiKey, serviceID, wafID string, config TOMLConfig, forceStatus bool) {
//Work on Tags first
//API endpoint to call for tag searches
apiCall := apiEndpoint + "/wafs/tags"
//make the call
ruleList := RuleList{}
for _, tag := range config.Tags {
if tag == "" {
continue
}
resp, err := resty.R().
SetQueryString(fmt.Sprintf("filter[name]=%s&include=rules", tag)).
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
Get(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
os.Exit(1)
}
//unmarshal the response and extract the rules
body := RuleList{}
json.Unmarshal([]byte(resp.String()), &body)
if len(body.Data) == 0 {
Error.Printf("Could not find any rules with tag: %s please make sure it exists..moving to the next tag\n", tag)
continue
}
//set rule action on our tags
apiCall := apiEndpoint + "/service/" + serviceID + "/wafs/" + wafID + "/rule_statuses"
resp, err = resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
SetBody(fmt.Sprintf(`{"data": {"attributes": {"status": "%s", "name": "%s", "force": %t}, "id": "%s", "type": "rule_status"}}`, config.Action, tag, forceStatus, wafID)).
Post(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
os.Exit(1)
}
//unmarshal the response. Keep track of unique rules added by each tag so we can provide an accurate count
ruleCount := 0
if len(ruleList.Data) > 0 {
tmpRuleList := RuleList{}
json.Unmarshal([]byte(resp.String()), &tmpRuleList)
for _, rule := range tmpRuleList.Data {
if checkRuleInList(rule, ruleList.Data) {
ruleList.Data = append(ruleList.Data, rule)
ruleCount++
}
}
} else {
json.Unmarshal([]byte(resp.String()), &ruleList)
ruleCount = len(ruleList.Data)
}
//check if our response was ok
if resp.Status() == "200 OK" {
Info.Printf("%d rule(s) added in %s mode for tag: %s\n", ruleCount, config.Action, tag)
} else {
Error.Printf("Could not set status: %s on rule tag: %s the response was: %s\n", config.Action, tag, resp.String())
}
}
Info.Printf("Total %d rule(s) added via tags\n", len(ruleList.Data))
}
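// changeStatus enables or disables the WAF by patching its status endpoint.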
func changeStatus(apiEndpoint, apiKey, wafID, status string) {
apiCall := apiEndpoint + "/wafs/" + wafID + "/" + status
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
SetBody(`{"data": {"id": "` + wafID + `","type": "waf"}}`).
Patch(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
os.Exit(1)
}
//check if our response was ok
if resp.Status() == "202 Accepted" {
Info.Printf("WAF %s status was changed to %s\n", wafID, status)
} else {
Error.Println("Could not change the status of WAF " + wafID + " to " + status)
Error.Println("We received the following status code: " + resp.Status() + " with response from the API: " + resp.String())
}
}
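// rulesConfig applies the configured rule action to the individual rule IDs listed in the configuration.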
func rulesConfig(apiEndpoint, apiKey, serviceID, wafID string, config TOMLConfig) {
//implement individual rule management here
for _, rule := range config.Rules {
ruleID := strconv.FormatInt(rule, 10)
//set rule action on our tags
apiCall := apiEndpoint + "/service/" + serviceID + "/wafs/" + wafID + "/rules/" + ruleID + "/rule_status"
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
SetBody(`{"data": {"attributes": {"status": "` + config.Action + `"},"id": "` + wafID + `-` + ruleID + `","type": "rule_status"}}`).
Patch(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
os.Exit(1)
}
//check if our response was ok
if resp.Status() == "200 OK" {
Info.Printf("Rule %s was configured in the WAF with action %s\n", ruleID, config.Action)
} else {
Error.Printf("Could not set status: %s on rule tag: %s the response was: %s\n", config.Action, ruleID, resp.String())
}
}
}
// DefaultRuleDisabled disables rule IDs defined in the configuration file
func DefaultRuleDisabled(apiEndpoint, apiKey, serviceID, wafID string, config TOMLConfig) {
//implement individual rule management here
for _, rule := range config.DisabledRules {
ruleID := strconv.FormatInt(rule, 10)
//set rule action on our tags
apiCall := apiEndpoint + "/service/" + serviceID + "/wafs/" + wafID + "/rules/" + ruleID + "/rule_status"
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
SetBody(`{"data": {"attributes": {"status": "disabled"},"id": "` + wafID + `-` + ruleID + `","type": "rule_status"}}`).
Patch(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
os.Exit(1)
}
//check if our response was ok
if resp.Status() == "200 OK" {
Info.Printf("Rule %s was configured in the WAF with action disabled via disabledrules parameter\n", ruleID)
} else {
Error.Printf("Could not set status: %s on rule tag: %s the response was: %s\n", config.Action, ruleID, resp.String())
}
}
}
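// checkRuleInList reports whether rule is absent from ruleList; it returns true when the rule still needs to be added.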
func checkRuleInList(rule Rule, ruleList []Rule) bool {
for _, checkRule := range ruleList {
if checkRule.ID == rule.ID {
return false
}
}
return true
}
// AddLoggingCondition creates/updates logging conditions based on whether the
// user has specified withPerimeterX and/or a web-log expiry.
// NOTE: PerimeterX conditions will be deprecated next major release.
func AddLoggingCondition(client fastly.Client, serviceID string, version int, config TOMLConfig, withPX bool) bool {
conditions, err := client.ListConditions(&fastly.ListConditionsInput{
Service: serviceID,
Version: version,
})
if err != nil {
Error.Fatal(err)
return false
}
weblogCondtion := "waf.executed"
//Check if there's a condition supplied in the config.
if config.Weblog.Condition != "" {
weblogCondtion = config.Weblog.Condition
}
Info.Printf("Using web logging condition : %q\n", weblogCondtion)
// Create condition statement for PX and/or expiry
var cstmts []string
var msgs []string
cstmts = append(cstmts, weblogCondtion)
cn := "waf-soc-logging"
if withPX {
msgs = append(msgs, "PerimeterX")
cstmts = append(cstmts, "(req.http.x-request-id)")
}
//Check for expiry value
if config.Weblog.Expiry > 0 {
cn = "waf-soc-logging-with-expiry"
exp := time.Now().AddDate(0, 0, int(config.Weblog.Expiry)).Unix()
cstmts = append(cstmts, fmt.Sprintf("(std.atoi(now.sec) < %d)", exp))
msgs = append(msgs, fmt.Sprintf("%d day expiry", config.Weblog.Expiry))
//Check for existing
if conditionExists(conditions, "waf-soc-logging-with-expiry") {
Info.Println("Deleting logging condition: 'waf-soc-logging-with-expiry'")
err = client.DeleteCondition(&fastly.DeleteConditionInput{
Service: serviceID,
Version: version,
Name: "waf-soc-logging-with-expiry",
})
if err != nil {
Error.Fatal(err)
return false
}
}
}
// Add the condition
if conditionExists(conditions, cn) {
Info.Printf("Updating WAF logging condition : %q\n", cn)
_, err = client.UpdateCondition(&fastly.UpdateConditionInput{
Service: serviceID,
Version: version,
Name: cn,
Statement: strings.Join(cstmts, " && "),
Type: "RESPONSE",
Priority: 10,
})
if err != nil {
Error.Fatal(err)
return false
}
} else {
Info.Printf("Creating WAF logging condition : %q\n", cn)
_, err = client.CreateCondition(&fastly.CreateConditionInput{
Service: serviceID,
Version: version,
Name: cn,
Statement: strings.Join(cstmts, " && "),
Type: "RESPONSE",
Priority: 10,
})
if err != nil {
Error.Fatal(err)
return false
}
}
// Assign the conditions to the WAF web-log object
Info.Printf("Assigning condition %q (%s) to web log %q\n", cn, strings.Join(msgs, ", "), config.Weblog.Name)
_, err = client.UpdateSyslog(&fastly.UpdateSyslogInput{
Service: serviceID,
Version: version,
Name: config.Weblog.Name,
ResponseCondition: cn,
})
if err != nil {
Error.Fatal(err)
return false
}
return true
}
// PatchRules patches the WAF rule set after rule statuses have been changed
func PatchRules(serviceID, wafID string, client fastly.Client) bool {
_, err := client.UpdateWAFRuleSets(&fastly.UpdateWAFRuleRuleSetsInput{
Service: serviceID,
ID: wafID,
})
if err != nil {
Error.Print(err)
return false
}
return true
}
// setConfigurationSet changes the configuration set assigned to a WAF object
func setConfigurationSet(wafID, configurationSet string, client fastly.Client) bool {
wafs := []fastly.ConfigSetWAFs{{ID: wafID}}
_, err := client.UpdateWAFConfigSet(&fastly.UpdateWAFConfigSetInput{
WAFList: wafs,
ConfigSetID: configurationSet,
})
//check if we had an issue with our call
if err != nil {
Error.Println("Error setting configuration set ID: " + configurationSet)
return false
}
return true
}
// getConfigurationSets lists all configuration sets available via the Fastly API
func getConfigurationSets(apiEndpoint, apiKey string) bool {
//set our API call
apiCall := apiEndpoint + "/wafs/configuration_sets"
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
Get(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
return false
}
//unmarshal the response and extract the configuration sets
body := ConfigSetList{}
json.Unmarshal([]byte(resp.String()), &body)
if len(body.Data) == 0 {
Error.Println("No Configuration Sets found")
return false
}
json.Unmarshal([]byte(resp.String()), &body)
if len(body.Data) == 0 {
Error.Println("No Fastly Rules found")
return false
}
result := PagesOfConfigurationSets{[]ConfigSetList{}}
result.page = append(result.page, body)
currentpage := body.Meta.CurrentPage
totalpages := body.Meta.TotalPages
Info.Printf("Read Total Pages: %d with %d rules\n", body.Meta.TotalPages, body.Meta.RecordCount)
// iterate through pages collecting all rules
for currentpage := currentpage + 1; currentpage <= totalpages; currentpage++ {
Info.Printf("Reading page: %d out of %d\n", currentpage, totalpages)
//set our API call
apiCall := apiEndpoint + "/wafs/configuration_sets?page[number]=" + strconv.Itoa(currentpage)
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
Get(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
return false
}
//unmarshal the response and extract the configuration sets
body := ConfigSetList{}
json.Unmarshal([]byte(resp.String()), &body)
result.page = append(result.page, body)
}
for _, p := range result.page {
for _, c := range p.Data {
Info.Printf("- Configuration Set %s - %s - Active: %t \n", c.ID, c.Attributes.Name, c.Attributes.Active)
}
}
return true
}
// getRuleInfo returns the rule details for the given ModSecurity rule ID
func getRuleInfo(apiEndpoint, apiKey, ruleID string) Rule {
rule := Rule{}
//set our API call
apiCall := apiEndpoint + "/wafs/rules?page[size]=10&page[number]=1&filter[rule_id]=" + ruleID
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
Get(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
}
//unmarshal the response and extract the rule
body := RuleList{}
json.Unmarshal([]byte(resp.String()), &body)
if len(body.Data) == 0 {
Error.Println("No Fastly Rules found")
}
for _, r := range body.Data {
rule = r
}
return rule
}
// getRules lists all rules for a WAF ID and their status
func getRules(apiEndpoint, apiKey, serviceID, wafID string) bool {
//set our API call
apiCall := apiEndpoint + "/service/" + serviceID + "/wafs/" + wafID + "/rule_statuses"
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
Get(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
return false
}
//unmarshal the response and extract the rules
body := RuleList{}
json.Unmarshal([]byte(resp.String()), &body)
if len(body.Data) == 0 {
Error.Println("No Fastly Rules found")
return false
}
result := PagesOfRules{[]RuleList{}}
result.page = append(result.page, body)
currentpage := body.Meta.CurrentPage
totalpages := body.Meta.TotalPages
Info.Printf("Read Total Pages: %d with %d rules\n", body.Meta.TotalPages, body.Meta.RecordCount)
// iterate through pages collecting all rules
for currentpage := currentpage + 1; currentpage <= totalpages; currentpage++ {
Info.Printf("Reading page: %d out of %d\n", currentpage, totalpages)
//set our API call
apiCall := apiEndpoint + "/service/" + serviceID + "/wafs/" + wafID + "/rule_statuses?page[number]=" + strconv.Itoa(currentpage)
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
Get(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
return false
}
//unmarshal the response and extract the rules
body := RuleList{}
json.Unmarshal([]byte(resp.String()), &body)
result.page = append(result.page, body)
}
var log []Rule
var disabled []Rule
var block []Rule
for _, p := range result.page {
for _, r := range p.Data {
switch r.Attributes.Status {
case "log":
log = append(log, r)
case "block":
block = append(block, r)
case "disabled":
disabled = append(disabled, r)
}
}
}
Info.Println("- Blocking Rules")
for _, r := range block {
info := getRuleInfo(apiEndpoint, apiKey, r.Attributes.ModsecRuleID)
Info.Printf("- Rule ID: %s\tStatus: %s\tParanoia: %d\tPublisher: %s\tMessage: %s\n",
r.Attributes.ModsecRuleID, r.Attributes.Status, info.Attributes.ParanoiaLevel,
info.Attributes.Publisher, info.Attributes.Message)
}
Info.Println("- Logging Rules")
for _, r := range log {
info := getRuleInfo(apiEndpoint, apiKey, r.Attributes.ModsecRuleID)
Info.Printf("- Rule ID: %s\tStatus: %s\tParanoia: %d\tPublisher: %s\tMessage: %s\n",
r.Attributes.ModsecRuleID, r.Attributes.Status, info.Attributes.ParanoiaLevel,
info.Attributes.Publisher, info.Attributes.Message)
}
Info.Println("- Disabled Rules")
for _, r := range disabled {
info := getRuleInfo(apiEndpoint, apiKey, r.Attributes.ModsecRuleID)
Info.Printf("- Rule ID: %s\tStatus: %s\tParanoia: %d\tPublisher: %s\tMessage: %s\n",
r.Attributes.ModsecRuleID, r.Attributes.Status, info.Attributes.ParanoiaLevel,
info.Attributes.Publisher, info.Attributes.Message)
}
return true
}
// getAllRules lists all rules available within the Fastly API, optionally filtered by configuration set
func getAllRules(apiEndpoint, apiKey, configID string) bool {
if configID == "" {
//set our API call
apiCall := apiEndpoint + "/wafs/rules?page[number]=1"
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
Get(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
return false
}
//unmarshal the response and extract the rules
body := RuleList{}
json.Unmarshal([]byte(resp.String()), &body)
if len(body.Data) == 0 {
Error.Println("No Fastly Rules found")
return false
}
result := PagesOfRules{[]RuleList{}}
result.page = append(result.page, body)
currentpage := body.Meta.CurrentPage
totalpages := body.Meta.TotalPages
Info.Printf("Read Total Pages: %d with %d rules\n", body.Meta.TotalPages, body.Meta.RecordCount)
// iterate through pages collecting all rules
for currentpage := currentpage + 1; currentpage <= totalpages; currentpage++ {
Info.Printf("Reading page: %d out of %d\n", currentpage, totalpages)
//set our API call
apiCall := apiEndpoint + "/wafs/rules?page[number]=" + strconv.Itoa(currentpage)
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
Get(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
return false
}
//unmarshal the response and extract the rules
body := RuleList{}
json.Unmarshal([]byte(resp.String()), &body)
result.page = append(result.page, body)
}
var owasp []Rule
var fastly []Rule
var trustwave []Rule
for _, p := range result.page {
for _, r := range p.Data {
switch r.Attributes.Publisher {
case "owasp":
owasp = append(owasp, r)
case "trustwave":
trustwave = append(trustwave, r)
case "fastly":
fastly = append(fastly, r)
}
}
}
Info.Println("- OWASP Rules")
for _, r := range owasp {
Info.Printf("- Rule ID: %s\tParanoia: %d\tVersion: %s\tMessage: %s\n", r.ID, r.Attributes.ParanoiaLevel, r.Attributes.Version, r.Attributes.Message)
}
Info.Println("- Fastly Rules")
for _, r := range fastly {
Info.Printf("- Rule ID: %s\tParanoia: %d\tVersion: %s\tMessage: %s\n", r.ID, r.Attributes.ParanoiaLevel, r.Attributes.Version, r.Attributes.Message)
}
Info.Println("- Trustwave Rules")
for _, r := range trustwave {
Info.Printf("- Rule ID: %s\tParanoia: %d\tVersion: %s\tMessage: %s\n", r.ID, r.Attributes.ParanoiaLevel, r.Attributes.Version, r.Attributes.Message)
}
} else {
//set our API call
apiCall := apiEndpoint + "/wafs/rules?filter[configuration_set_id]=" + configID + "&page[number]=1"
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
Get(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
return false
}
//unmarshal the response and extract the rules
body := RuleList{}
json.Unmarshal([]byte(resp.String()), &body)
if len(body.Data) == 0 {
Error.Println("No Fastly Rules found")
return false
}
result := PagesOfRules{[]RuleList{}}
result.page = append(result.page, body)
currentpage := body.Meta.CurrentPage
totalpages := body.Meta.TotalPages
Info.Printf("Read Total Pages: %d with %d rules\n", body.Meta.TotalPages, body.Meta.RecordCount)
// iterate through pages collecting all rules
for currentpage := currentpage + 1; currentpage <= totalpages; currentpage++ {
Info.Printf("Reading page: %d out of %d\n", currentpage, totalpages)
//set our API call
apiCall := apiEndpoint + "/wafs/rules?filter[configuration_set_id]=" + configID + "&page[number]=" + strconv.Itoa(currentpage)
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
Get(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
return false
}
//unmarshal the response and extract the rules
body := RuleList{}
json.Unmarshal([]byte(resp.String()), &body)
result.page = append(result.page, body)
}
var owasp []Rule
var fastly []Rule
var trustwave []Rule
for _, p := range result.page {
for _, r := range p.Data {
switch r.Attributes.Publisher {
case "owasp":
owasp = append(owasp, r)
case "trustwave":
trustwave = append(trustwave, r)
case "fastly":
fastly = append(fastly, r)
}
}
}
Info.Println("- OWASP Rules")
for _, r := range owasp {
Info.Printf("- Rule ID: %s\tParanoia: %d\tVersion: %s\tMessage: %s\n", r.ID, r.Attributes.ParanoiaLevel, r.Attributes.Version, r.Attributes.Message)
}
Info.Println("- Fastly Rules")
for _, r := range fastly {
Info.Printf("- Rule ID: %s\tParanoia: %d\tVersion: %s\tMessage: %s\n", r.ID, r.Attributes.ParanoiaLevel, r.Attributes.Version, r.Attributes.Message)
}
Info.Println("- Trustwave Rules")
for _, r := range trustwave {
Info.Printf("- Rule ID: %s\tParanoia: %d\tVersion: %s\tMessage: %s\n", r.ID, r.Attributes.ParanoiaLevel, r.Attributes.Version, r.Attributes.Message)
}
}
return true
}
// backupConfig stores all rule statuses and the OWASP configuration for the WAF locally
func backupConfig(apiEndpoint, apiKey, serviceID, wafID string, client fastly.Client, bpath string) bool {
//validate the output path
d := filepath.Dir(bpath)
if _, err := os.Stat(d); os.IsNotExist(err) {
Error.Printf("Output path does not exist: %s\n", d)
return false
}
//get all rules and their status
//set our API call
apiCall := apiEndpoint + "/service/" + serviceID + "/wafs/" + wafID + "/rule_statuses"
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
Get(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
return false
}
//unmarshal the response and extract the rules
body := RuleList{}
json.Unmarshal([]byte(resp.String()), &body)
if len(body.Data) == 0 {
Error.Println("No rules found to back up")
return false
}
result := PagesOfRules{[]RuleList{}}
result.page = append(result.page, body)
currentpage := body.Meta.CurrentPage
perpage := body.Meta.PerPage
totalpages := body.Meta.TotalPages
Info.Printf("Backing up %d rules\n", body.Meta.RecordCount)
// iterate through pages collecting all rules
for currentpage := currentpage + 1; currentpage <= totalpages; currentpage++ {
Info.Printf("Reading page: %d out of %d\n", currentpage, totalpages)
//set our API call
apiCall := fmt.Sprintf("%s/service/%s/wafs/%s/rule_statuses?page[size]=%d&page[number]=%d", apiEndpoint, serviceID, wafID, perpage, currentpage)
resp, err := resty.R().
SetHeader("Accept", "application/vnd.api+json").
SetHeader("Fastly-Key", apiKey).
SetHeader("Content-Type", "application/vnd.api+json").
Get(apiCall)
//check if we had an issue with our call
if err != nil {
Error.Println("Error with API call: " + apiCall)
Error.Println(resp.String())
return false
}
//unmarshal the response and extract the rules
body := RuleList{}
json.Unmarshal([]byte(resp.String()), &body)
result.page = append(result.page, body)
}
var log []int64
var disabled []int64
var block []int64
for _, p := range result.page {
for _, r := range p.Data {
ruleID, err := strconv.ParseInt(r.Attributes.ModsecRuleID, 10, 64)
if err != nil {
Error.Printf("Failed to parse rule as int %s\n", r.Attributes.ModsecRuleID)
} else {
switch r.Attributes.Status {
case "log":
log = append(log, ruleID)
case "block":
block = append(block, ruleID)
case "disabled":
disabled = append(disabled, ruleID)
}
}
}
}
//backup OWASP settings
owasp, _ := client.GetOWASP(&fastly.GetOWASPInput{
Service: serviceID,
ID: wafID,
})
if owasp.ID == "" {
Error.Println("No OWASP Object to back up")
return false
}
o := owaspSettings{
AllowedHTTPVersions: owasp.AllowedHTTPVersions,
AllowedMethods: owasp.AllowedMethods,
AllowedRequestContentType: owasp.AllowedRequestContentType,
AllowedRequestContentTypeCharset: owasp.AllowedRequestContentTypeCharset,
ArgLength: owasp.ArgLength,
ArgNameLength: owasp.ArgNameLength,
CombinedFileSizes: owasp.CombinedFileSizes,
CriticalAnomalyScore: owasp.CriticalAnomalyScore,
CRSValidateUTF8Encoding: owasp.CRSValidateUTF8Encoding,
ErrorAnomalyScore: owasp.ErrorAnomalyScore,
HTTPViolationScoreThreshold: owasp.HTTPViolationScoreThreshold,
InboundAnomalyScoreThreshold: owasp.InboundAnomalyScoreThreshold,
LFIScoreThreshold: owasp.LFIScoreThreshold,
MaxFileSize: owasp.MaxFileSize,
MaxNumArgs: owasp.MaxNumArgs,
NoticeAnomalyScore: owasp.NoticeAnomalyScore,
ParanoiaLevel: owasp.ParanoiaLevel,
PHPInjectionScoreThreshold: owasp.PHPInjectionScoreThreshold,
RCEScoreThreshold: owasp.RCEScoreThreshold,
RestrictedExtensions: owasp.RestrictedExtensions,
RestrictedHeaders: owasp.RestrictedHeaders,
RFIScoreThreshold: owasp.RFIScoreThreshold,
SessionFixationScoreThreshold: owasp.SessionFixationScoreThreshold,
SQLInjectionScoreThreshold: owasp.SQLInjectionScoreThreshold,
XSSScoreThreshold: owasp.XSSScoreThreshold,
TotalArgLength: owasp.TotalArgLength,
WarningAnomalyScore: owasp.WarningAnomalyScore,
}
//create a hash
hasher := sha1.New()
hasher.Write([]byte(serviceID + time.Now().String()))
sha := hex.EncodeToString(hasher.Sum(nil))
//Safe Backup Object
backup := Backup{
ID: sha,
ServiceID: serviceID,
Disabled: disabled,
Block: block,
Log: log,
Owasp: o,
Updated: time.Now(),
}
buf := new(bytes.Buffer)
if err := toml.NewEncoder(buf).Encode(backup); err != nil {
Error.Println(err)
return false
}
err = ioutil.WriteFile(bpath, buf.Bytes(), 0644)
if err != nil {
Error.Println(err)
return false
}
Info.Printf("Bytes written: %d to %s\n", buf.Len(), bpath)
return true
}
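// homeDir returns the current user's home directory, falling back to the HOME environment variable.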
func homeDir() string {
user, err := user.Current()
if err != nil {
return os.Getenv("HOME")
}
return user.HomeDir
}
var (
app = kingpin.New("waflyctl", "Fastly WAF Control Tool").Version(version)
action = app.Flag("action", "Action to take on the rules list and rule tags. Overwrites action defined in config file. One of: disabled, block, log.").Enum("disabled", "block", "log")
apiEndpoint = app.Flag("apiendpoint", "Fastly API endpoint to use.").Default("https://api.fastly.com").String()
apiKey = app.Flag("apikey", "API Key to use.").Envar("FASTLY_API_TOKEN").Required().String()
backup = app.Flag("backup", "Store a copy of the WAF configuration locally.").Bool()
backupPath = app.Flag("backup-path", "Location for the WAF configuration backup file.").Default(homeDir() + "/waflyctl-backup-<service-id>.toml").String()
configFile = app.Flag("config", "Location of configuration file for waflyctl.").Default(homeDir() + "/.waflyctl.toml").String()
configurationSet = app.Flag("configuration-set", "Changes WAF configuration set to the provided one.").String()
deprovision = app.Flag("delete", "Remove a WAF configuration created with waflyctl.").Bool()
deleteLogs = app.Flag("delete-logs", "When set removes WAF logging configuration.").Bool()
forceStatus = app.Flag("force-status", "Force all rules (inc. disabled) to update for the given tag.").Bool()
logOnly = app.Flag("enable-logs-only", "Add logging configuration only to the service. No other changes will be made. Can be used together with --with-perimeterx").Bool()
omitLogs = app.Flag("no-logs", "Provision the WAF without setting up any logging endpoints.").Bool()
listAllRules = app.Flag("list-all-rules", "List all rules available on the Fastly platform for a given configuration set.").PlaceHolder("CONFIGURATION-SET").String()
listConfigSet = app.Flag("list-configuration-sets", "List all configuration sets and their status.").Bool()
listRules = app.Flag("list-rules", "List current WAF rules and their status.").Bool()
editOWASP = app.Flag("owasp", "Edit the OWASP object base on the settings in the configuration file.").Bool()
provision = app.Flag("provision", "Provision a new WAF or update an existing one.").Bool()
publishers = app.Flag("publisher", "Which rule publisher to use in a comma delimited fashion. Overwrites publisher defined in config file. Choices are: owasp, trustwave, fastly").String()
rules = app.Flag("rules", "Which rules to apply action on in a comma delimited fashion. Overwrites ruleid defined in config file. Example: 1010010,931100,931110.").String()
serviceID = app.Flag("serviceid", "Service ID to Provision.").Required().String()
status = app.Flag("status", "Disable or Enable the WAF. A disabled WAF will not block any traffic. In addition disabling a WAF does not change rule statuses on its configure policy. One of: disable, enable.").Enum("disable", "enable")
tags = app.Flag("tags", "Which rules tags to add to the ruleset in a comma delimited fashion. Overwrites tags defined in config file. Example: wordpress,language-php,drupal.").String()
weblogExpiry = app.Flag("web-log-expiry", "The default expiry of the web-log condition, expressed in days from the current date-time.").Default("-1").Int()
withPX = app.Flag("with-perimeterx", "Enable if the customer has PerimeterX enabled on the service as well as WAF. Helps fix null value logging.").Bool()
addComment = app.Flag("comment", "Add version comment when creating a new version.").String()
)
func main() {
kingpin.MustParse(app.Parse(os.Args[1:]))
const logo = `
_.--------._
.' _|_|_|_|_ '.
/ _|_|_|_|_|_|_ \
| |_|_|_|_|_|_|_| |
|_|_|_|_|_|_|_|_|_|
| |_|_|_|_|_|_|_| |
| |_|_|_|_|_|_|_| |
\ -|_|_|_|_|_|- /
'. -|_|_|_|- .'
` + `----------`
fmt.Println(logo)
// grab version and build
fmt.Println("Fastly WAF Control Tool version: " + version + " built on " + date)
//run init to get our logging configured
config := Init(*configFile)
config.APIEndpoint = *apiEndpoint
//check if rule action was set on CLI
if *action != "" {
config.Action = *action
Info.Println("using rule action set by CLI: ", *action)
}
//check status rule action was set on CLI
if *status != "" {
Info.Println("using rule status set by CLI: ", *status)
}
//if rules are passed via CLI parse them and replace config parameters
if *rules != "" {
Info.Println("using rule IDs set by CLI:")
ruleIDs := strings.Split(*rules, ",")
for _, id := range ruleIDs {
//cast IDs from string to int, skipping any that do not parse
i, err := strconv.ParseInt(strings.TrimSpace(id), 10, 64)
if err != nil {
Error.Printf("Ignoring invalid rule ID %q: %v\n", id, err)
continue
}
Info.Println("- ruleID:", id)
config.Rules = append(config.Rules, i)
}
}
//if rule tags are passed via CLI parse them and replace config parameters
if *tags != "" {
Info.Println("using tags set by CLI:")
tags := strings.Split(*tags, ",")
for _, tag := range tags {
Info.Println(" - tag name: ", tag)
config.Tags = append(config.Tags, tag)
}
}
//if rule publisher is passed via CLI parse them and replace config parameters
if *publishers != "" {
Info.Println("using publisher set by CLI:")
publishers := strings.Split(*publishers, ",")
for _, publisher := range publishers {
Info.Println(" - publisher name: ", publisher)
config.Publisher = append(config.Publisher, publisher)
}
}
//if log expiry is passed through CLI, override config file
if *weblogExpiry >= 0 {
Info.Println("using web log expiry set by CLI:", *weblogExpiry)
config.Weblog.Expiry = uint(*weblogExpiry)
}
//create Fastly client
client, err := fastly.NewClientForEndpoint(*apiKey, config.APIEndpoint)
if err != nil {
Error.Fatal(err)
}
//get currently activeVersion to be used
activeVersion := getActiveVersion(*client, *serviceID)
// add logs only to a service
if *logOnly {
Info.Println("Adding logging endpoints only")
version := cloneVersion(*client, *serviceID, activeVersion, *addComment)
//create VCL Snippet
vclSnippet(*client, *serviceID, config.Vclsnippet, version)
//set logging parameters
fastlyLogging(*client, *serviceID, config, version)
//configure any logging conditions
AddLoggingCondition(*client, *serviceID, version, config, *withPX)
//validate the config
validateVersion(*client, *serviceID, version)
Info.Println("Completed")
os.Exit(0)
}
// check if is a de-provisioning call
if *deprovision {
version := cloneVersion(*client, *serviceID, activeVersion, *addComment)
result := DeprovisionWAF(*client, *serviceID, *apiKey, config, version)
if result {
Info.Printf("Successfully deleted WAF on Service ID %s. Do not forget to activate version %v!\n", *serviceID, version)
Info.Println("Completed")
os.Exit(0)
} else {
Error.Printf("Failed to delete WAF on Service ID %s..see above for details\n", *serviceID)
Info.Println("Completed")
os.Exit(1)
}
}
// check if is a delete logs parameter was called
if *deleteLogs {
version := cloneVersion(*client, *serviceID, activeVersion, *addComment)
//delete the logs
result := DeleteLogsCall(*client, *serviceID, config, version)
if result {
Info.Printf("Successfully deleted logging endpint %s and %s in Service ID %s. Remember to activate version %v!\n", config.Weblog.Name, config.Waflog.Name, *serviceID, version)
Info.Println("Completed")
os.Exit(0)
} else {
Error.Printf("Failed to delete logging endpoints on Service ID %s..see above for details\n", *serviceID)
Info.Println("Completed")
os.Exit(1)
}
}
Info.Printf("Active config version: %v.\n", activeVersion)
wafs, err := client.ListWAFs(&fastly.ListWAFsInput{
Service: *serviceID,
Version: activeVersion,
})
if err != nil {
Error.Fatal(err)
}
if len(wafs) != 0 {
//do rule adjustment here
for index, waf := range wafs {
//if no individual tags or rules are set via CLI run both actions
switch {
//list configuration sets rules
case *listConfigSet:
Info.Println("Listing all configuration sets")
getConfigurationSets(config.APIEndpoint, *apiKey)
Info.Println("Completed")
os.Exit(0)
//list waf rules
case *listRules:
Info.Printf("Listing all rules for WAF ID: %s\n", waf.ID)
getRules(config.APIEndpoint, *apiKey, *serviceID, waf.ID)
Info.Println("Completed")
os.Exit(0)
//list all rules for a given configset
case *listAllRules != "":
Info.Printf("Listing all rules under configuration set ID: %s\n", *listAllRules)
configID := *listAllRules
getAllRules(config.APIEndpoint, *apiKey, configID)
Info.Println("Completed")
os.Exit(0)
//change a configuration set
case *configurationSet != "":
Info.Printf("Changing Configuration Set to: %s\n", *configurationSet)
configID := *configurationSet
setConfigurationSet(waf.ID, configID, *client)
Info.Println("Completed")
os.Exit(0)
case *status != "":
Info.Println("Changing WAF Status")
//rule management
changeStatus(config.APIEndpoint, *apiKey, waf.ID, *status)
Info.Println("Completed")
os.Exit(0)
case *tags != "":
Info.Println("Editing Tags")
Warning.Println("Publisher, Rules, OWASP Settings and Tags changes are versionless actions and thus do not generate a new config version")
//tags management
tagsConfig(config.APIEndpoint, *apiKey, *serviceID, waf.ID, config, *forceStatus)
//patch ruleset
if PatchRules(*serviceID, waf.ID, *client) {
Info.Println("Rule set successfully patched")
} else {
Error.Println("Issue patching ruleset see above error..")
}
case *publishers != "":
Info.Println("Editing Publishers")
Warning.Println("Publisher, Rules, OWASP Settings and Tags changes are versionless actions and thus do not generate a new config version")
//Publisher management
publisherConfig(config.APIEndpoint, *apiKey, *serviceID, waf.ID, config)
//patch ruleset
if PatchRules(*serviceID, waf.ID, *client) {
Info.Println("Rule set successfully patched")
} else {
Error.Println("Issue patching ruleset see above error..")
}
case *rules != "":
Info.Println("Editing Rules")
Warning.Println("Publisher, Rules, OWASP Settings and Tags changes are versionless actions and thus do not generate a new config version")
//rule management
rulesConfig(config.APIEndpoint, *apiKey, *serviceID, waf.ID, config)
//patch ruleset
if PatchRules(*serviceID, waf.ID, *client) {
Info.Println("Rule set successfully patched")
} else {
Error.Println("Issue patching ruleset see above error..")
}
case *editOWASP:
Info.Printf("Editing OWASP settings for WAF #%v\n", index+1)
Warning.Println("Publisher, Rules, OWASP Settings and Tags changes are versionless actions and thus do not generate a new config version")
createOWASP(*client, *serviceID, config, waf.ID)
//patch ruleset
if PatchRules(*serviceID, waf.ID, *client) {
Info.Println("Rule set successfully patched")
} else {
Error.Println("Issue patching ruleset see above error..")
}
case *withPX:
Info.Println("WAF enabled with PerimeterX, setting logging conditions")
version := cloneVersion(*client, *serviceID, activeVersion, *addComment)
AddLoggingCondition(*client, *serviceID, version, config, *withPX)
validateVersion(*client, *serviceID, version)
//back up WAF rules locally
case *backup:
Info.Println("Backing up WAF configuration")
bp := strings.Replace(*backupPath, "<service-id>", *serviceID, -1)
if !backupConfig(*apiEndpoint, *apiKey, *serviceID, waf.ID, *client, bp) {
os.Exit(1)
}
case *provision:
Warning.Println("Publisher, Rules, OWASP Settings and Tags changes are versionless actions and thus do not generate a new config version")
//tags management
tagsConfig(config.APIEndpoint, *apiKey, *serviceID, waf.ID, config, *forceStatus)
//rule management
rulesConfig(config.APIEndpoint, *apiKey, *serviceID, waf.ID, config)
//publisher management
publisherConfig(config.APIEndpoint, *apiKey, *serviceID, waf.ID, config)
//OWASP
createOWASP(*client, *serviceID, config, waf.ID)
//patch ruleset
if PatchRules(*serviceID, waf.ID, *client) {
Info.Println("Rule set successfully patched")
} else {
Error.Println("Issue patching ruleset see above error..")
}
default:
Error.Println("Nothing to do. Exiting")
os.Exit(1)
}
//validate the config
Info.Println("Completed")
os.Exit(0)
}
} else if *provision {
Warning.Printf("Provisioning a new WAF on Service ID: %s\n", *serviceID)
//clone current version
version := cloneVersion(*client, *serviceID, activeVersion, *addComment)
//provision a new WAF service
wafID := provisionWAF(*client, *serviceID, config, version)
//publisher management
publisherConfig(config.APIEndpoint, *apiKey, *serviceID, wafID, config)
//tags management
tagsConfig(config.APIEndpoint, *apiKey, *serviceID, wafID, config, *forceStatus)
//rule management
rulesConfig(config.APIEndpoint, *apiKey, *serviceID, wafID, config)
//Default Disabled
DefaultRuleDisabled(config.APIEndpoint, *apiKey, *serviceID, wafID, config)
//Add logging conditions
// Ensure logging is defined in config and not being explicitly omitted
if !*omitLogs && config.Weblog.Name != "" {
AddLoggingCondition(*client, *serviceID, version, config, *withPX)
}
latest, err := client.LatestVersion(&fastly.LatestVersionInput{
Service: *serviceID,
})
if err != nil {
Error.Fatal(err)
}
//patch ruleset
if PatchRules(*serviceID, wafID, *client) {
Info.Println("Rule set successfully patched")
} else {
Error.Println("Issue patching ruleset see above error..")
}
//validate the config
validateVersion(*client, *serviceID, latest.Number)
Info.Println("Completed")
os.Exit(0)
} else {
Error.Println("Nothing to do. Exiting")
os.Exit(1)
}
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
main.go
|
//go:generate go install -v github.com/kevinburke/go-bindata/go-bindata
//go:generate go-bindata -prefix res/ -pkg assets -o assets/assets.go res/Brave.lnk
//go:generate go install -v github.com/josephspurrier/goversioninfo/cmd/goversioninfo
//go:generate goversioninfo -icon=res/papp.ico -manifest=res/papp.manifest
package main
import (
"os"
"path"
"github.com/portapps/brave-portable/assets"
"github.com/portapps/portapps/v3"
"github.com/portapps/portapps/v3/pkg/log"
"github.com/portapps/portapps/v3/pkg/registry"
"github.com/portapps/portapps/v3/pkg/shortcut"
"github.com/portapps/portapps/v3/pkg/utl"
)
type config struct {
Cleanup bool `yaml:"cleanup" mapstructure:"cleanup"`
}
var (
app *portapps.App
cfg *config
)
func init() {
var err error
// Default config
cfg = &config{
Cleanup: false,
}
// Init app
if app, err = portapps.NewWithCfg("brave-portable", "Brave", cfg); err != nil {
log.Fatal().Err(err).Msg("Cannot initialize application. See log file for more info.")
}
}
func main() {
utl.CreateFolder(app.DataPath)
app.Process = utl.PathJoin(app.AppPath, "brave.exe")
app.Args = []string{
"--user-data-dir=" + app.DataPath,
"--disable-brave-update",
"--no-default-browser-check",
"--disable-logging",
"--disable-breakpad",
"--disable-machine-id",
"--disable-encryption-win",
}
// Cleanup on exit
if cfg.Cleanup {
defer func() {
utl.Cleanup([]string{
path.Join(os.Getenv("APPDATA"), "BraveSoftware"),
path.Join(os.Getenv("LOCALAPPDATA"), "BraveSoftware"),
})
}()
}
// Copy default shortcut
shortcutPath := path.Join(os.Getenv("APPDATA"), "Microsoft", "Windows", "Start Menu", "Programs", "Brave Portable.lnk")
defaultShortcut, err := assets.Asset("Brave.lnk")
if err != nil {
log.Error().Err(err).Msg("Cannot load asset Brave.lnk")
}
err = os.WriteFile(shortcutPath, defaultShortcut, 0644)
if err != nil {
log.Error().Err(err).Msg("Cannot write default shortcut")
}
// Update default shortcut
err = shortcut.Create(shortcut.Shortcut{
ShortcutPath: shortcutPath,
TargetPath: app.Process,
Arguments: shortcut.Property{Clear: true},
Description: shortcut.Property{Value: "Brave Portable by Portapps"},
IconLocation: shortcut.Property{Value: app.Process},
WorkingDirectory: shortcut.Property{Value: app.AppPath},
})
if err != nil {
log.Error().Err(err).Msg("Cannot create shortcut")
}
defer func() {
if err := os.Remove(shortcutPath); err != nil {
log.Error().Err(err).Msg("Cannot remove shortcut")
}
}()
// Registry keys
regsPath := utl.CreateFolder(app.RootPath, "reg")
bsRegKey := registry.Key{
Key: `HKCU\SOFTWARE\BraveSoftware`,
Arch: "32",
}
bbdRegKey := registry.Key{
Key: `HKCU\SOFTWARE\Brave-Browser-Development`,
Arch: "32",
}
if err := bsRegKey.Import(utl.PathJoin(regsPath, "BraveSoftware.reg")); err != nil {
log.Error().Err(err).Msg("Cannot import registry key")
}
if err := bbdRegKey.Import(utl.PathJoin(regsPath, "Brave-Browser-Development.reg")); err != nil {
log.Error().Err(err).Msg("Cannot import registry key")
}
defer func() {
if err := bsRegKey.Export(utl.PathJoin(regsPath, "BraveSoftware.reg")); err != nil {
log.Error().Err(err).Msg("Cannot export registry key")
}
if err := bbdRegKey.Export(utl.PathJoin(regsPath, "Brave-Browser-Development.reg")); err != nil {
log.Error().Err(err).Msg("Cannot export registry key")
}
if cfg.Cleanup {
if err := bsRegKey.Delete(true); err != nil {
log.Error().Err(err).Msg("Cannot remove registry key")
}
if err := bbdRegKey.Delete(true); err != nil {
log.Error().Err(err).Msg("Cannot remove registry key")
}
}
}()
defer app.Close()
app.Launch(os.Args[1:])
}
|
[
"\"APPDATA\"",
"\"LOCALAPPDATA\"",
"\"APPDATA\""
] |
[] |
[
"APPDATA",
"LOCALAPPDATA"
] |
[]
|
["APPDATA", "LOCALAPPDATA"]
|
go
| 2 | 0 | |
hudi-integ-test/src/test/java/org/apache/hudi/integ/ITTestBase.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hudi.integ;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.awaitility.Awaitility.await;
import com.github.dockerjava.api.DockerClient;
import com.github.dockerjava.api.command.DockerCmdExecFactory;
import com.github.dockerjava.api.command.ExecCreateCmd;
import com.github.dockerjava.api.command.ExecCreateCmdResponse;
import com.github.dockerjava.api.model.Container;
import com.github.dockerjava.core.DefaultDockerClientConfig;
import com.github.dockerjava.core.DockerClientBuilder;
import com.github.dockerjava.core.DockerClientConfig;
import com.github.dockerjava.core.command.ExecStartResultCallback;
import com.github.dockerjava.jaxrs.JerseyDockerCmdExecFactory;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.hudi.common.util.FileIOUtils;
import org.apache.hudi.common.util.collection.Pair;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.junit.Assert;
import org.junit.Before;
public abstract class ITTestBase {
public static final Logger LOG = LogManager.getLogger(ITTestBase.class);
protected static final String SPARK_WORKER_CONTAINER = "/spark-worker-1";
protected static final String ADHOC_1_CONTAINER = "/adhoc-1";
protected static final String ADHOC_2_CONTAINER = "/adhoc-2";
protected static final String HIVESERVER = "/hiveserver";
protected static final String HOODIE_WS_ROOT = "/var/hoodie/ws";
protected static final String HOODIE_JAVA_APP = HOODIE_WS_ROOT + "/hudi-spark/run_hoodie_app.sh";
protected static final String HUDI_HADOOP_BUNDLE =
HOODIE_WS_ROOT + "/docker/hoodie/hadoop/hive_base/target/hoodie-hadoop-mr-bundle.jar";
protected static final String HUDI_HIVE_BUNDLE =
HOODIE_WS_ROOT + "/docker/hoodie/hadoop/hive_base/target/hoodie-hive-bundle.jar";
protected static final String HUDI_SPARK_BUNDLE =
HOODIE_WS_ROOT + "/docker/hoodie/hadoop/hive_base/target/hoodie-spark-bundle.jar";
protected static final String HUDI_UTILITIES_BUNDLE =
HOODIE_WS_ROOT + "/docker/hoodie/hadoop/hive_base/target/hoodie-utilities.jar";
protected static final String HIVE_SERVER_JDBC_URL = "jdbc:hive2://hiveserver:10000";
protected static final String HADOOP_CONF_DIR = "/etc/hadoop";
// Skip these lines when capturing output from hive
private static final String DEFAULT_DOCKER_HOST = "unix:///var/run/docker.sock";
private static final String OVERRIDDEN_DOCKER_HOST = System.getenv("DOCKER_HOST");
protected DockerClient dockerClient;
protected Map<String, Container> runningContainers;
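// Builds the hive CLI command that registers the Hudi Hadoop bundle jar before running the given statement.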
static String[] getHiveConsoleCommand(String rawCommand) {
String jarCommand = "add jar " + HUDI_HADOOP_BUNDLE + ";";
String fullCommand = jarCommand + rawCommand;
List<String> cmd = new ArrayList<>();
cmd.add("hive");
cmd.add("--hiveconf");
cmd.add("hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat");
cmd.add("--hiveconf");
cmd.add("hive.stats.autogather=false");
cmd.add("-e");
cmd.add("\"" + fullCommand + "\"");
return cmd.stream().toArray(String[]::new);
}
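// Builds a beeline command line that runs the given HiveQL file against the Hive server, optionally passing an extra hivevar.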
private static String getHiveConsoleCommandFile(String commandFile, String additionalVar) {
StringBuilder builder = new StringBuilder().append("beeline -u " + HIVE_SERVER_JDBC_URL)
.append(" --hiveconf hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat ")
.append(" --hiveconf hive.stats.autogather=false ")
.append(" --hivevar hudi.hadoop.bundle=" + HUDI_HADOOP_BUNDLE);
if (additionalVar != null) {
builder.append(" --hivevar " + additionalVar + " ");
}
return builder.append(" -f ").append(commandFile).toString();
}
static String getSparkShellCommand(String commandFile) {
return new StringBuilder().append("spark-shell --jars ").append(HUDI_SPARK_BUNDLE)
.append(" --master local[2] --driver-class-path ").append(HADOOP_CONF_DIR)
.append(
" --conf spark.sql.hive.convertMetastoreParquet=false --deploy-mode client --driver-memory 1G --executor-memory 1G --num-executors 1 ")
.append(" --packages com.databricks:spark-avro_2.11:4.0.0 ").append(" -i ").append(commandFile).toString();
}
@Before
public void init() {
String dockerHost = (OVERRIDDEN_DOCKER_HOST != null) ? OVERRIDDEN_DOCKER_HOST : DEFAULT_DOCKER_HOST;
// Assuming insecure docker engine
DockerClientConfig config =
DefaultDockerClientConfig.createDefaultConfigBuilder().withDockerHost(dockerHost).build();
// using jaxrs/jersey implementation here (netty impl is also available)
DockerCmdExecFactory dockerCmdExecFactory = new JerseyDockerCmdExecFactory().withConnectTimeout(1000)
.withMaxTotalConnections(100).withMaxPerRouteConnections(10);
dockerClient = DockerClientBuilder.getInstance(config).withDockerCmdExecFactory(dockerCmdExecFactory).build();
await().atMost(60, SECONDS).until(this::servicesUp);
}
private boolean servicesUp() {
List<Container> containerList = dockerClient.listContainersCmd().exec();
for (Container c : containerList) {
if (!c.getState().equalsIgnoreCase("running")) {
LOG.info("Container : " + Arrays.toString(c.getNames()) + "not in running state, Curr State :" + c.getState());
return false;
}
}
runningContainers = containerList.stream().map(c -> Pair.of(c.getNames()[0], c))
.collect(Collectors.toMap(Pair::getLeft, Pair::getRight));
return true;
}
private String singleSpace(String str) {
return str.replaceAll("[\\s]+", " ");
}
private TestExecStartResultCallback executeCommandInDocker(String containerName, String[] command,
boolean expectedToSucceed) throws Exception {
Container sparkWorkerContainer = runningContainers.get(containerName);
ExecCreateCmd cmd = dockerClient.execCreateCmd(sparkWorkerContainer.getId()).withCmd(command).withAttachStdout(true)
.withAttachStderr(true);
ExecCreateCmdResponse createCmdResponse = cmd.exec();
TestExecStartResultCallback callback =
new TestExecStartResultCallback(new ByteArrayOutputStream(), new ByteArrayOutputStream());
dockerClient.execStartCmd(createCmdResponse.getId()).withDetach(false).withTty(false).exec(callback)
.awaitCompletion();
int exitCode = dockerClient.inspectExecCmd(createCmdResponse.getId()).exec().getExitCode();
LOG.info("Exit code for command : " + exitCode);
LOG.error("\n\n ###### Stdout #######\n" + callback.getStdout().toString());
LOG.error("\n\n ###### Stderr #######\n" + callback.getStderr().toString());
if (expectedToSucceed) {
Assert.assertTrue("Command (" + Arrays.toString(command) + ") expected to succeed. Exit (" + exitCode + ")",
exitCode == 0);
} else {
Assert.assertTrue("Command (" + Arrays.toString(command) + ") expected to fail. Exit (" + exitCode + ")",
exitCode != 0);
}
cmd.close();
return callback;
}
void executeCommandStringsInDocker(String containerName, List<String> commands) throws Exception {
for (String cmd : commands) {
executeCommandStringInDocker(containerName, cmd, true);
}
}
TestExecStartResultCallback executeCommandStringInDocker(String containerName, String cmd, boolean expectedToSucceed)
throws Exception {
LOG.info("\n\n#################################################################################################");
LOG.info("Container : " + containerName + ", Running command :" + cmd);
LOG.info("\n#################################################################################################");
String[] cmdSplits = singleSpace(cmd).split(" ");
return executeCommandInDocker(containerName, cmdSplits, expectedToSucceed);
}
Pair<String, String> executeHiveCommand(String hiveCommand) throws Exception {
LOG.info("\n\n#################################################################################################");
LOG.info("Running hive command :" + hiveCommand);
LOG.info("\n#################################################################################################");
String[] hiveCmd = getHiveConsoleCommand(hiveCommand);
TestExecStartResultCallback callback = executeCommandInDocker(HIVESERVER, hiveCmd, true);
return Pair.of(callback.getStdout().toString().trim(), callback.getStderr().toString().trim());
}
Pair<String, String> executeHiveCommandFile(String commandFile) throws Exception {
return executeHiveCommandFile(commandFile, null);
}
Pair<String, String> executeHiveCommandFile(String commandFile, String additionalVar) throws Exception {
String hiveCmd = getHiveConsoleCommandFile(commandFile, additionalVar);
TestExecStartResultCallback callback = executeCommandStringInDocker(HIVESERVER, hiveCmd, true);
return Pair.of(callback.getStdout().toString().trim(), callback.getStderr().toString().trim());
}
Pair<String, String> executeSparkSQLCommand(String commandFile, boolean expectedToSucceed) throws Exception {
String sparkShellCmd = getSparkShellCommand(commandFile);
TestExecStartResultCallback callback =
executeCommandStringInDocker(ADHOC_1_CONTAINER, sparkShellCmd, expectedToSucceed);
return Pair.of(callback.getStdout().toString(), callback.getStderr().toString());
}
private void saveUpLogs() {
try {
// save up the Hive log files for introspection
String hiveLogStr =
executeCommandStringInDocker(HIVESERVER, "cat /tmp/root/hive.log", true).getStdout().toString();
String filePath = System.getProperty("java.io.tmpdir") + "/" + System.currentTimeMillis() + "-hive.log";
FileIOUtils.writeStringToFile(hiveLogStr, filePath);
LOG.info("Hive log saved up at : " + filePath);
} catch (Exception e) {
LOG.error("Unable to save up logs..", e);
}
}
void assertStdOutContains(Pair<String, String> stdOutErr, String expectedOutput) {
assertStdOutContains(stdOutErr, expectedOutput, 1);
}
void assertStdOutContains(Pair<String, String> stdOutErr, String expectedOutput, int times) {
// this is so that changes in padding don't affect comparison
String stdOutSingleSpaced = singleSpace(stdOutErr.getLeft()).replaceAll(" ", "");
expectedOutput = singleSpace(expectedOutput).replaceAll(" ", "");
int lastIndex = 0;
int count = 0;
while (lastIndex != -1) {
lastIndex = stdOutSingleSpaced.indexOf(expectedOutput, lastIndex);
if (lastIndex != -1) {
count++;
lastIndex += expectedOutput.length();
}
}
if (times != count) {
saveUpLogs();
}
Assert.assertEquals("Did not find output the expected number of times", times, count);
}
public class TestExecStartResultCallback extends ExecStartResultCallback {
// Storing the reference in subclass to expose to clients
private final ByteArrayOutputStream stdout;
private final ByteArrayOutputStream stderr;
public TestExecStartResultCallback(ByteArrayOutputStream stdout, ByteArrayOutputStream stderr) {
super(stdout, stderr);
this.stdout = stdout;
this.stderr = stderr;
}
@Override
public void onComplete() {
super.onComplete();
LOG.info("onComplete called");
try {
stderr.flush();
stdout.flush();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public ByteArrayOutputStream getStdout() {
return stdout;
}
public ByteArrayOutputStream getStderr() {
return stderr;
}
}
}
|
[
"\"DOCKER_HOST\""
] |
[] |
[
"DOCKER_HOST"
] |
[]
|
["DOCKER_HOST"]
|
java
| 1 | 0 | |
config/config.go
|
// Copyright 2020 tree xie
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// All configuration for the application is read here. Values from the default config are used as
// defaults, and the environment-specific config selected via the GO_ENV environment variable is
// applied on top. Note: fetch configuration by individual key whenever possible, because a single
// key lookup first checks the GO_ENV-specific config and then falls back to the default config,
// whereas fetching a whole map at once does not fall back to the default config when the map in
// the current environment's config is missing some keys.
package config
import (
"bytes"
"embed"
"fmt"
"io"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/spf13/cast"
"github.com/vicanso/forest/validate"
"github.com/vicanso/viperx"
)
//go:embed *.yml
var configFS embed.FS
var (
env = os.Getenv("GO_ENV")
defaultViperX = mustLoadConfig()
)
const (
// Dev is the environment value for development mode
Dev = "dev"
// Test is the environment value for the test environment
Test = "test"
// Production is the environment value for the production environment
Production = "production"
)
type (
// BasicConfig contains the application's basic configuration
BasicConfig struct {
// Listen address
Listen string `validate:"required,ascii" default:":7001"`
// Maximum number of requests being processed concurrently
RequestLimit uint `validate:"required" default:"1000"`
// Application name
Name string `validate:"required,ascii"`
// PID file
PidFile string `validate:"required"`
// URL prefixes of the application
Prefixes []string `validate:"omitempty,dive,xPath"`
// Timeout (applied to all requests)
Timeout time.Duration `default:"2m"`
}
// SessionConfig contains the session-related configuration
SessionConfig struct {
// Cookie path
CookiePath string `validate:"required,ascii"`
// Cookie key
Key string `validate:"required,ascii"`
// Cookie max age (may be empty)
MaxAge time.Duration
// Session TTL
TTL time.Duration `validate:"required"`
// Keys used to encrypt the cookie
Keys []string `validate:"required"`
// Cookie used to track users
TrackKey string `validate:"required,ascii"`
}
// RedisConfig contains the redis configuration
RedisConfig struct {
// Connection addresses
Addrs []string `validate:"required,dive,hostname_port"`
// Username
Username string
// Password
Password string
// Duration above which a request is considered slow
Slow time.Duration `validate:"required"`
// Maximum number of requests being processed concurrently
MaxProcessing uint32 `validate:"required" default:"100"`
// Connection pool size
PoolSize int `default:"100"`
// Key prefix
Prefix string
// Master name used in sentinel mode
Master string
}
// DatabaseConfig contains the database configuration
DatabaseConfig struct {
// Connection URI
URI string `validate:"required"`
// Maximum number of open connections
MaxOpenConns int `default:"100"`
// Maximum number of idle connections
MaxIdleConns int `default:"10"`
// Maximum idle time of a connection
MaxIdleTime time.Duration `default:"5m"`
}
// MailConfig contains the email configuration
MailConfig struct {
Host string `validate:"required,hostname"`
Port int `validate:"required,number"`
User string `validate:"required,email"`
Password string `validate:"required,min=1,max=100"`
}
// InfluxdbConfig contains the influxdb configuration
InfluxdbConfig struct {
// Bucket used for storage
Bucket string `validate:"required,min=1,max=50"`
// Organization name
Org string `validate:"required,min=1,max=100"`
// Connection URI
URI string `validate:"required,url"`
// Authentication token
Token string `validate:"required,ascii"`
// Batch size for writes
BatchSize uint `default:"100" validate:"required,min=1,max=5000"`
// Flush interval
FlushInterval time.Duration `default:"30s" validate:"required"`
// Whether to enable gzip
Gzip bool
// Whether influxdb is disabled
Disabled bool
}
// LocationConfig contains the location service configuration
LocationConfig struct {
Timeout time.Duration `validate:"required"`
BaseURL string `validate:"required,url"`
}
// MinioConfig contains the minio configuration
MinioConfig struct {
Endpoint string `validate:"required,hostname_port"`
AccessKeyID string `validate:"required,min=3"`
SecretAccessKey string `validate:"required,min=6"`
SSL bool
}
// PyroscopeConfig contains the pyroscope configuration
PyroscopeConfig struct {
Addr string `validate:"omitempty,url"`
Token string
}
)
// mustLoadConfig loads the configuration and panics on error
func mustLoadConfig() *viperx.ViperX {
configType := "yml"
defaultViperX := viperx.New(configType)
readers := make([]io.Reader, 0)
for _, name := range []string{
"default",
GetENV(),
} {
data, err := configFS.ReadFile(name + "." + configType)
if err != nil {
panic(err)
}
readers = append(readers, bytes.NewReader(data))
}
err := defaultViperX.ReadConfig(readers...)
if err != nil {
panic(err)
}
return defaultViperX
}
// mustValidate validates the data and panics on error; only used for configuration checks at startup
func mustValidate(v interface{}) {
err := validate.Do(v, nil)
if err != nil {
panic(err)
}
}
// GetENV returns the current runtime environment
func GetENV() string {
if env == "" {
return Dev
}
return env
}
// MustGetBasicConfig returns the basic configuration
func MustGetBasicConfig() *BasicConfig {
prefix := "basic."
basicConfig := &BasicConfig{
Name: defaultViperX.GetString(prefix + "name"),
RequestLimit: defaultViperX.GetUint(prefix + "requestLimit"),
Listen: defaultViperX.GetStringFromENV(prefix + "listen"),
Prefixes: defaultViperX.GetStringSlice(prefix + "prefixes"),
Timeout: defaultViperX.GetDurationFromENV(prefix + "timeout"),
}
pidFile := fmt.Sprintf("%s.pid", basicConfig.Name)
pwd, _ := os.Getwd()
if pwd != "" {
pidFile = pwd + "/" + pidFile
}
basicConfig.PidFile = pidFile
mustValidate(basicConfig)
return basicConfig
}
// MustGetSessionConfig returns the session configuration
func MustGetSessionConfig() *SessionConfig {
prefix := "session."
sessConfig := &SessionConfig{
MaxAge: defaultViperX.GetDurationFromENV(prefix + "maxAge"),
TTL: defaultViperX.GetDurationFromENV(prefix + "ttl"),
Key: defaultViperX.GetString(prefix + "key"),
CookiePath: defaultViperX.GetString(prefix + "path"),
Keys: defaultViperX.GetStringSlice(prefix + "keys"),
TrackKey: defaultViperX.GetString(prefix + "trackKey"),
}
mustValidate(sessConfig)
return sessConfig
}
// MustGetRedisConfig returns the redis configuration
func MustGetRedisConfig() *RedisConfig {
prefix := "redis."
uri := defaultViperX.GetStringFromENV(prefix + "uri")
uriInfo, err := url.Parse(uri)
if err != nil {
panic(err)
}
// Get the password
password, _ := uriInfo.User.Password()
username := uriInfo.User.Username()
query := uriInfo.Query()
// Get the slow request threshold
slowValue := query.Get("slow")
slow := 100 * time.Millisecond
if slowValue != "" {
slow, err = time.ParseDuration(slowValue)
if err != nil {
panic(err)
}
}
// Get the maximum number of concurrent requests
maxProcessing := 1000
maxValue := query.Get("maxProcessing")
if maxValue != "" {
maxProcessing, err = strconv.Atoi(maxValue)
if err != nil {
panic(err)
}
}
// Defaults to 0 if conversion fails
poolSize, _ := strconv.Atoi(query.Get("poolSize"))
redisConfig := &RedisConfig{
Addrs: strings.Split(uriInfo.Host, ","),
Username: username,
Password: password,
Slow: slow,
MaxProcessing: uint32(maxProcessing),
PoolSize: poolSize,
Master: query.Get("master"),
}
keyPrefix := query.Get("prefix")
if keyPrefix != "" {
redisConfig.Prefix = keyPrefix + ":"
}
mustValidate(redisConfig)
return redisConfig
}
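// Illustrative example (hypothetical URI, not from the real yml files):
// redis://user:pass@127.0.0.1:6379,127.0.0.1:6380/?slow=300ms&maxProcessing=500&poolSize=200&prefix=app&master=my-master
// parses into Addrs=[127.0.0.1:6379 127.0.0.1:6380], Username=user, Password=pass,
// Slow=300ms, MaxProcessing=500, PoolSize=200, Prefix="app:", Master=my-master.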
// MustGetDatabaseConfig returns the database configuration
func MustGetDatabaseConfig() *DatabaseConfig {
prefix := "database."
uri := defaultViperX.GetStringFromENV(prefix + "uri")
maxIdleConns := 0
maxOpenConns := 0
var maxIdleTime time.Duration
arr := strings.Split(uri, "?")
if len(arr) == 2 {
query, _ := url.ParseQuery(arr[1])
maxIdleConns = cast.ToInt(query.Get("maxIdleConns"))
maxOpenConns = cast.ToInt(query.Get("maxOpenConns"))
maxIdleTime = cast.ToDuration(query.Get("maxIdleTime"))
query.Del("maxIdleConns")
query.Del("maxOpenConns")
query.Del("maxIdleTime")
uri = arr[0]
s := query.Encode()
if s != "" {
uri += ("?" + s)
}
}
databaseConfig := &DatabaseConfig{
URI: uri,
MaxIdleConns: maxIdleConns,
MaxOpenConns: maxOpenConns,
MaxIdleTime: maxIdleTime,
}
mustValidate(databaseConfig)
return databaseConfig
}
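// Illustrative example (hypothetical URI): a connection string such as
// postgres://user:pass@127.0.0.1:5432/forest?maxIdleConns=20&maxOpenConns=200&maxIdleTime=10m
// yields URI=postgres://user:pass@127.0.0.1:5432/forest (the pool parameters are stripped from the URI),
// MaxIdleConns=20, MaxOpenConns=200, MaxIdleTime=10m.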
// MustGetMailConfig returns the mail configuration
func MustGetMailConfig() *MailConfig {
prefix := "mail."
urlInfo, err := url.Parse(defaultViperX.GetStringFromENV(prefix + "url"))
if err != nil {
panic(err)
}
pass, _ := urlInfo.User.Password()
port, err := strconv.Atoi(urlInfo.Port())
if err != nil {
panic(err)
}
mailConfig := &MailConfig{
Host: urlInfo.Hostname(),
Port: port,
User: urlInfo.User.Username(),
Password: pass,
}
mustValidate(mailConfig)
return mailConfig
}
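// Illustrative example (hypothetical URL): smtp://user%40example.com:secret@smtp.example.com:587
// yields Host=smtp.example.com, Port=587, User=user@example.com, Password=secret.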
// MustGetInfluxdbConfig returns the influxdb configuration
func MustGetInfluxdbConfig() *InfluxdbConfig {
prefix := "influxdb."
urlInfo, err := url.Parse(defaultViperX.GetStringFromENV(prefix + "uri"))
if err != nil {
panic(err)
}
query := urlInfo.Query()
// Batch size defaults to 100
batchSize := uint(100)
batchSizeValue := query.Get("batchSize")
if batchSizeValue != "" {
batchSize = cast.ToUint(batchSizeValue)
}
// Flush interval defaults to 10 seconds
flushInterval := 10 * time.Second
flushIntervalValue := query.Get("flushInterval")
if flushIntervalValue != "" {
flushInterval = cast.ToDuration(flushIntervalValue)
}
influxdbConfig := &InfluxdbConfig{
URI: fmt.Sprintf("%s://%s", urlInfo.Scheme, urlInfo.Host),
Bucket: query.Get("bucket"),
Org: query.Get("org"),
Token: query.Get("token"),
BatchSize: batchSize,
FlushInterval: flushInterval,
Gzip: cast.ToBool(query.Get("gzip")),
Disabled: cast.ToBool(query.Get("disabled")),
}
mustValidate(influxdbConfig)
return influxdbConfig
}
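// Illustrative example (hypothetical URI):
// http://127.0.0.1:8086/?bucket=forest&org=my-org&token=abc&batchSize=200&flushInterval=15s&gzip=true
// yields URI=http://127.0.0.1:8086, Bucket=forest, Org=my-org, Token=abc,
// BatchSize=200, FlushInterval=15s, Gzip=true, Disabled=false.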
// MustGetLocationConfig returns the location service configuration
func MustGetLocationConfig() *LocationConfig {
prefix := "location."
locationConfig := &LocationConfig{
BaseURL: defaultViperX.GetString(prefix + "baseURL"),
Timeout: defaultViperX.GetDuration(prefix + "timeout"),
}
mustValidate(locationConfig)
return locationConfig
}
// MustGetMinioConfig returns the minio configuration
func MustGetMinioConfig() *MinioConfig {
prefix := "minio."
urlInfo, err := url.Parse(defaultViperX.GetStringFromENV(prefix + "uri"))
if err != nil {
panic(err)
}
query := urlInfo.Query()
minioConfig := &MinioConfig{
Endpoint: urlInfo.Host,
AccessKeyID: query.Get("accessKeyID"),
SecretAccessKey: query.Get("secretAccessKey"),
SSL: cast.ToBool(query.Get("ssl")),
}
mustValidate(minioConfig)
return minioConfig
}
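// Illustrative example (hypothetical URI):
// http://127.0.0.1:9000/?accessKeyID=minioadmin&secretAccessKey=minioadmin&ssl=false
// yields Endpoint=127.0.0.1:9000, AccessKeyID=minioadmin, SecretAccessKey=minioadmin, SSL=false.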
// MustGetPyroscopeConfig returns the pyroscope configuration
func MustGetPyroscopeConfig() *PyroscopeConfig {
prefix := "pyroscope."
pyroscopeConfig := &PyroscopeConfig{
Addr: defaultViperX.GetString(prefix + "addr"),
Token: defaultViperX.GetString(prefix + "token"),
}
return pyroscopeConfig
}
|
[
"\"GO_ENV\""
] |
[] |
[
"GO_ENV"
] |
[]
|
["GO_ENV"]
|
go
| 1 | 0 | |
google-cloud-sdk/lib/third_party/containerregistry/transform/v1/metadata_.py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package manipulates Docker image metadata."""
from collections import namedtuple
import copy
import os
_OverridesT = namedtuple('OverridesT', [
'name', 'parent', 'size', 'entrypoint', 'cmd',
'env', 'labels', 'ports', 'volumes', 'workdir', 'user'
])
class Overrides(_OverridesT):
"""Docker image layer metadata options."""
def __new__(
cls,
name=None,
parent=None,
size=None,
entrypoint=None,
cmd=None,
user=None,
labels=None,
env=None,
ports=None,
volumes=None,
workdir=None):
"""Constructor."""
return super(Overrides, cls).__new__(
cls, name=name, parent=parent, size=size, entrypoint=entrypoint,
cmd=cmd, user=user, labels=labels, env=env, ports=ports,
volumes=volumes, workdir=workdir)
# NOT THREADSAFE
def _Resolve(
value,
environment
):
"""Resolves environment variables embedded in the given value."""
outer_env = os.environ
try:
os.environ = environment
return os.path.expandvars(value)
finally:
os.environ = outer_env
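# Illustrative example (hypothetical values): _Resolve('$PATH:/opt/bin', {'PATH': '/usr/local/bin'})
# expands the reference against the supplied environment and returns '/usr/local/bin:/opt/bin'.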
# TODO(user): Use a typing.Generic?
def _DeepCopySkipNull(
data
):
"""Do a deep copy, skipping null entry."""
if type(data) == type(dict()):
return dict((_DeepCopySkipNull(k), _DeepCopySkipNull(v))
for k, v in data.iteritems() if v is not None)
return copy.deepcopy(data)
def _KeyValueToDict(
pair
):
"""Converts an iterable object of key=value pairs to dictionary."""
d = dict()
for kv in pair:
(k, v) = kv.split('=', 1)
d[k] = v
return d
def _DictToKeyValue(
d
):
return ['%s=%s' % (k, d[k]) for k in sorted(d.keys())]
def Override(
data,
options,
docker_version='1.5.0',
architecture='amd64',
operating_system='linux'
):
"""Rewrite and return a copy of the input data according to options.
Args:
data: The dict of Docker image layer metadata we're copying and rewriting.
options: The changes this layer makes to the overall image's metadata, which
first appear in this layer's version of the metadata.
docker_version: The version of docker to write in the metadata (default: 1.5.0)
architecture: The architecture to write in the metadata (default: amd64)
operating_system: The os to write in the metadata (default: linux)
Returns:
A deep copy of data, which has been updated to reflect the metadata
additions of this layer.
Raises:
Exception: a required option was missing.
"""
output = _DeepCopySkipNull(data)
if not options.name:
raise Exception('Missing required option: name')
output['id'] = options.name
if options.parent:
output['parent'] = options.parent
elif data:
raise Exception(
'Expected empty input object when parent is omitted, got: %s' % data)
if options.size:
output['Size'] = options.size
elif 'Size' in output:
del output['Size']
if 'config' not in output:
output['config'] = {}
if options.entrypoint:
output['config']['Entrypoint'] = options.entrypoint
if options.cmd:
output['config']['Cmd'] = options.cmd
if options.user:
output['config']['User'] = options.user
output['docker_version'] = docker_version
output['architecture'] = architecture
output['os'] = operating_system
if options.env:
# Build a dictionary of existing environment variables (used by _Resolve).
environ_dict = _KeyValueToDict(output['config'].get('Env', []))
# Merge in new environment variables, resolving references.
for k, v in options.env.iteritems():
# _Resolve handles scenarios like "PATH=$PATH:...".
environ_dict[k] = _Resolve(v, environ_dict)
output['config']['Env'] = _DictToKeyValue(environ_dict)
if options.labels:
label_dict = _KeyValueToDict(output['config'].get('Label', []))
for k, v in options.labels.iteritems():
label_dict[k] = v
output['config']['Label'] = _DictToKeyValue(label_dict)
if options.ports:
if 'ExposedPorts' not in output['config']:
output['config']['ExposedPorts'] = {}
for p in options.ports:
if '/' in p:
# The port spec has the form 80/tcp, 1234/udp
# so we simply use it as the key.
output['config']['ExposedPorts'][p] = {}
else:
# Assume tcp for naked ports.
output['config']['ExposedPorts'][p + '/tcp'] = {}
if options.volumes:
if 'Volumes' not in output['config']:
output['config']['Volumes'] = {}
for p in options.volumes:
output['config']['Volumes'][p] = {}
if options.workdir:
output['config']['WorkingDir'] = options.workdir
# TODO(user): comment, created, container_config
# container_config contains information about the container
# that was used to create this layer, so it shouldn't
# propagate from the parent to child. This is where we would
# annotate information that can be extracted by tools like Blubber
# or Quay.io's UI to gain insight into the source that generated
# the layer. A Dockerfile might produce something like:
# # (nop) /bin/sh -c "apt-get update"
# We might consider encoding the fully-qualified bazel build target:
# //tools/build_defs/docker:image
# However, we should be sensitive to leaking data through this field.
if 'container_config' in output:
del output['container_config']
return output
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
source/get_text_phi/lambda_function.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###################################################################################
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the "MIT No Attribution" License (the "License"). You may not #
# use this file except in compliance with the License. A copy of the #
# License is located in the "license" file accompanying this file. #
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS #
# OF ANY KIND, express or implied. See the License for the specific language #
# governing permissions and limitations under the License. #
###################################################################################
import logging
import json
import os
import boto3
import traceback
import mask_lib
def parse_object_pairs(pairs):
return pairs
def validate_parameters(event):
try:
if 'body' in event:
decoder = json.JSONDecoder(object_pairs_hook=parse_object_pairs)
decoded = decoder.decode(event['body'])
params = json.loads(event['body'])
# Ensure there are no duplicate keys
if len(params) != len(decoded):
return False
if len(params) == 1 and 'text' in params:
return True
elif len(params) == 2 and 'text' in params and 'phiDetectionThreshold' in params and 0.0 <= float(params['phiDetectionThreshold']) <= 1.0:
return True
return False
except:
return False
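# Illustrative request bodies (hypothetical): '{"text": "some note"}' is accepted, and so is
# '{"text": "some note", "phiDetectionThreshold": 0.5}'; bodies with duplicate keys, unknown keys,
# or a threshold outside [0.0, 1.0] are rejected.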
def get_parameters(event):
# Validate parameters
try:
params = json.loads(event['body'])
return params
except Exception as e:
raise e
def lambda_handler(event, context):
try:
global log_level
log_level = str(os.environ.get('LOG_LEVEL')).upper()
if log_level not in [
'DEBUG', 'INFO',
'WARNING', 'ERROR',
'CRITICAL'
]:
log_level = 'ERROR'
logging.getLogger().setLevel(log_level)
# Validate input parameters
if not validate_parameters(event):
logging.error('Bad Request')
return {
'statusCode': 400,
'body': ''
}
params = get_parameters(event)
phi_detection_threshold = float(params['phiDetectionThreshold']) if 'phiDetectionThreshold' in params else float(os.environ['PHI_DETECTION_THRESHOLD'])
phi_list = mask_lib.extract_phi_from_text(params['text'], phi_detection_threshold=phi_detection_threshold)
result = {
'statusCode': '200',
'body': json.dumps(phi_list)
}
return result
except Exception as error:
logging.error('lambda_handler error: %s' % (error))
logging.error('lambda_handler trace: %s' % traceback.format_exc())
result = {
'statusCode': '500',
'body': json.dumps({'message': 'error'})
}
return result
|
[] |
[] |
[
"PHI_DETECTION_THRESHOLD",
"LOG_LEVEL"
] |
[]
|
["PHI_DETECTION_THRESHOLD", "LOG_LEVEL"]
|
python
| 2 | 0 | |
src/git/config.go
|
/*
This file contains functionality around storing configuration settings
inside Git's metadata storage for the repository.
*/
package git
import (
"errors"
"fmt"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"github.com/git-town/git-town/src/command"
"github.com/git-town/git-town/src/util"
)
// Configuration manages the Git Town configuration
// stored in Git metadata in the given local repo and the global Git configuration.
// This class manages which config values are stored in local vs global settings.
type Configuration struct {
// localConfigCache is a cache of the Git configuration in the local Git repo.
localConfigCache map[string]string
// globalConfigCache is a cache of the global Git configuration.
globalConfigCache map[string]string
// for running shell commands
shell command.Shell
}
// Config provides the current configuration.
// This is used in the Git Town business logic, which runs in the current directory.
// The configuration is lazy-loaded this way to allow using some Git Town commands outside of Git repositories.
func Config() *Configuration {
if currentDirConfig == nil {
shell := command.ShellInCurrentDir{}
currentDirConfig = NewConfiguration(&shell)
}
return currentDirConfig
}
// currentDirConfig contains the Git Town configuration in the current working directory.
var currentDirConfig *Configuration
// NewConfiguration provides a Configuration instance reflecting the configuration values in the given directory.
func NewConfiguration(shell command.Shell) *Configuration {
return &Configuration{
shell: shell,
localConfigCache: loadGitConfig(shell, false),
globalConfigCache: loadGitConfig(shell, true),
}
}
// loadGitConfig provides the Git configuration from the given directory or the global one if the global flag is set.
func loadGitConfig(shell command.Shell, global bool) map[string]string {
result := map[string]string{}
cmdArgs := []string{"config", "-lz"}
if global {
cmdArgs = append(cmdArgs, "--global")
} else {
cmdArgs = append(cmdArgs, "--local")
}
res, err := shell.Run("git", cmdArgs...)
if err != nil {
return result
}
output := res.Output()
if output == "" {
return result
}
for _, line := range strings.Split(output, "\x00") {
if len(line) == 0 {
continue
}
parts := strings.SplitN(line, "\n", 2)
if len(parts) < 2 {
// entries without a value (e.g. boolean flags set without "=") contain no newline
result[parts[0]] = ""
continue
}
key, value := parts[0], parts[1]
result[key] = value
}
return result
}
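// Illustrative example (hypothetical output): "git config -lz" emits NUL-separated "key\nvalue" entries such as
// "git-town.main-branch-name\nmain\x00git-town-branch.feature.parent\nmain\x00", which this function
// parses into {"git-town.main-branch-name": "main", "git-town-branch.feature.parent": "main"}.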
// AddToPerennialBranches registers the given branch names as perennial branches.
// The branches must exist.
func (c *Configuration) AddToPerennialBranches(branchNames ...string) *command.Result {
return c.SetPerennialBranches(append(c.GetPerennialBranches(), branchNames...))
}
// AddGitAlias sets the given Git alias.
func (c *Configuration) AddGitAlias(command string) *command.Result {
return c.setGlobalConfigValue("alias."+command, "town "+command)
}
// DeleteMainBranchConfiguration removes the configuration entry for the main branch name.
func (c *Configuration) DeleteMainBranchConfiguration() {
c.removeLocalConfigValue("git-town.main-branch-name")
}
// DeleteParentBranch removes the parent branch entry for the given branch
// from the Git configuration.
func (c *Configuration) DeleteParentBranch(branchName string) {
c.removeLocalConfigValue("git-town-branch." + branchName + ".parent")
}
// DeletePerennialBranchConfiguration removes the configuration entry for the perennial branches.
func (c *Configuration) DeletePerennialBranchConfiguration() {
c.removeLocalConfigValue("git-town.perennial-branch-names")
}
// EnsureIsFeatureBranch asserts that the given branch is a feature branch.
func (c *Configuration) EnsureIsFeatureBranch(branchName, errorSuffix string) {
util.Ensure(c.IsFeatureBranch(branchName), fmt.Sprintf("The branch %q is not a feature branch. %s", branchName, errorSuffix))
}
// GetAncestorBranches returns the names of all parent branches for the given branch.
// This information is read from the cache in the Git config,
// so might be out of date when the branch hierarchy has been modified.
func (c *Configuration) GetAncestorBranches(branchName string) (result []string) {
parentBranchMap := c.GetParentBranchMap()
current := branchName
for {
if c.IsMainBranch(current) || c.IsPerennialBranch(current) {
return
}
parent := parentBranchMap[current]
if parent == "" {
return
}
result = append([]string{parent}, result...)
current = parent
}
}
// GetChildBranches returns the names of all branches for which the given branch
// is a parent.
func (c *Configuration) GetChildBranches(branchName string) (result []string) {
for _, key := range c.localConfigKeysMatching(`^git-town-branch\..*\.parent$`) {
parent := c.getLocalConfigValue(key)
if parent == branchName {
child := strings.TrimSuffix(strings.TrimPrefix(key, "git-town-branch."), ".parent")
result = append(result, child)
}
}
return
}
// GetCodeHostingDriverName provides the name of the code hosting driver to use.
func (c *Configuration) GetCodeHostingDriverName() string {
return c.getLocalOrGlobalConfigValue("git-town.code-hosting-driver")
}
// GetCodeHostingOriginHostname provides the host name of the code hosting server.
func (c *Configuration) GetCodeHostingOriginHostname() string {
return c.getLocalConfigValue("git-town.code-hosting-origin-hostname")
}
// getGlobalConfigValue provides the configuration value with the given key from the local Git configuration.
func (c *Configuration) getGlobalConfigValue(key string) string {
return c.globalConfigCache[key]
}
// getLocalConfigValue provides the configuration value with the given key from the local Git configuration.
func (c *Configuration) getLocalConfigValue(key string) string {
return c.localConfigCache[key]
}
// getLocalOrGlobalConfigValue provides the configuration value with the given key from the local and global Git configuration.
// Local configuration takes precedence.
func (c *Configuration) getLocalOrGlobalConfigValue(key string) string {
local := c.getLocalConfigValue(key)
if local != "" {
return local
}
return c.getGlobalConfigValue(key)
}
// GetParentBranchMap returns a map from branch name to its parent branch
func (c *Configuration) GetParentBranchMap() map[string]string {
result := map[string]string{}
for _, key := range c.localConfigKeysMatching(`^git-town-branch\..*\.parent$`) {
child := strings.TrimSuffix(strings.TrimPrefix(key, "git-town-branch."), ".parent")
parent := c.getLocalConfigValue(key)
result[child] = parent
}
return result
}
// GetGitAlias provides the currently set alias for the given Git Town command.
func (c *Configuration) GetGitAlias(command string) string {
return c.getGlobalConfigValue("alias." + command)
}
// GetGitHubToken provides the content of the GitHub API token stored in the local or global Git Town configuration.
func (c *Configuration) GetGitHubToken() string {
return c.getLocalOrGlobalConfigValue("git-town.github-token")
}
// GetMainBranch returns the name of the main branch.
func (c *Configuration) GetMainBranch() string {
return c.getLocalOrGlobalConfigValue("git-town.main-branch-name")
}
// GetParentBranch returns the name of the parent branch of the given branch.
func (c *Configuration) GetParentBranch(branchName string) string {
return c.getLocalConfigValue("git-town-branch." + branchName + ".parent")
}
// GetPerennialBranches returns all branches that are marked as perennial.
func (c *Configuration) GetPerennialBranches() []string {
result := c.getLocalOrGlobalConfigValue("git-town.perennial-branch-names")
if result == "" {
return []string{}
}
return strings.Split(result, " ")
}
// GetPullBranchStrategy returns the currently configured pull branch strategy.
func (c *Configuration) GetPullBranchStrategy() string {
config := c.getLocalOrGlobalConfigValue("git-town.pull-branch-strategy")
if config != "" {
return config
}
return "rebase"
}
// GetRemoteOriginURL returns the URL for the "origin" remote.
// In tests this value can be stubbed.
func (c *Configuration) GetRemoteOriginURL() string {
remote := os.Getenv("GIT_TOWN_REMOTE")
if remote != "" {
return remote
}
return c.shell.MustRun("git", "remote", "get-url", "origin").OutputSanitized()
}
// GetURLHostname returns the hostname contained within the given Git URL.
func (c *Configuration) GetURLHostname(url string) string {
hostnameRegex := regexp.MustCompile("(^[^:]*://([^@]*@)?|git@)([^/:]+).*")
matches := hostnameRegex.FindStringSubmatch(url)
if matches == nil {
return ""
}
return matches[3]
}
// GetURLRepositoryName returns the repository name contained within the given Git URL.
func (c *Configuration) GetURLRepositoryName(url string) string {
hostname := c.GetURLHostname(url)
repositoryNameRegex := regexp.MustCompile(".*" + hostname + "[/:](.+)")
matches := repositoryNameRegex.FindStringSubmatch(url)
if matches == nil {
return ""
}
return strings.TrimSuffix(matches[1], ".git")
}
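// Illustrative examples (hypothetical URLs): for both "git@github.com:git-town/git-town.git" and
// "https://github.com/git-town/git-town.git", GetURLHostname returns "github.com" and
// GetURLRepositoryName returns "git-town/git-town".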
// HasBranchInformation indicates whether this configuration contains any branch hierarchy entries.
func (c *Configuration) HasBranchInformation() bool {
for key := range c.localConfigCache {
if strings.HasPrefix(key, "git-town-branch.") {
return true
}
}
return false
}
// HasParentBranch returns whether or not the given branch has a parent
func (c *Configuration) HasParentBranch(branchName string) bool {
return c.GetParentBranch(branchName) != ""
}
// IsAncestorBranch indicates whether the given branch is an ancestor of the other given branch.
func (c *Configuration) IsAncestorBranch(branchName, ancestorBranchName string) bool {
ancestorBranches := c.GetAncestorBranches(branchName)
return util.DoesStringArrayContain(ancestorBranches, ancestorBranchName)
}
// IsFeatureBranch indicates whether the branch with the given name is
// a feature branch.
func (c *Configuration) IsFeatureBranch(branchName string) bool {
return !c.IsMainBranch(branchName) && !c.IsPerennialBranch(branchName)
}
// IsMainBranch indicates whether the branch with the given name
// is the main branch of the repository.
func (c *Configuration) IsMainBranch(branchName string) bool {
return branchName == c.GetMainBranch()
}
// IsOffline indicates whether Git Town is currently in offline mode
func (c *Configuration) IsOffline() bool {
config := c.getGlobalConfigValue("git-town.offline")
if config != "" {
return util.StringToBool(config)
}
return false
}
// IsPerennialBranch indicates whether the branch with the given name is
// a perennial branch.
func (c *Configuration) IsPerennialBranch(branchName string) bool {
perennialBranches := c.GetPerennialBranches()
return util.DoesStringArrayContain(perennialBranches, branchName)
}
// localConfigKeysMatching provides the names of the Git Town configuration keys matching the given RegExp string.
func (c *Configuration) localConfigKeysMatching(toMatch string) (result []string) {
re := regexp.MustCompile(toMatch)
for key := range c.localConfigCache {
if re.MatchString(key) {
result = append(result, key)
}
}
return result
}
// RemoveFromPerennialBranches removes the given branch as a perennial branch
func (c *Configuration) RemoveFromPerennialBranches(branchName string) {
c.SetPerennialBranches(util.RemoveStringFromSlice(c.GetPerennialBranches(), branchName))
}
// RemoveGitAlias removes the given Git alias.
func (c *Configuration) RemoveGitAlias(command string) *command.Result {
return c.removeGlobalConfigValue("alias." + command)
}
func (c *Configuration) removeGlobalConfigValue(key string) *command.Result {
delete(c.globalConfigCache, key)
return c.shell.MustRun("git", "config", "--global", "--unset", key)
}
// removeLocalConfigValue deletes the configuration value with the given key from the local Git Town configuration.
func (c *Configuration) removeLocalConfigValue(key string) {
delete(c.localConfigCache, key)
c.shell.MustRun("git", "config", "--unset", key)
}
// RemoveLocalGitConfiguration removes all Git Town configuration
func (c *Configuration) RemoveLocalGitConfiguration() {
_, err := c.shell.Run("git", "config", "--remove-section", "git-town")
if err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) && exitErr.ExitCode() == 128 {
// Git returns exit code 128 when trying to delete a non-existing config section.
// This is not an error condition in this workflow so we can ignore it here.
return
}
fmt.Printf("Unexpected error while removing the 'git-town' section from the Git configuration: %v\n", err)
os.Exit(1)
}
}
// RemoveOutdatedConfiguration removes outdated Git Town configuration
func (c *Configuration) RemoveOutdatedConfiguration() {
for child, parent := range c.GetParentBranchMap() {
if !HasBranch(child) || !HasBranch(parent) {
c.DeleteParentBranch(child)
}
}
}
// SetCodeHostingDriver sets the "git-town.code-hosting-driver" setting.
func (c *Configuration) SetCodeHostingDriver(value string) *command.Result {
const key = "git-town.code-hosting-driver"
c.localConfigCache[key] = value
return c.shell.MustRun("git", "config", key, value)
}
// SetCodeHostingOriginHostname sets the "git-town.code-hosting-origin-hostname" setting.
func (c *Configuration) SetCodeHostingOriginHostname(value string) *command.Result {
const key = "git-town.code-hosting-origin-hostname"
c.localConfigCache[key] = value
return c.shell.MustRun("git", "config", key, value)
}
func (c *Configuration) SetColorUI(value string) *command.Result {
return c.shell.MustRun("git", "config", "color.ui", value)
}
func (c *Configuration) setGlobalConfigValue(key, value string) *command.Result {
c.globalConfigCache[key] = value
return c.shell.MustRun("git", "config", "--global", key, value)
}
// setLocalConfigValue sets the local configuration with the given key to the given value.
func (c *Configuration) setLocalConfigValue(key, value string) *command.Result {
c.localConfigCache[key] = value
return c.shell.MustRun("git", "config", key, value)
}
// SetMainBranch marks the given branch as the main branch
// in the Git Town configuration.
func (c *Configuration) SetMainBranch(branchName string) *command.Result {
return c.setLocalConfigValue("git-town.main-branch-name", branchName)
}
// SetNewBranchPush updates whether the current repository is configured to push
// freshly created branches up to the origin remote.
func (c *Configuration) SetNewBranchPush(value bool, global bool) *command.Result {
if global {
return c.setGlobalConfigValue("git-town.new-branch-push-flag", strconv.FormatBool(value))
}
return c.setLocalConfigValue("git-town.new-branch-push-flag", strconv.FormatBool(value))
}
// SetOffline updates whether Git Town is in offline mode
func (c *Configuration) SetOffline(value bool) *command.Result {
return c.setGlobalConfigValue("git-town.offline", strconv.FormatBool(value))
}
// SetTestOrigin sets the origin to be used for testing.
func (c *Configuration) SetTestOrigin(value string) {
_ = c.setLocalConfigValue("git-town.testing.remote-url", value)
}
// SetParentBranch marks the given branch as the direct parent of the other given branch
// in the Git Town configuration.
func (c *Configuration) SetParentBranch(branchName, parentBranchName string) *command.Result {
return c.setLocalConfigValue("git-town-branch."+branchName+".parent", parentBranchName)
}
// SetPerennialBranches marks the given branches as perennial branches
func (c *Configuration) SetPerennialBranches(branchNames []string) *command.Result {
return c.setLocalConfigValue("git-town.perennial-branch-names", strings.Join(branchNames, " "))
}
// SetPullBranchStrategy updates the configured pull branch strategy.
func (c *Configuration) SetPullBranchStrategy(strategy string) *command.Result {
return c.setLocalConfigValue("git-town.pull-branch-strategy", strategy)
}
// SetShouldShipDeleteRemoteBranch updates whether the remote branch should be deleted after shipping.
func (c *Configuration) SetShouldShipDeleteRemoteBranch(value bool) *command.Result {
return c.setLocalConfigValue("git-town.ship-delete-remote-branch", strconv.FormatBool(value))
}
// SetShouldSyncUpstream updates whether this repo should sync with its upstream.
func (c *Configuration) SetShouldSyncUpstream(value bool) *command.Result {
return c.setLocalConfigValue("git-town.sync-upstream", strconv.FormatBool(value))
}
// ShouldNewBranchPush indicates whether the current repository is configured to push
// freshly created branches up to the origin remote.
func (c *Configuration) ShouldNewBranchPush() bool {
config := c.getLocalOrGlobalConfigValue("git-town.new-branch-push-flag")
if config == "" {
return false
}
return util.StringToBool(config)
}
// ShouldNewBranchPushGlobal indicates whether the global configuration requires pushing
// freshly created branches up to the origin remote.
func (c *Configuration) ShouldNewBranchPushGlobal() bool {
config := c.getGlobalConfigValue("git-town.new-branch-push-flag")
return config == "true"
}
// ShouldShipDeleteRemoteBranch indicates whether to delete the remote branch after shipping.
func (c *Configuration) ShouldShipDeleteRemoteBranch() bool {
setting := c.getLocalOrGlobalConfigValue("git-town.ship-delete-remote-branch")
if setting == "" {
return true
}
return util.StringToBool(setting)
}
// ShouldSyncUpstream indicates whether this repo should sync with its upstream.
func (c *Configuration) ShouldSyncUpstream() bool {
return c.getLocalOrGlobalConfigValue("git-town.sync-upstream") != "false"
}
// ValidateIsOnline asserts that Git Town is not in offline mode
func (c *Configuration) ValidateIsOnline() error {
if c.IsOffline() {
return errors.New("this command requires an active internet connection")
}
return nil
}
|
[
"\"GIT_TOWN_REMOTE\""
] |
[] |
[
"GIT_TOWN_REMOTE"
] |
[]
|
["GIT_TOWN_REMOTE"]
|
go
| 1 | 0 | |
tensorflow/python/distribute/parameter_server_strategy_v2.py
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Parameter server strategy V2 class.
This is currently under development and the API is subject to change.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.distribute import sharded_variable
from tensorflow.python.eager import remote
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
@tf_export("distribute.experimental.ParameterServerStrategy", v1=[])
class ParameterServerStrategyV2(distribute_lib.Strategy):
"""An multi-worker tf.distribute strategy with parameter servers.
Parameter server training refers to the distributed training architecture that
requires two types of tasks in the cluster: workers (referred to as "worker"
task) and parameter servers (referred to as "ps" task). The variables and
updates to those variables are placed on ps, and most computation intensive
operations are placed on workers.
In TF2, parameter server training makes use of one coordinator, with some
number of workers, and (usually fewer) ps. The coordinator uses a
`tf.distribute.experimental.coordinator.ClusterCoordinator` to coordinate the
cluster, and a `tf.distribute.experimental.ParameterServerStrategy` for
variable distribution. The coordinator does not perform the actual training.
Each of the workers and ps runs a `tf.distribute.Server`, which the
coordinator connects to through the use of aforementioned two APIs.
For the training to work, the coordinator sends requests to workers for the
`tf.function`s to be executed on remote workers. Upon receiving requests from
the coordinator, a worker executes the `tf.function` by reading the variables
from parameter servers, executing the ops, and updating the variables on the
parameter servers. Each worker only processes the requests from the
coordinator, and communicates with parameter servers, without direct
interactions with any of the other workers in the cluster.
As a result, failures of some workers do not prevent the cluster from
continuing the work, and this allows the cluster to train with instances that
can be occasionally unavailable (e.g. preemptible or spot instances). The
coordinator and parameter servers though, must be available at all times for
the cluster to make progress.
Note that the coordinator is not one of the training workers. Instead, its
responsibility includes placing variables on ps, remotely executing
`tf.function`s on workers, and saving checkpoints. Parameter server training
thus consists of a server cluster with worker and ps, and a coordinator which
connects to them to coordinate. Optionally, an evaluator can be run on the
side that periodically reads the checkpoints saved by the coordinator, and
saves summaries for example.
`tf.distribute.experimental.ParameterServerStrategy` works closely with the
associated `tf.distribute.experimental.coordinator.ClusterCoordinator` object,
and should be used in conjunction with it. Standalone usage of
`tf.distribute.experimental.ParameterServerStrategy` without a
`tf.distribute.experimental.coordinator.ClusterCoordinator` indicates
a parameter server training scheme without a centralized coordinator, which is
not supported at this time.
__Example code for coordinator__
Here's an example usage of the API, with a custom training loop to train a
model. This code snippet is intended to be run on (the only) one machine that
is designated as the coordinator. Note that `cluster_resolver`,
`variable_partitioner`, and `dataset_fn` arguments are explained in the
following "Cluster setup", "Variable partitioning", and "Dataset preparation"
sections.
Currently, environment variable `GRPC_FAIL_FAST` needs to be set in all tasks
to work around a known hanging issue as the following code illustrates:
```python
# Set the environment variable to allow reporting worker and ps failure to the
# coordinator.
os.environ["GRPC_FAIL_FAST"] = "use_caller"
# Prepare a strategy to use with the cluster and variable partitioning info.
strategy = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver=...,
variable_partitioner=...)
coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(
strategy=strategy)
# Prepare a distribute dataset that will place datasets on the workers.
distributed_dataset = coordinator.create_per_worker_dataset(dataset_fn=...)
with strategy.scope():
model = ... # Variables created can possibly be container of variables
optimizer, metrics = ... # Keras optimizer/metrics are great choices
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint, checkpoint_dir, max_to_keep=2)
# `load_checkpoint` infers initial epoch from `optimizer.iterations`.
initial_epoch = load_checkpoint(checkpoint_manager) or 0
@tf.function
def worker_fn(iterator):
def replica_fn(inputs):
batch_data, labels = inputs
# calculate gradient, applying gradient, metrics update etc.
strategy.run(replica_fn, args=(next(iterator),))
for epoch in range(initial_epoch, num_epoch):
distributed_iterator = iter(distributed_dataset) # Reset iterator state.
for step in range(steps_per_epoch):
# Asynchronously schedule the `worker_fn` to be executed on an arbitrary
# worker. This call returns immediately.
coordinator.schedule(worker_fn, args=(distributed_iterator,))
# `join` blocks until all scheduled `worker_fn`s finish execution. Once it
# returns, we can read the metrics and save checkpoints as needed.
coordinator.join()
logging.info('Metric result: %r', metrics.result())
train_accuracy.reset_states()
checkpoint_manager.save()
```
__Example code for worker and parameter servers__
In addition to the coordinator, there should be multiple machines designated
as "worker" or "ps". They should run the following code to start a TensorFlow
server, waiting for coordinator's request to execute functions or place
variables:
```python
# Set the environment variable to allow reporting worker and ps failure to the
# coordinator.
os.environ["GRPC_FAIL_FAST"] = "use_caller"
# Provide a `tf.distribute.cluster_resolver.ClusterResolver` that serves
# the cluster information. See below "Cluster setup" section.
cluster_resolver = ...
server = tf.distribute.Server(
cluster_resolver.cluster_spec().as_cluster_def(),
job_name=cluster_resolver.task_type,
task_index=cluster_resolver.task_id,
protocol=protocol)
# Blocking the process that starts a server from exiting.
server.join()
```
__Cluster setup__
In order for the tasks in the cluster to know other tasks' addresses,
a `tf.distribute.cluster_resolver.ClusterResolver` is required to be used
in coordinator, worker, and ps. The
`tf.distribute.cluster_resolver.ClusterResolver` is responsible for providing
the cluster information, as well as the task type and id of the current task.
See `tf.distribute.cluster_resolver.ClusterResolver` for more information.
If `TF_CONFIG` environment variable is used for the processes to know the
cluster information, a
`tf.distribute.cluster_resolver.TFConfigClusterResolver` should be used. Note
that for legacy reasons, "chief" should be used as the task type for the
coordinator, as the following example demonstrates. Here we set `TF_CONFIG`
in environment variable, intended to be run by the process of the machine
designated as the parameter server (task type "ps") and index 1 (the second),
in a cluster with 1 chief, 2 parameter servers, and 3 workers. Note that
it needs to be set before the use of
`tf.distribute.cluster_resolver.TFConfigClusterResolver`.
Example code for cluster setup:
```python
os.environ['TF_CONFIG'] = '''
{
"cluster": {
"chief": ["chief.example.com:2222"],
"ps": ["ps0.example.com:2222", "ps1.example.com:2222"],
"worker": ["worker0.example.com:2222", "worker1.example.com:2222",
"worker2.example.com:2222"]
},
"task": {
"type": "ps",
"index": 1
}
}
'''
os.environ["GRPC_FAIL_FAST"] = "use_caller"
cluster_resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
# If coordinator ("chief" task type), create a strategy
if cluster_resolver.task_type == 'chief':
strategy = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver)
...
# If worker/ps, create a server
elif cluster_resolver.task_type in ("worker", "ps"):
server = tf.distribute.Server(...)
...
```
__Variable creation with `strategy.scope()`__
`tf.distribute.experimental.ParameterServerStrategy` follows the
`tf.distribute` API contract where variable creation is expected to be inside
the context manager returned by `strategy.scope()`, in order to be correctly
placed on parameter servers in a round-robin manner:
```python
# In this example, we're assuming having 3 ps.
strategy = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver=...)
coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(
strategy=strategy)
# Variables should be created inside scope to be placed on parameter servers.
# If created outside scope, such as `v1` here, it would be placed on the
# coordinator.
v1 = tf.Variable(initial_value=0.0)
with strategy.scope():
v2 = tf.Variable(initial_value=1.0)
v3 = tf.Variable(initial_value=2.0)
v4 = tf.Variable(initial_value=3.0)
v5 = tf.Variable(initial_value=4.0)
# v2 through v5 are created in scope and are distributed on parameter servers.
# Default placement is round-robin but the order should not be relied on.
assert v2.device == "/job:ps/replica:0/task:0/device:CPU:0"
assert v3.device == "/job:ps/replica:0/task:1/device:CPU:0"
assert v4.device == "/job:ps/replica:0/task:2/device:CPU:0"
assert v5.device == "/job:ps/replica:0/task:0/device:CPU:0"
```
See `distribute.Strategy.scope` for more information.
__Variable partitioning__
Having dedicated servers to store variables means being able to divide up, or
"shard" the variables across the ps. Large embeddings that would otherwise
exceed the memory limit of a single machine can be used in a cluster with a
sufficient number of ps.
With `tf.distribute.experimental.ParameterServerStrategy`, if a
`variable_partitioner` is provided to `__init__` and certain conditions are
satisfied, the resulting variables created in scope are sharded across the
parameter servers, in a round-robin fashion. The variable reference returned
from `tf.Variable` becomes a type that serves as the container of the sharded
variables. Access `variables` attribute of this container for the actual
variable components. See arguments section of
`tf.distribute.experimental.ParameterServerStrategy.__init__` for more
information.
To initialize the sharded variables in a more memory-efficient way, use an
initializer whose `__call__` accepts a `shard_info` argument, and use
`shard_info.offset` and `shard_info.shape` to create and return a
partition-aware `tf.Tensor` to initialize the variable components.
```python
class PartitionAwareIdentity(object):
def __call__(self, shape, dtype, shard_info):
value = tf.eye(*shape, dtype=dtype)
if shard_info is not None:
value = tf.slice(value, shard_info.offset, shard_info.shape)
return value
cluster_resolver = ...
strategy = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver, tf.fixed_size_partitioner(2))
with strategy.scope():
initializer = PartitionAwareIdentity()
initial_value = functools.partial(initializer, shape=(4, 4), dtype=tf.int64)
v = tf.Variable(
initial_value=initial_value, shape=(4, 4), dtype=tf.int64)
# `v.variables` gives the actual variable components.
assert len(v.variables) == 2
assert v.variables[0].device == "/job:ps/replica:0/task:0/device:CPU:0"
assert v.variables[1].device == "/job:ps/replica:0/task:1/device:CPU:0"
assert np.array_equal(v.variables[0].numpy(), [[1, 0, 0, 0], [0, 1, 0, 0]])
assert np.array_equal(v.variables[1].numpy(), [[0, 0, 1, 0], [0, 0, 0, 1]])
```
__Dataset preparation__
With `tf.distribute.experimental.ParameterServerStrategy`, a dataset is
created in each of the workers to be used for training. This is done by
creating a `dataset_fn` that takes no argument and returns a
`tf.data.Dataset`, and passing the `dataset_fn` into
`tf.distribute.experimental.coordinator.
ClusterCoordinator.create_per_worker_dataset`. We recommend the dataset to be
shuffled and repeated to have the examples run through the training as evenly
as possible.
```python
def dataset_fn():
filenames = ...
dataset = tf.data.Dataset.from_tensor_slices(filenames)
# Dataset is recommended to be shuffled, and repeated.
return dataset.shuffle(buffer_size=...).repeat().batch(batch_size=...)
coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(
strategy=...)
distributed_dataset = coordinator.create_per_worker_dataset(dataset_fn)
```
__Limitations__
* `tf.distribute.experimental.ParameterServerStrategy` in TF2 is experimental,
and the API is subject to further changes.
* `tf.distribute.experimental.ParameterServerStrategy` does not yet support
training with GPU(s). This is a feature request being developed.
* `tf.distribute.experimental.ParameterServerStrategy` only supports
[custom training loop
API](https://www.tensorflow.org/tutorials/distribute/custom_training)
currently in TF2. Usage of it with Keras `compile`/`fit` API is being
developed.
* `tf.distribute.experimental.ParameterServerStrategy` must be used with
`tf.distribute.experimental.coordinator.ClusterCoordinator`.
* This strategy is not intended for TPU. Use
`tf.distribute.experimental.TPUStrategy` instead.
"""
# pyformat: disable
def __init__(self, cluster_resolver, variable_partitioner=None):
"""Initializes the TF2 parameter server strategy.
This initializes the `tf.distribute.experimental.ParameterServerStrategy`
object to be ready for use with
`tf.distribute.experimental.coordinator.ClusterCoordinator`.
Args:
cluster_resolver: a `tf.distribute.cluster_resolver.ClusterResolver`
object.
variable_partitioner:
a callable with the signature `num_partitions = fn(shape, dtype)`, where
`num_partitions` is a list/tuple representing the number of partitions
on each axis, and `shape` and `dtype` are of types `tf.TensorShape` and
`tf.dtypes.Dtype`. If `None`, variables will not be partitioned.
* `variable_partitioner` will be called for all variables created under
strategy `scope` to instruct how the variables should be partitioned.
Variables will be created in multiple partitions if there are more than
one partition along the partitioning axis, otherwise it falls back to
normal `tf.Variable`.
* Only partitioning along the first / outermost axis is supported; that is,
all elements in `num_partitions` other than the first must be 1.
* Partitioners such as `tf.compat.v1.min_max_variable_partitioner`,
`tf.compat.v1.variable_axis_size_partitioner` and
`tf.compat.v1.fixed_size_partitioner` are also supported since they
conform to the required signature.
* A "div" partition
strategy is used to partition variables. Assuming consecutive integer ids
are assigned along the first axis of a variable, ids are assigned to
shards in a contiguous manner, while attempting to keep each shard size
identical. If the ids do not divide evenly across the shards, each of
the first several shards is assigned one more id. For instance, a
variable whose first dimension is 13 has 13 ids, which are split
across 5 shards as
`[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`; see the sketch
after this list for how the shard boundaries are computed.
* Variables created under `strategy.extended.colocate_vars_with` will
not be partitioned, e.g., optimizer's slot variables.
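A minimal sketch of the "div" shard-boundary computation described above, for
illustration only (the actual logic lives in `_create_variable` below); the
variable names here are illustrative:
```python
num_ids, num_shards = 13, 5
base, extra = num_ids // num_shards, num_ids % num_shards
offsets = [0]
for i in range(num_shards):
  # The first `extra` shards each receive one additional id.
  offsets.append(offsets[-1] + base + (1 if i < extra else 0))
# offsets == [0, 3, 6, 9, 11, 13]; shard k owns ids offsets[k]:offsets[k+1].
```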
"""
# pyformat: enable
self._cluster_resolver = cluster_resolver
self._extended = ParameterServerStrategyV2Extended(self, cluster_resolver,
variable_partitioner)
self._verify_args_and_config(cluster_resolver)
logging.info(
"`tf.distribute.experimental.ParameterServerStrategy` is initialized "
"with cluster_spec: %s", cluster_resolver.cluster_spec())
# TODO(b/167894802): Make coordinator, worker, and ps names customizable.
self._connect_to_cluster(coordinator_name="chief")
super(ParameterServerStrategyV2, self).__init__(self._extended)
distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
"ParameterServerStrategy")
def _connect_to_cluster(self, coordinator_name):
if coordinator_name in ["worker", "ps"]:
raise ValueError("coordinator name should not be 'worker' or 'ps'.")
cluster_spec = self._cluster_resolver.cluster_spec()
self._num_workers = len(cluster_spec.as_dict().get("worker", ()))
self._num_ps = len(cluster_spec.as_dict().get("ps", ()))
device_filters = server_lib.ClusterDeviceFilters()
# For any worker, only the devices on ps and coordinator nodes are visible
for i in range(self._num_workers):
device_filters.set_device_filters(
"worker", i, ["/job:ps", "/job:%s" % coordinator_name])
# Similarly for any ps, only the devices on workers and coordinator are
# visible
for i in range(self._num_ps):
device_filters.set_device_filters(
"ps", i, ["/job:worker", "/job:%s" % coordinator_name])
# Allow at most one outstanding RPC for each worker at a certain time. This
# is to simplify worker failure handling in the runtime
os.environ["TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE"] = "False"
logging.info("%s is now connecting to cluster with cluster_spec: %r",
self.__class__.__name__, cluster_spec)
remote.connect_to_cluster(
cluster_spec,
job_name=coordinator_name,
protocol=self._cluster_resolver.rpc_layer,
cluster_device_filters=device_filters)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"ps_strategy_num_workers").set(self._num_workers)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"ps_strategy_num_ps").set(self._num_ps)
def _verify_args_and_config(self, cluster_resolver):
if not cluster_resolver.cluster_spec():
raise ValueError("Cluster spec must be non-empty in `cluster_resolver`.")
if self.extended._num_gpus_per_worker > 1: # pylint: disable=protected-access
raise NotImplementedError("Multi-gpu is not supported yet.")
class ParameterServerStrategyV2Extended(
parameter_server_strategy.ParameterServerStrategyExtended):
"""Extended class for ParameterServerStrategyV2.
Please see `tf.distribute.StrategyExtended` doc for more information.
"""
def __init__(self, container_strategy, cluster_resolver,
variable_partitioner):
"""Initialization of ParameterServerStrategyV2Extended."""
super(ParameterServerStrategyV2Extended, self).__init__(container_strategy)
self._num_ps = len(cluster_resolver.cluster_spec().as_dict().get("ps", []))
self._variable_count = 0
self._variable_partitioner = variable_partitioner
def _create_variable(self, next_creator, **kwargs):
"""Implements StrategyExtendedV2._create_variable.
Creates a `Variable` or a `ShardedVariable`. A `ShardedVariable` will be
created if satisfying all the following criteria:
1. `self._variable_partitioner` results in more than one partition on the
first axis.
2. variable's rank is greater than 0.
3. variable is not colocated with another variable.
Otherwise a `Variable` will be created.
Args:
next_creator: See `variable_scope.variable_creator_scope`; the next
creator in the chain.
**kwargs: Passed through to the next creator.
Returns:
A `Variable` or `ShardedVariable`.
"""
if "colocate_with" in kwargs: # Never partition colocated_with variables.
colocate_with = kwargs["colocate_with"]
# Clear the variable scope to avoid possible conflicts between device
# scope and colocation scope.
with ops.device(None):
with ops.colocate_with(colocate_with):
var = next_creator(**kwargs)
logging.debug(
"Creating variable (name:%s, shape:%r) that colocates with %s",
var.name, var.shape, kwargs["colocate_with"].name)
return var
if self._variable_partitioner is None:
return self._create_variable_round_robin(next_creator, **kwargs)
name = kwargs.get("name", None)
initial_value = kwargs.get("initial_value", None)
if initial_value is None:
raise ValueError("initial_value must be specified.")
# Two cases where initial_value can be a callable:
# 1. initial_value is passed as a callable, e.g, an `initializer` class.
# 2. restoring from checkpoint, initial_value is a
# "CheckpointInitialValueCallable".
init_from_fn = callable(initial_value)
dtype = kwargs.get("dtype", None)
shape = kwargs.get("shape", None)
if init_from_fn and (shape is None or dtype is None):
init_from_fn = False
initial_value = initial_value()
if not init_from_fn:
# The initial_value is created on coordinator, it will need to be sent to
# ps for variable initialization, which can be inefficient and can
# potentially hit the 2GB limit on protobuf serialization.
initial_value = ops.convert_to_tensor(initial_value, dtype=dtype)
dtype = initial_value.dtype
shape = initial_value.shape
else:
shape = tensor_shape.as_shape(shape)
if shape.rank == 0: # Skip partitioning rank-0 variable.
return self._create_variable_round_robin(next_creator, **kwargs)
num_partitions = self._variable_partitioner(shape=shape, dtype=dtype)
if not num_partitions or num_partitions[0] == 0 or any(
v != 1 for v in num_partitions[1:]):
raise ValueError(
"variable_partitioner must return a list/tuple whose elements are 1"
" besides the first element (non-zero), got: %r" % num_partitions)
if num_partitions[0] == 1: # no partition
return self._create_variable_round_robin(next_creator, **kwargs)
# Use "div" partition strategy to partition the variable.
num_partitions = min(num_partitions[0], shape[0])
base = shape[0] // num_partitions
extra = shape[0] % num_partitions
# An example: num_partitions=4, shape[0]=10, partitions: [3, 3, 2, 2]
# offsets: [0, 3, 6, 8, 10]
offsets = []
for i in range(num_partitions):
if i == 0:
offsets.append(0)
else:
prev_shard_size = base + (1 if i - 1 < extra else 0)
offsets.append(offsets[i - 1] + prev_shard_size)
offsets.append(shape[0])
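# init_shard_fn computes the initial value for the shard at `shard_index`:
# for shard-unaware initializers it slices the full value, while shard-aware
# initializers are called with a `ShardInfo` describing only this shard.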
def init_shard_fn(shard_index):
if not init_from_fn:
logging.log_if(
logging.WARN, _INEFFICIENT_INIT_WARNING % name, shard_index == 0 and
shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS)
return initial_value[offsets[shard_index]:offsets[shard_index + 1]]
arg_spec = tf_inspect.getfullargspec(initial_value)
if ("shard_info" not in arg_spec.args and
"shard_info" not in arg_spec.kwonlyargs):
# `initial_value` is a callable that doesn't accept `shard_info`.
logging.log_if(
logging.WARN, _INEFFICIENT_INIT_WARNING % name, shard_index == 0 and
shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS)
full_value = initial_value()
return full_value[offsets[shard_index]:offsets[shard_index + 1]]
else:
# Memory-efficient way of initializing sharded variable. It requires
# the `init_fn` to accept a namedtuple `shard_info`.
component_shape = (offsets[shard_index + 1] -
offsets[shard_index],) + shape[1:]
offsets_all_axes = (offsets[shard_index],) + (0,) * len(shape[1:])
return initial_value(
shard_info=trackable.ShardInfo(
shape=tensor_shape.as_shape(component_shape),
offset=offsets_all_axes))
var_list = []
for i in range(num_partitions):
kwargs["shape"] = (offsets[i + 1] - offsets[i],) + shape[1:]
kwargs["initial_value"] = lambda: init_shard_fn(i)
if name is not None:
kwargs["name"] = "{}/part_{}".format(name, i)
var_list.append(self._create_variable_round_robin(next_creator, **kwargs))
result = sharded_variable.ShardedVariable(var_list)
return result
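# _create_variable_round_robin places each newly created variable on the next
# parameter server task in round-robin order, clearing any surrounding
# colocation constraints so the explicit device placement takes effect.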
def _create_variable_round_robin(self, next_creator, **kwargs):
# Clear the colocation scope to avoid possible conflicts between device
# scope and colocation scope.
with ops.colocate_with(None, ignore_existing=True):
with ops.device("/job:ps/task:%d" %
(self._variable_count % self._num_ps)):
var = next_creator(**kwargs)
logging.debug(
"Creating variable (name:%s, shape:%r) on /job:ps/task:%d",
var.name, var.shape, (self._variable_count % self._num_ps))
self._variable_count += 1
return var
def _call_for_each_replica(self, fn, args, kwargs):
with distribute_lib.ReplicaContext(
self._container_strategy(),
replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)):
# TODO(rchao): Support multi-replica per worker or sync-group.
return distribute_utils.regroup((fn(*args, **kwargs),))
# The warning that will be logged if the way we initialize sharded variables
# is memory-inefficient.
_INEFFICIENT_INIT_WARNING = (
"Large variable %s is partitioned but not initialized in a memory-efficient"
" way. The full value is first being created and then sliced into smaller "
"values. To reduce the memory footprint, explicitly specify `dtype` and "
"`shape` when creating variables, and pass a callable to Variable's "
"`initial_value`. The callable should take only one argument which is a "
"namedtuple (shape: `tf.TensorShape`, offsets: list/tuple) where shape is "
"the shape of the component variable, and offsets is the offsets of the "
"smaller variable on each axis.")
_LARGE_VARIABLE_NUM_ELEMENTS = 1e9
|
[] |
[] |
[
"TF_CONFIG",
"TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE",
"GRPC_FAIL_FAST"
] |
[]
|
["TF_CONFIG", "TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE", "GRPC_FAIL_FAST"]
|
python
| 3 | 0 | |
python/az/aro/azext_aro/custom.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the Apache License 2.0.
import random
import os
import azext_aro.vendored_sdks.azure.mgmt.redhatopenshift.v2020_04_30.models as v2020_04_30
from azext_aro._aad import AADManager
from azext_aro._rbac import assign_contributor_to_vnet, assign_contributor_to_routetable
from azext_aro._validators import validate_subnets
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.profiles import ResourceType
from azure.cli.core.util import sdk_no_wait
from msrest.exceptions import HttpOperationError
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import resource_id, parse_resource_id
from knack.log import get_logger
from knack.util import CLIError
logger = get_logger(__name__)
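# Default client ID of the resource provider's ("FP") service principal; it is
# granted Contributor access on the cluster vnet and route tables below, and
# can be overridden with the AZURE_FP_CLIENT_ID environment variable.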
FP_CLIENT_ID = 'f1dd0a37-89c6-4e07-bcd1-ffd3d43d8875'
def aro_create(cmd, # pylint: disable=too-many-locals
client,
resource_group_name,
resource_name,
master_subnet,
worker_subnet,
vnet=None,
vnet_resource_group_name=None, # pylint: disable=unused-argument
location=None,
pull_secret=None,
domain=None,
cluster_resource_group=None,
client_id=None,
client_secret=None,
pod_cidr=None,
service_cidr=None,
master_vm_size=None,
worker_vm_size=None,
worker_vm_disk_size_gb=None,
worker_count=None,
apiserver_visibility=None,
ingress_visibility=None,
tags=None,
no_wait=False):
if not rp_mode_development():
resource_client = get_mgmt_service_client(
cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
provider = resource_client.providers.get('Microsoft.RedHatOpenShift')
if provider.registration_state != 'Registered':
raise CLIError('Microsoft.RedHatOpenShift provider is not registered. Run `az provider ' +
'register -n Microsoft.RedHatOpenShift --wait`.')
vnet = validate_subnets(master_subnet, worker_subnet)
subscription_id = get_subscription_id(cmd.cli_ctx)
random_id = generate_random_id()
aad = AADManager(cmd.cli_ctx)
if client_id is None:
app, client_secret = aad.create_application(cluster_resource_group or 'aro-' + random_id)
client_id = app.app_id
client_sp = aad.get_service_principal(client_id)
if not client_sp:
client_sp = aad.create_service_principal(client_id)
rp_client_id = os.environ.get('AZURE_FP_CLIENT_ID', FP_CLIENT_ID)
rp_client_sp = aad.get_service_principal(rp_client_id)
for sp_id in [client_sp.object_id, rp_client_sp.object_id]:
assign_contributor_to_vnet(cmd.cli_ctx, vnet, sp_id)
assign_contributor_to_routetable(cmd.cli_ctx, [master_subnet, worker_subnet], sp_id)
if rp_mode_development():
worker_vm_size = worker_vm_size or 'Standard_D2s_v3'
else:
worker_vm_size = worker_vm_size or 'Standard_D4s_v3'
if apiserver_visibility is not None:
apiserver_visibility = apiserver_visibility.capitalize()
if ingress_visibility is not None:
ingress_visibility = ingress_visibility.capitalize()
oc = v2020_04_30.OpenShiftCluster(
location=location,
tags=tags,
cluster_profile=v2020_04_30.ClusterProfile(
pull_secret=pull_secret or "",
domain=domain or random_id,
resource_group_id='/subscriptions/%s/resourceGroups/%s' %
(subscription_id, cluster_resource_group or "aro-" + random_id),
),
service_principal_profile=v2020_04_30.ServicePrincipalProfile(
client_id=client_id,
client_secret=client_secret,
),
network_profile=v2020_04_30.NetworkProfile(
pod_cidr=pod_cidr or '10.128.0.0/14',
service_cidr=service_cidr or '172.30.0.0/16',
),
master_profile=v2020_04_30.MasterProfile(
vm_size=master_vm_size or 'Standard_D8s_v3',
subnet_id=master_subnet,
),
worker_profiles=[
v2020_04_30.WorkerProfile(
name='worker', # TODO: 'worker' should not be hard-coded
vm_size=worker_vm_size,
disk_size_gb=worker_vm_disk_size_gb or 128,
subnet_id=worker_subnet,
count=worker_count or 3,
)
],
apiserver_profile=v2020_04_30.APIServerProfile(
visibility=apiserver_visibility or 'Public',
),
ingress_profiles=[
v2020_04_30.IngressProfile(
name='default', # TODO: 'default' should not be hard-coded
visibility=ingress_visibility or 'Public',
)
],
)
return sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=oc)
def aro_delete(cmd, client, resource_group_name, resource_name, no_wait=False):
# TODO: clean up rbac
try:
oc = client.get(resource_group_name, resource_name)
master_subnet = oc.master_profile.subnet_id
worker_subnets = {w.subnet_id for w in oc.worker_profiles}
master_parts = parse_resource_id(master_subnet)
vnet = resource_id(
subscription=master_parts['subscription'],
resource_group=master_parts['resource_group'],
namespace='Microsoft.Network',
type='virtualNetworks',
name=master_parts['name'],
)
aad = AADManager(cmd.cli_ctx)
rp_client_id = os.environ.get('AZURE_FP_CLIENT_ID', FP_CLIENT_ID)
rp_client_sp = aad.get_service_principal(rp_client_id)
# Customers frequently remove the RP's permissions, then cannot
# delete the cluster. Where possible, fix this before attempting
# deletion.
if rp_client_sp:
sp_id = rp_client_sp.object_id
assign_contributor_to_vnet(cmd.cli_ctx, vnet, sp_id)
assign_contributor_to_routetable(cmd.cli_ctx,
worker_subnets | {master_subnet},
sp_id)
except (CloudError, HttpOperationError) as e:
# Default to old deletion behaviour in case operations throw an
# exception above. Log the error.
logger.info(e.message)
return sdk_no_wait(no_wait, client.delete,
resource_group_name=resource_group_name,
resource_name=resource_name)
def aro_list(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def aro_show(client, resource_group_name, resource_name):
return client.get(resource_group_name, resource_name)
def aro_list_credentials(client, resource_group_name, resource_name):
return client.list_credentials(resource_group_name, resource_name)
def aro_update(client, resource_group_name, resource_name, no_wait=False):
oc = v2020_04_30.OpenShiftClusterUpdate()
return sdk_no_wait(no_wait, client.update,
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=oc)
def rp_mode_development():
return os.environ.get('RP_MODE', '').lower() == 'development'
def generate_random_id():
random_id = (random.choice('abcdefghijklmnopqrstuvwxyz') +
''.join(random.choice('abcdefghijklmnopqrstuvwxyz1234567890')
for _ in range(7)))
return random_id
|
[] |
[] |
[
"AZURE_FP_CLIENT_ID",
"RP_MODE"
] |
[]
|
["AZURE_FP_CLIENT_ID", "RP_MODE"]
|
python
| 2 | 0 | |
tests/e2e/helpers.go
|
package e2e
import (
"bytes"
goctx "context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"regexp"
"strconv"
"strings"
"testing"
"time"
backoff "github.com/cenkalti/backoff/v3"
ocpapi "github.com/openshift/api"
imagev1 "github.com/openshift/api/image/v1"
framework "github.com/operator-framework/operator-sdk/pkg/test"
"github.com/operator-framework/operator-sdk/pkg/test/e2eutil"
appsv1 "k8s.io/api/apps/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
configv1 "github.com/openshift/api/config/v1"
"github.com/openshift/compliance-operator/pkg/apis"
compv1alpha1 "github.com/openshift/compliance-operator/pkg/apis/compliance/v1alpha1"
compsuitectrl "github.com/openshift/compliance-operator/pkg/controller/compliancesuite"
"github.com/openshift/compliance-operator/pkg/utils"
mcfgapi "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io"
mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
)
var contentImagePath string
var shouldLogContainerOutput bool
var brokenContentImagePath string
var rhcosPb *compv1alpha1.ProfileBundle
var ocp4Pb *compv1alpha1.ProfileBundle
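// defaultBackoff retries flaky API calls with exponential backoff, capped at
// maxRetries attempts.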
var defaultBackoff = backoff.WithMaxRetries(backoff.NewExponentialBackOff(), maxRetries)
type ObjectResouceVersioner interface {
runtime.Object
metav1.Common
}
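// init reads the e2e configuration from the environment: CONTENT_IMAGE and
// BROKEN_CONTENT_IMAGE are required, and setting LOG_CONTAINER_OUTPUT to any
// non-empty value enables container log collection.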
func init() {
contentImagePath = os.Getenv("CONTENT_IMAGE")
if contentImagePath == "" {
fmt.Println("Please set the 'CONTENT_IMAGE' environment variable")
os.Exit(1)
}
logContainerOutputEnv := os.Getenv("LOG_CONTAINER_OUTPUT")
if logContainerOutputEnv != "" {
shouldLogContainerOutput = true
}
brokenContentImagePath = os.Getenv("BROKEN_CONTENT_IMAGE")
if brokenContentImagePath == "" {
fmt.Println("Please set the 'BROKEN_CONTENT_IMAGE' environment variable")
os.Exit(1)
}
}
type testExecution struct {
Name string
IsParallel bool
TestFn func(*testing.T, *framework.Framework, *framework.Context, *mcTestCtx, string) error
}
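// mcTestCtx tracks the MachineConfigPools a test creates so that they (and
// the node labels that select them) can be cleaned up when the test finishes.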
type mcTestCtx struct {
f *framework.Framework
t *testing.T
pools []*mcfgv1.MachineConfigPool
}
func E2ELogf(t *testing.T, format string, args ...interface{}) {
t.Logf(fmt.Sprintf("%s: %s", time.Now().Format(time.RFC3339), format), args...)
}
func E2ELog(t *testing.T, args ...interface{}) {
t.Log(fmt.Sprintf("%s: %s", time.Now().Format(time.RFC3339), fmt.Sprint(args...)))
}
func E2EErrorf(t *testing.T, format string, args ...interface{}) {
t.Errorf(fmt.Sprintf("E2E-FAILURE: %s: %s", time.Now().Format(time.RFC3339), format), args...)
}
func E2EFatalf(t *testing.T, format string, args ...interface{}) {
t.Fatalf(fmt.Sprintf("E2E-FAILURE: %s: %s", time.Now().Format(time.RFC3339), format), args...)
}
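// getObjNameFromTest derives a DNS-friendly object name from the running
// test's name; for example (illustrative), "TestSuite/TestScanWithRemediation"
// becomes "test-scan-with-remediation".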
func getObjNameFromTest(t *testing.T) string {
fullTestName := t.Name()
regexForCapitals := regexp.MustCompile(`[A-Z]`)
testNameInitIndex := strings.LastIndex(fullTestName, "/") + 1
// Remove test prefix
testName := fullTestName[testNameInitIndex:]
// convert capitals to lower case letters with hyphens prepended
hyphenedTestName := regexForCapitals.ReplaceAllStringFunc(
testName,
func(currentMatch string) string {
return "-" + strings.ToLower(currentMatch)
})
// remove double hyphens
testNameNoDoubleHyphens := strings.ReplaceAll(hyphenedTestName, "--", "-")
// Remove leading and trailing hyphens
return strings.Trim(testNameNoDoubleHyphens, "-")
}
func newMcTestCtx(f *framework.Framework, t *testing.T) (*mcTestCtx, error) {
return &mcTestCtx{f: f, t: t}, nil
}
func (c *mcTestCtx) cleanupTrackedPools() {
for _, p := range c.pools {
// Then find all nodes that are labeled with this pool and remove the label
// Search the nodes with this label
poolNodes := getNodesWithSelectorOrFail(c.t, c.f, p.Spec.NodeSelector.MatchLabels)
rmPoolLabel := utils.GetFirstNodeRoleLabel(p.Spec.NodeSelector.MatchLabels)
err := unLabelNodes(c.t, c.f, rmPoolLabel, poolNodes)
if err != nil {
E2EErrorf(c.t, "Could not unlabel nodes from pool %s: %v\n", rmPoolLabel, err)
}
// Unlabeling the nodes triggers an update of the affected nodes because the nodes
// will then start using a different rendered pool, e.g. a node that used to be labeled
// with "e2e,worker" and is now labeled only with "worker" switches from "rendered-e2e-*"
// to "rendered-worker-*". If we didn't wait, the node might have tried to use the
// e2e pool that would be gone when we remove it with the next call.
err = waitForNodesToHaveARenderedPool(c.t, c.f, poolNodes, workerPoolName)
if err != nil {
E2EErrorf(c.t, "Error waiting for nodes to reach the worker pool again: %v\n", err)
}
err = waitForPoolCondition(c.t, c.f, mcfgv1.MachineConfigPoolUpdated, p.Name)
if err != nil {
E2EErrorf(c.t, "Error waiting for reboot after nodes were unlabeled: %v\n", err)
}
// Then delete the pool itself
E2ELogf(c.t, "Removing pool %s\n", p.Name)
err = c.f.Client.Delete(goctx.TODO(), p)
if err != nil {
E2EErrorf(c.t, "Could not remove pool %s: %v\n", p.Name, err)
}
}
}
func (c *mcTestCtx) trackPool(pool *mcfgv1.MachineConfigPool) {
for _, p := range c.pools {
if p.Name == pool.Name {
return
}
}
c.pools = append(c.pools, pool)
E2ELogf(c.t, "Tracking pool %s\n", pool.Name)
}
func (c *mcTestCtx) ensureE2EPool() {
pool, err := createReadyMachineConfigPoolSubset(c.t, c.f, workerPoolName, testPoolName)
if err != nil {
E2EFatalf(c.t, "error ensuring that test e2e pool exists: %s", err)
}
c.trackPool(pool)
}
// executeTests sets up everything that an e2e test needs to run, and executes the tests.
func executeTests(t *testing.T, tests ...testExecution) {
ctx := setupTestRequirements(t)
defer ctx.Cleanup()
// get global framework variables
f := framework.Global
ns, err := ctx.GetOperatorNamespace()
if err != nil {
t.Fatalf("could not get namespace: %v", err)
}
E2ELogf(t, "Running e2e test on Namespace %s\n", ns)
setupComplianceOperatorCluster(t, ctx, f, ns)
mcTctx, err := newMcTestCtx(f, t)
if err != nil {
t.Fatalf("could not create the MC test context: %v", err)
}
defer mcTctx.cleanupTrackedPools()
rhcosPb, err = getReadyProfileBundle(t, f, "rhcos4", ns)
if err != nil {
t.Error(err)
}
// defer deleting the profiles or else the test namespace gets stuck in Terminating
defer f.Client.Delete(goctx.TODO(), rhcosPb)
ocp4Pb, err = getReadyProfileBundle(t, f, "ocp4", ns)
if err != nil {
t.Error(err)
}
// defer deleting the profiles or else the test namespace gets stuck in Terminating
defer f.Client.Delete(goctx.TODO(), ocp4Pb)
t.Run("Parallel tests", func(t *testing.T) {
for _, test := range tests {
// Don't lose the test reference
test := test
if test.IsParallel {
t.Run(test.Name, func(tt *testing.T) {
tt.Parallel()
if err := test.TestFn(tt, f, ctx, mcTctx, ns); err != nil {
tt.Error(err)
}
})
}
}
})
t.Run("Serial tests", func(t *testing.T) {
for _, test := range tests {
// Don't lose the test reference
test := test
if !test.IsParallel {
t.Run(test.Name, func(t *testing.T) {
if err := test.TestFn(t, f, ctx, mcTctx, ns); err != nil {
t.Error(err)
}
})
}
}
})
}
// setupTestRequirements adds the items to the client's scheme (so we can use our objects in the client)
// and creates a test context.
//
// NOTE: Whenever we add new types to the operator, we need to register them here for the e2e tests.
func setupTestRequirements(t *testing.T) *framework.Context {
// compliance-operator objects
coObjs := [3]runtime.Object{&compv1alpha1.ComplianceScanList{},
&compv1alpha1.ComplianceRemediationList{},
&compv1alpha1.ComplianceSuiteList{},
}
for _, obj := range coObjs {
err := framework.AddToFrameworkScheme(apis.AddToScheme, obj)
if err != nil {
t.Fatalf("TEST SETUP: failed to add custom resource scheme to framework: %v", err)
}
}
// Additional testing objects
testObjs := [1]runtime.Object{
&configv1.OAuth{},
}
for _, obj := range testObjs {
err := framework.AddToFrameworkScheme(configv1.Install, obj)
if err != nil {
t.Fatalf("TEST SETUP: failed to add configv1 resource scheme to framework: %v", err)
}
}
// MCO objects
mcoObjs := [2]runtime.Object{
&mcfgv1.MachineConfigPoolList{},
&mcfgv1.MachineConfigList{},
}
for _, obj := range mcoObjs {
err := framework.AddToFrameworkScheme(mcfgapi.Install, obj)
if err != nil {
t.Fatalf("TEST SETUP: failed to add custom resource scheme to framework: %v", err)
}
}
// OpenShift objects
ocpObjs := [2]runtime.Object{
&imagev1.ImageStreamList{},
&imagev1.ImageStreamTagList{},
}
for _, obj := range ocpObjs {
if err := framework.AddToFrameworkScheme(ocpapi.Install, obj); err != nil {
t.Fatalf("TEST SETUP: failed to add custom resource scheme to framework: %v", err)
}
}
return framework.NewContext(t)
}
// setupComplianceOperatorCluster creates a compliance-operator cluster and the resources it needs to operate
// such as the namespace, permissions, etc.
func setupComplianceOperatorCluster(t *testing.T, ctx *framework.Context, f *framework.Framework, namespace string) {
replaceNamespaceFromManifest(t, namespace, f.NamespacedManPath)
err := ctx.InitializeClusterResources(getCleanupOpts(ctx))
if err != nil {
t.Fatalf("failed to initialize cluster resources: %v", err)
}
E2ELog(t, "Initialized cluster resources")
// wait for compliance-operator to be ready
err = e2eutil.WaitForOperatorDeployment(t, f.KubeClient, namespace, "compliance-operator", 1, retryInterval, timeout)
if err != nil {
t.Fatal(err)
}
}
func getCleanupOpts(ctx *framework.Context) *framework.CleanupOptions {
return &framework.CleanupOptions{
TestContext: ctx,
Timeout: cleanupTimeout,
RetryInterval: cleanupRetryInterval,
}
}
func replaceNamespaceFromManifest(t *testing.T, namespace string, namespacedManPath *string) {
if namespacedManPath == nil {
t.Fatal("Error: no namespaced manifest given as test argument. operator-sdk might have changed.")
}
manPath := *namespacedManPath
// #nosec
read, err := ioutil.ReadFile(manPath)
if err != nil {
t.Fatalf("Error reading namespaced manifest file: %s", err)
}
newContents := strings.Replace(string(read), "openshift-compliance", namespace, -1)
// #nosec
err = ioutil.WriteFile(manPath, []byte(newContents), 0644)
if err != nil {
t.Fatalf("Error writing namespaced manifest file: %s", err)
}
}
// waitForProfileBundleStatus will poll until the ProfileBundle that we're looking for reaches a certain status, or until
// a timeout is reached.
func waitForProfileBundleStatus(t *testing.T, f *framework.Framework, namespace, name string, targetStatus compv1alpha1.DataStreamStatusType) error {
pb := &compv1alpha1.ProfileBundle{}
var lastErr error
// retry and ignore errors until timeout
timeouterr := wait.Poll(retryInterval, timeout, func() (bool, error) {
lastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, pb)
if lastErr != nil {
if apierrors.IsNotFound(lastErr) {
E2ELogf(t, "Waiting for availability of %s ProfileBundle\n", name)
return false, nil
}
E2ELogf(t, "Retrying. Got error: %v\n", lastErr)
return false, nil
}
if pb.Status.DataStreamStatus == targetStatus {
return true, nil
}
E2ELogf(t, "Waiting for run of %s ProfileBundle (%s)\n", name, pb.Status.DataStreamStatus)
return false, nil
})
if err := processErrorOrTimeout(lastErr, timeouterr, "waiting for ProfileBundle status"); err != nil {
return err
}
E2ELogf(t, "ProfileBundle ready (%s)\n", pb.Status.DataStreamStatus)
return nil
}
// waitForScanStatus will poll until the compliancescan that we're looking for reaches a certain status, or until
// a timeout is reached.
func waitForScanStatus(t *testing.T, f *framework.Framework, namespace, name string, targetStatus compv1alpha1.ComplianceScanStatusPhase) {
exampleComplianceScan := &compv1alpha1.ComplianceScan{}
var lastErr error
defer logContainerOutput(t, f, namespace, name)
// retry and ignore errors until timeout
timeoutErr := wait.Poll(retryInterval, timeout, func() (bool, error) {
lastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, exampleComplianceScan)
if lastErr != nil {
if apierrors.IsNotFound(lastErr) {
E2ELogf(t, "Waiting for availability of %s compliancescan\n", name)
return false, nil
}
E2ELogf(t, "Retrying. Got error: %v\n", lastErr)
return false, nil
}
if exampleComplianceScan.Status.Phase == targetStatus {
return true, nil
}
E2ELogf(t, "Waiting for run of %s compliancescan (%s)\n", name, exampleComplianceScan.Status.Phase)
return false, nil
})
assertNoErrorNorTimeout(t, lastErr, timeoutErr, "waiting for compliance status")
E2ELogf(t, "ComplianceScan ready (%s)\n", exampleComplianceScan.Status.Phase)
}
// waitForReScanStatus will poll until the compliancescan that we're looking for reaches a certain status for a re-scan, or until
// a timeout is reached.
func waitForReScanStatus(t *testing.T, f *framework.Framework, namespace, name string, targetStatus compv1alpha1.ComplianceScanStatusPhase) error {
foundScan := &compv1alpha1.ComplianceScan{}
// unset initial index
var scanIndex int64 = -1
var lastErr error
// retry and ignore errors until timeout
timeouterr := wait.Poll(retryInterval, timeout, func() (bool, error) {
lastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, foundScan)
if lastErr != nil {
if apierrors.IsNotFound(lastErr) {
E2ELogf(t, "Waiting for availability of %s compliancescan\n", name)
return false, nil
}
E2ELogf(t, "Retrying. Got error: %v\n", lastErr)
return false, nil
}
// Set index
if scanIndex == -1 {
scanIndex = foundScan.Status.CurrentIndex
E2ELogf(t, "Initial scan index set to %d. Waiting for re-scan\n", scanIndex)
return false, nil
} else if foundScan.Status.CurrentIndex == scanIndex {
E2ELogf(t, "re-scan hasn't taken place. CurrentIndex %d. Waiting for re-scan\n", scanIndex)
return false, nil
}
if foundScan.Status.Phase == targetStatus {
return true, nil
}
E2ELogf(t, "Waiting for run of %s compliancescan (%s)\n", name, foundScan.Status.Phase)
return false, nil
})
// Error in function call
if lastErr != nil {
return lastErr
}
// Timeout
if timeouterr != nil {
return timeouterr
}
E2ELogf(t, "ComplianceScan ready (%s)\n", foundScan.Status.Phase)
return nil
}
// waitForRemediationState will poll until the ComplianceRemediation that we're looking for gets applied, or until
// a timeout is reached.
func waitForRemediationState(t *testing.T, f *framework.Framework, namespace, name string, state compv1alpha1.RemediationApplicationState) error {
rem := &compv1alpha1.ComplianceRemediation{}
var lastErr error
// retry and ignore errors until timeout
timeouterr := wait.Poll(retryInterval, timeout, func() (bool, error) {
lastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, rem)
if lastErr != nil {
if apierrors.IsNotFound(lastErr) {
E2ELogf(t, "Waiting for availability of %s ComplianceRemediation\n", name)
return false, nil
}
E2ELogf(t, "Retrying. Got error: %v\n", lastErr)
return false, nil
}
if rem.Status.ApplicationState == state {
return true, nil
}
E2ELogf(t, "Waiting for run of %s ComplianceRemediation (%s)\n", name, rem.Status.ApplicationState)
return false, nil
})
// Error in function call
if lastErr != nil {
return lastErr
}
// Timeout
if timeouterr != nil {
return timeouterr
}
E2ELogf(t, "ComplianceRemediation ready (%s)\n", rem.Status.ApplicationState)
return nil
}
func waitForObjectToExist(t *testing.T, f *framework.Framework, name, namespace string, obj runtime.Object) error {
var lastErr error
// retry and ignore errors until timeout
timeouterr := wait.Poll(retryInterval, timeout, func() (bool, error) {
lastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, obj)
if lastErr != nil {
if apierrors.IsNotFound(lastErr) {
E2ELogf(t, "Waiting for availability of %s ComplianceRemediation\n", name)
return false, nil
}
E2ELogf(t, "Retrying. Got error: %v\n", lastErr)
return false, nil
}
return true, nil
})
// Error in function call
if lastErr != nil {
return lastErr
}
// Timeout
if timeouterr != nil {
return timeouterr
}
E2ELogf(t, "Object found '%s' found\n", name)
return nil
}
func waitForObjectToUpdate(t *testing.T, f *framework.Framework, name, namespace string, obj ObjectResouceVersioner) error {
var lastErr error
initialVersion := obj.GetResourceVersion()
// retry and ignore errors until timeout
timeouterr := wait.Poll(retryInterval, timeout, func() (bool, error) {
lastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, obj)
if lastErr != nil {
E2ELogf(t, "Retrying. Got error: %v\n", lastErr)
return false, nil
}
if obj.GetResourceVersion() == initialVersion {
E2ELogf(t, "Retrying. Object still doesn't update. got version %s ... wanted %s\n", obj.GetResourceVersion(), initialVersion)
return false, nil
}
return true, nil
})
// Error in function call
if lastErr != nil {
return lastErr
}
// Timeout
if timeouterr != nil {
return timeouterr
}
E2ELogf(t, "Object found '%s' found\n", name)
return nil
}
// waitForSuiteScansStatus will poll until the compliancesuite that we're looking for (and all of its scans) reaches a certain status, or until
// a timeout is reached.
func waitForSuiteScansStatus(t *testing.T, f *framework.Framework, namespace, name string, targetStatus compv1alpha1.ComplianceScanStatusPhase, targetComplianceStatus compv1alpha1.ComplianceScanStatusResult) error {
suite := &compv1alpha1.ComplianceSuite{}
var lastErr error
// retry and ignore errors until timeout
defer logContainerOutput(t, f, namespace, name)
timeouterr := wait.Poll(retryInterval, timeout, func() (bool, error) {
lastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, suite)
if lastErr != nil {
if apierrors.IsNotFound(lastErr) {
E2ELogf(t, "Waiting for availability of %s compliancesuite\n", name)
return false, nil
}
E2ELogf(t, "Retrying. Got error: %v\n", lastErr)
return false, nil
}
if suite.Status.Phase != targetStatus {
E2ELogf(t, "Waiting until suite %s reaches target status '%s'. Current status: %s", suite.Name, targetStatus, suite.Status.Phase)
return false, nil
}
// The suite is now done, make sure the compliance status is expected
if suite.Status.Result != targetComplianceStatus {
return false, fmt.Errorf("expecting %s got %s", targetComplianceStatus, suite.Status.Result)
}
// If we were expecting an error, there's no use checking the scans
if targetComplianceStatus == compv1alpha1.ResultError {
return true, nil
}
// Now as a sanity check make sure that the scan statuses match the aggregated
// suite status
// Got the suite. There should be at least one scan or else we're still initialising
if len(suite.Status.ScanStatuses) < 1 {
return false, errors.New("not enough scan statuses")
}
//Examine the scan status both in the suite status and the scan
for _, scanStatus := range suite.Status.ScanStatuses {
if scanStatus.Phase != targetStatus {
return false, fmt.Errorf("suite in status %s but scan wrapper %s in status %s", targetStatus, scanStatus.Name, scanStatus.Phase)
}
// If the status was present in the suite, then /any/ error
// should fail the test as the scans should be read /from/
// the scan itself
waitForScanStatus(t, f, namespace, scanStatus.Name, targetStatus)
}
return true, nil
})
// Error in function call
if lastErr != nil {
return lastErr
}
// Timeout
if timeouterr != nil {
return timeouterr
}
E2ELogf(t, "All scans in ComplianceSuite have finished (%s)\n", suite.Name)
return nil
}
func scanResultIsExpected(t *testing.T, f *framework.Framework, namespace, name string, expectedResult compv1alpha1.ComplianceScanStatusResult) error {
cs := &compv1alpha1.ComplianceScan{}
defer logContainerOutput(t, f, namespace, name)
err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, cs)
if err != nil {
return err
}
if cs.Status.Result != expectedResult {
return fmt.Errorf("The ComplianceScan Result wasn't what we expected. Got '%s', expected '%s'", cs.Status.Result, expectedResult)
}
if expectedResult == compv1alpha1.ResultError {
if cs.Status.ErrorMessage == "" {
return fmt.Errorf("The ComplianceScan 'errormsg' wasn't set (it was empty). Even if we expected an error")
}
}
return nil
}
func scanHasWarnings(t *testing.T, f *framework.Framework, namespace, name string) error {
cs := &compv1alpha1.ComplianceScan{}
err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, cs)
if err != nil {
return err
}
if cs.Status.Warnings == "" {
return fmt.Errorf("E2E-FAILURE: Excepted the scan %s to contain a warning", name)
}
return nil
}
func suiteErrorMessageMatchesRegex(t *testing.T, f *framework.Framework, namespace, name, regexToMatch string) error {
E2ELogf(t, "Fetching suite: '%s'", name)
cs := &compv1alpha1.ComplianceSuite{}
key := types.NamespacedName{Name: name, Namespace: namespace}
err := f.Client.Get(goctx.TODO(), key, cs)
if err != nil {
return err
}
re := regexp.MustCompile(regexToMatch)
if !re.MatchString(cs.Status.ErrorMessage) {
return fmt.Errorf("The error message found in the compliance suite '%s' "+
"didn't match the expected regex. Found: '%s', Expected regex: '%s'",
name, cs.Status.ErrorMessage, regexToMatch)
}
return nil
}
// getNodesWithSelector lists nodes according to a specific selector
func getNodesWithSelector(f *framework.Framework, labelselector map[string]string) ([]corev1.Node, error) {
var nodes corev1.NodeList
lo := &client.ListOptions{
LabelSelector: labels.SelectorFromSet(labelselector),
}
listErr := backoff.Retry(
func() error {
return f.Client.List(goctx.TODO(), &nodes, lo)
},
defaultBackoff)
if listErr != nil {
return nodes.Items, fmt.Errorf("couldn't list nodes with selector %s: %w", labelselector, listErr)
}
return nodes.Items, nil
}
func getNodesWithSelectorOrFail(t *testing.T, f *framework.Framework, labelselector map[string]string) []corev1.Node {
nodes, err := getNodesWithSelector(f, labelselector)
if err != nil {
E2EFatalf(t, "couldn't get nodes with selector %s: %w", labelselector, err)
}
return nodes
}
func getPodsForScan(f *framework.Framework, scanName string) ([]corev1.Pod, error) {
selectPods := map[string]string{
compv1alpha1.ComplianceScanLabel: scanName,
}
var pods corev1.PodList
lo := &client.ListOptions{
LabelSelector: labels.SelectorFromSet(selectPods),
}
err := f.Client.List(goctx.TODO(), &pods, lo)
if err != nil {
return nil, err
}
return pods.Items, nil
}
// getConfigMapsFromScan lists the configmaps from the specified openscap scan instance
func getConfigMapsFromScan(f *framework.Framework, scaninstance *compv1alpha1.ComplianceScan) []corev1.ConfigMap {
var configmaps corev1.ConfigMapList
labelselector := map[string]string{
compv1alpha1.ComplianceScanLabel: scaninstance.Name,
compv1alpha1.ResultLabel: "",
}
lo := &client.ListOptions{
LabelSelector: labels.SelectorFromSet(labelselector),
}
f.Client.List(goctx.TODO(), &configmaps, lo)
return configmaps.Items
}
func assertHasCheck(f *framework.Framework, suiteName, scanName string, check compv1alpha1.ComplianceCheckResult) error {
var getCheck compv1alpha1.ComplianceCheckResult
err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: check.Name, Namespace: check.Namespace}, &getCheck)
if err != nil {
return err
}
if getCheck.Status != check.Status {
return fmt.Errorf("expected result %s got result %s", check.Status, getCheck.Status)
}
if getCheck.ID != check.ID {
return fmt.Errorf("expected ID %s got ID %s", check.ID, getCheck.ID)
}
if getCheck.Labels == nil {
return fmt.Errorf("complianceCheckResult has no labels")
}
if getCheck.Labels[compv1alpha1.SuiteLabel] != suiteName {
return fmt.Errorf("Did not find expected suite name label %s, found %s", suiteName, getCheck.Labels[compv1alpha1.SuiteLabel])
}
if getCheck.Labels[compv1alpha1.ComplianceScanLabel] != scanName {
return fmt.Errorf("Did not find expected suite name label %s, found %s", suiteName, getCheck.Labels[compv1alpha1.SuiteLabel])
}
if getCheck.Labels[compv1alpha1.ComplianceCheckResultSeverityLabel] != string(getCheck.Severity) {
return fmt.Errorf("did not find expected severity name label %s, found %s", suiteName, getCheck.Labels[compv1alpha1.ComplianceCheckResultSeverityLabel])
}
if getCheck.Labels[compv1alpha1.ComplianceCheckResultStatusLabel] != string(getCheck.Status) {
return fmt.Errorf("did not find expected status name label %s, found %s", suiteName, getCheck.Labels[compv1alpha1.ComplianceCheckResultStatusLabel])
}
return nil
}
func assertCheckRemediation(f *framework.Framework, name, namespace string, shouldHaveRem bool) error {
var getCheck compv1alpha1.ComplianceCheckResult
err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, &getCheck)
if err != nil {
return err
}
_, hasRemLabel := getCheck.Labels[compv1alpha1.ComplianceCheckResultHasRemediation]
if hasRemLabel != shouldHaveRem {
return fmt.Errorf("unexpected label found: %v (expected: %s)", getCheck.Labels, strconv.FormatBool(shouldHaveRem))
}
// Also make sure a remediation with the same name exists (or not)
var getRem compv1alpha1.ComplianceRemediation
var hasRem bool
err = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, &getRem)
if apierrors.IsNotFound(err) {
hasRem = false
} else if err != nil {
return err
} else {
hasRem = true
}
if hasRem != shouldHaveRem {
return fmt.Errorf("unexpected remediation object: expected: %s, found: %s", strconv.FormatBool(shouldHaveRem), strconv.FormatBool(hasRem))
}
return nil
}
func getRemediationsFromScan(f *framework.Framework, suiteName, scanName string) ([]compv1alpha1.ComplianceRemediation, error) {
var scanSuiteRemediations compv1alpha1.ComplianceRemediationList
scanSuiteSelector := make(map[string]string)
scanSuiteSelector[compv1alpha1.SuiteLabel] = suiteName
scanSuiteSelector[compv1alpha1.ComplianceScanLabel] = scanName
listOpts := client.ListOptions{
LabelSelector: labels.SelectorFromSet(scanSuiteSelector),
}
if err := f.Client.List(goctx.TODO(), &scanSuiteRemediations, &listOpts); err != nil {
return nil, err
}
return scanSuiteRemediations.Items, nil
}
func assertHasRemediations(t *testing.T, f *framework.Framework, suiteName, scanName, roleLabel string, remNameList []string) error {
var scanSuiteMapNames = make(map[string]bool)
var scanSuiteRemediations []compv1alpha1.ComplianceRemediation
// FIXME: This is a temporary hack. At the moment, the ARF parser is too slow
// and it might take a bit for the remediations to appear. It would be cleaner
// to signify somehow that the remediations were already processed, but in the
// meantime, poll for 5 minutes while the remediations are being created
err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
var listErr error
scanSuiteRemediations, listErr = getRemediationsFromScan(f, suiteName, scanName)
if listErr != nil {
E2ELogf(t, "Error listing remediations. Retrying: %s", listErr)
}
for idx := range scanSuiteRemediations {
rem := &scanSuiteRemediations[idx]
scanSuiteMapNames[rem.Name] = true
}
for _, expRem := range remNameList {
_, ok := scanSuiteMapNames[expRem]
if !ok {
E2ELogf(t, "expected remediation %s not yet found", expRem)
return false, nil
}
}
E2ELogf(t, "expected remediations found!")
return true, nil
})
if err != nil {
E2EErrorf(t, "Error waiting for remediations to appear")
return err
}
return nil
}
type machineConfigActionFunc func() error
type poolPredicate func(t *testing.T, pool *mcfgv1.MachineConfigPool) (bool, error)
// waitForMachinePoolUpdate retrieves the original version of a MCP, then performs an
// action passed in as a parameter and then waits until a MCP passes a predicate
// If a pool is already given (poolPre), that will be used to check the previous state of the pool.
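// A typical (illustrative) call is
//   waitForMachinePoolUpdate(t, f, testPoolName, applyFn, predicateFn, nil)
// where applyFn and predicateFn are hypothetical caller-supplied callbacks.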
func waitForMachinePoolUpdate(t *testing.T, f *framework.Framework, name string, action machineConfigActionFunc, predicate poolPredicate, poolPre *mcfgv1.MachineConfigPool) error {
if poolPre == nil {
// initialize empty pool if it wasn't already given
poolPre = &mcfgv1.MachineConfigPool{}
err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name}, poolPre)
if err != nil {
E2EErrorf(t, "Could not find the pool pre update")
return err
}
}
E2ELogf(t, "Pre-update, MC Pool %s has generation %d", poolPre.Name, poolPre.Status.ObservedGeneration)
err := action()
if err != nil {
E2EErrorf(t, "Action failed %v", err)
return err
}
err = wait.PollImmediate(machineOperationRetryInterval, machineOperationTimeout, func() (bool, error) {
pool := &mcfgv1.MachineConfigPool{}
err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name}, pool)
if err != nil {
// even not found is a hard error here
E2EErrorf(t, "Could not find the pool post update")
return false, err
}
ok, err := predicate(t, pool)
if err != nil {
E2EErrorf(t, "Predicate failed %v", err)
return false, err
}
if !ok {
E2ELogf(t, "Predicate not true yet, waiting")
return false, nil
}
E2ELogf(t, "Will check for update, Gen: %d, previous %d updated %d/%d unavailable %d",
pool.Status.ObservedGeneration, poolPre.Status.ObservedGeneration,
pool.Status.UpdatedMachineCount, pool.Status.MachineCount,
pool.Status.UnavailableMachineCount)
// Check if the pool has finished updating yet. If the pool was paused, we just check that
// the generation was increased and wait for machines to reboot separately
if (pool.Status.ObservedGeneration != poolPre.Status.ObservedGeneration) &&
pool.Spec.Paused == true || ((pool.Status.UpdatedMachineCount == pool.Status.MachineCount) &&
(pool.Status.UnavailableMachineCount == 0)) {
E2ELogf(t, "The pool has updated")
return true, nil
}
E2ELogf(t, "The pool has not updated yet. Gen: %d, expected %d updated %d/%d unavailable %d",
pool.Status.ObservedGeneration, poolPre.Status.ObservedGeneration,
pool.Status.UpdatedMachineCount, pool.Status.MachineCount,
pool.Status.UnavailableMachineCount)
return false, nil
})
if err != nil {
return err
}
return nil
}
// waitForNodesToBeReady waits until all the nodes in the cluster have
// reached the expected machineConfig.
func waitForNodesToBeReady(t *testing.T, f *framework.Framework, errorMessage string) {
err := wait.PollImmediate(machineOperationRetryInterval, machineOperationTimeout, func() (bool, error) {
var nodes corev1.NodeList
f.Client.List(goctx.TODO(), &nodes, &client.ListOptions{})
for _, node := range nodes.Items {
E2ELogf(t, "Node %s has config %s, desired config %s state %s",
node.Name,
node.Annotations["machineconfiguration.openshift.io/currentConfig"],
node.Annotations["machineconfiguration.openshift.io/desiredConfig"],
node.Annotations["machineconfiguration.openshift.io/state"])
if (node.Annotations["machineconfiguration.openshift.io/currentConfig"] != node.Annotations["machineconfiguration.openshift.io/desiredConfig"]) ||
(node.Annotations["machineconfiguration.openshift.io/state"] != "Done") {
E2ELogf(t, "Node %s still updating", node.Name)
return false, nil
}
E2ELogf(t, "Node %s was updated", node.Name)
}
E2ELogf(t, "All machines updated")
return true, nil
})
if err != nil {
E2EFatalf(t, "%s: %s", errorMessage, err)
}
}
// waitForNodesToHaveARenderedPool waits until all of the given nodes transition to a rendered
// config from the given pool. A typical use-case is when a node is unlabeled
// from a pool; in that case we need to wait until MCO makes the node use the other available pool. Only then
// is it safe to remove the pool the node was labeled with, otherwise the node might still use the removed
// pool on its next reboot, which would transition the node into the Degraded state.
func waitForNodesToHaveARenderedPool(t *testing.T, f *framework.Framework, nodes []corev1.Node, poolName string) error {
pool := &mcfgv1.MachineConfigPool{}
err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: poolName}, pool)
if err != nil {
E2EErrorf(t, "Could not find pool %s\n", poolName)
return err
}
E2ELogf(t, "We'll wait for the nodes to reach %s\n", pool.Spec.Configuration.Name)
return wait.PollImmediate(machineOperationRetryInterval, machineOperationTimeout, func() (bool, error) {
for _, loopNode := range nodes {
node := &corev1.Node{}
err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: loopNode.Name}, node)
if err != nil {
return false, err
}
E2ELogf(t, "Node %s has config %s, desired config %s state %s",
node.Name,
node.Annotations["machineconfiguration.openshift.io/currentConfig"],
node.Annotations["machineconfiguration.openshift.io/desiredConfig"],
node.Annotations["machineconfiguration.openshift.io/state"])
if node.Annotations["machineconfiguration.openshift.io/desiredConfig"] != pool.Spec.Configuration.Name ||
node.Annotations["machineconfiguration.openshift.io/currentConfig"] != node.Annotations["machineconfiguration.openshift.io/desiredConfig"] {
E2ELogf(t, "Node %s still updating", node.Name)
return false, nil
}
E2ELogf(t, "Node %s was updated", node.Name)
}
E2ELogf(t, "All machines updated")
return true, nil
})
}
func applyRemediationAndCheck(t *testing.T, f *framework.Framework, namespace, name, pool string) error {
rem := &compv1alpha1.ComplianceRemediation{}
err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, rem)
if err != nil {
return err
}
E2ELogf(t, "Remediation %s found", name)
applyRemediation := func() error {
rem.Spec.Apply = true
err = f.Client.Update(goctx.TODO(), rem)
if err != nil {
E2EErrorf(t, "Cannot apply remediation")
return err
}
E2ELogf(t, "Remediation applied")
return nil
}
predicate := func(t *testing.T, pool *mcfgv1.MachineConfigPool) (bool, error) {
// When checking if a MC is applied to a pool, we can't check the pool status
// when the pool is paused..
source := pool.Status.Configuration.Source
if pool.Spec.Paused == true {
source = pool.Spec.Configuration.Source
}
for _, mc := range source {
if mc.Name == rem.GetMcName() {
// When applying a remediation, check that the MC *is* in the pool
E2ELogf(t, "Remediation %s present in pool %s, returning true", mc.Name, pool.Name)
return true, nil
}
}
E2ELogf(t, "Remediation %s not present in pool %s, returning false", rem.GetMcName(), pool.Name)
return false, nil
}
err = waitForMachinePoolUpdate(t, f, pool, applyRemediation, predicate, nil)
if err != nil {
E2EErrorf(t, "Failed to wait for pool to update after applying MC: %v", err)
return err
}
E2ELogf(t, "Machines updated with remediation")
return nil
}
func removeObsoleteRemediationAndCheck(t *testing.T, f *framework.Framework, namespace, name, renderedMcName, pool string) error {
rem := &compv1alpha1.ComplianceRemediation{}
err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, rem)
if err != nil {
return err
}
E2ELogf(t, "Remediation %s found", name)
removeObsoleteContents := func() error {
E2ELogf(t, "pre-update %v", rem.Status)
remCopy := rem.DeepCopy()
remCopy.Spec.Apply = true
remCopy.Spec.Outdated.Object = nil
err = f.Client.Update(goctx.TODO(), remCopy)
if err != nil {
E2EErrorf(t, "Cannot update remediation")
return err
}
E2ELogf(t, "Obsolete data removed")
rem2 := &compv1alpha1.ComplianceRemediation{}
f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, rem2)
E2ELogf(t, "post-update %v", rem2.Status)
return nil
}
// Get the MachineConfigPool before the remediation has been made current
// This way, we can check that it changed without race-conditions
poolBeforeRemediation := &mcfgv1.MachineConfigPool{}
err = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: testPoolName}, poolBeforeRemediation)
if err != nil {
return err
}
obsoleteMc := &mcfgv1.MachineConfig{}
err = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: renderedMcName}, obsoleteMc)
if err != nil {
return err
}
predicate := func(t *testing.T, pool *mcfgv1.MachineConfigPool) (bool, error) {
// make sure the composite remediation has been re-rendered
currentMc := &mcfgv1.MachineConfig{}
err = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: renderedMcName}, currentMc)
if err != nil {
return false, err
}
if currentMc.Generation == obsoleteMc.Generation {
E2ELogf(t, "MC %s still has generation %d, looping", renderedMcName, currentMc.Generation)
return false, nil
}
E2ELogf(t, "MC has been re-rendered from %d to %d", obsoleteMc.Generation, currentMc.Generation)
return true, nil
}
err = waitForMachinePoolUpdate(t, f, pool, removeObsoleteContents, predicate, poolBeforeRemediation)
if err != nil {
E2EErrorf(t, "Failed to wait for pool to update after applying MC: %v", err)
return err
}
E2ELogf(t, "Machines updated with remediation that is no longer obsolete")
return nil
}
func assertRemediationIsObsolete(t *testing.T, f *framework.Framework, namespace, name string) {
err, isObsolete := remediationIsObsolete(t, f, namespace, name)
if err != nil {
E2EFatalf(t, "%s", err)
}
if !isObsolete {
E2EFatalf(t, "expected that the remediation is obsolete")
}
}
func assertRemediationIsCurrent(t *testing.T, f *framework.Framework, namespace, name string) {
err, isObsolete := remediationIsObsolete(t, f, namespace, name)
if err != nil {
E2EFatalf(t, "%s", err)
}
if isObsolete {
E2EFatalf(t, "expected that the remediation is not obsolete")
}
}
func remediationIsObsolete(t *testing.T, f *framework.Framework, namespace, name string) (error, bool) {
rem := &compv1alpha1.ComplianceRemediation{}
var lastErr error
timeouterr := wait.Poll(retryInterval, timeout, func() (bool, error) {
lastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, rem)
if lastErr != nil {
return false, nil
}
return true, nil
})
if lastErr != nil {
return fmt.Errorf("Got error trying to get remediation's obsolescence: %w", lastErr), false
}
if timeouterr != nil {
return fmt.Errorf("Timed out trying to get remediation's obsolescence: %w", lastErr), false
}
E2ELogf(t, "Remediation %s found", name)
if rem.Status.ApplicationState == compv1alpha1.RemediationOutdated &&
rem.Spec.Outdated.Object != nil {
return nil, true
}
return nil, false
}
func unApplyRemediationAndCheck(t *testing.T, f *framework.Framework, namespace, name, pool string) error {
rem := &compv1alpha1.ComplianceRemediation{}
err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, rem)
if err != nil {
return err
}
E2ELogf(t, "Remediation found")
unApplyRemediation := func() error {
rem.Spec.Apply = false
err = f.Client.Update(goctx.TODO(), rem)
if err != nil {
E2EErrorf(t, "Cannot un-apply remediation")
return err
}
E2ELogf(t, "Remediation un-applied")
return nil
}
predicate := func(t *testing.T, pool *mcfgv1.MachineConfigPool) (bool, error) {
// We want to check that the MC created by the operator went away. Let's
// poll the pool until we no longer see the remediation in the status
for _, mc := range pool.Status.Configuration.Source {
if mc.Name == rem.GetMcName() {
E2ELogf(t, "Remediation %s present in pool %s, returning false", mc.Name, pool.Name)
return false, nil
}
}
E2ELogf(t, "Remediation %s not present in pool %s, returning true", rem.GetMcName(), pool.Name)
return true, nil
}
err = waitForMachinePoolUpdate(t, f, pool, unApplyRemediation, predicate, nil)
if err != nil {
E2EErrorf(t, "Failed to wait for pool to update after un-applying MC: %v", err)
return err
}
E2ELogf(t, "Machines updated with remediation removed")
return nil
}
func waitForRemediationToBeAutoApplied(t *testing.T, f *framework.Framework, remName, remNamespace string, pool *mcfgv1.MachineConfigPool) {
rem := &compv1alpha1.ComplianceRemediation{}
var lastErr error
timeouterr := wait.Poll(retryInterval, timeout, func() (bool, error) {
lastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: remName, Namespace: remNamespace}, rem)
if apierrors.IsNotFound(lastErr) {
E2ELogf(t, "Waiting for availability of %s remediation\n", remName)
return false, nil
}
if lastErr != nil {
E2ELogf(t, "Retrying. Got error: %v\n", lastErr)
return false, nil
}
E2ELogf(t, "Found remediation: %s\n", remName)
return true, nil
})
assertNoErrorNorTimeout(t, lastErr, timeouterr, "getting remediation before auto-applying it")
preNoop := func() error {
return nil
}
predicate := func(t *testing.T, pool *mcfgv1.MachineConfigPool) (bool, error) {
// When checking if a MC is applied to a pool, we can't check the pool status
// when the pool is paused.
source := pool.Status.Configuration.Source
if pool.Spec.Paused {
source = pool.Spec.Configuration.Source
}
for _, mc := range source {
if mc.Name == rem.GetMcName() {
// When applying a remediation, check that the MC *is* in the pool
E2ELogf(t, "Remediation %s present in pool %s, returning true", mc.Name, pool.Name)
return true, nil
}
}
E2ELogf(t, "Remediation %s not present in pool %s, returning false", rem.GetMcName(), pool.Name)
return false, nil
}
err := waitForMachinePoolUpdate(t, f, pool.Name, preNoop, predicate, pool)
if err != nil {
E2EFatalf(t, "Failed to wait for pool to update after applying MC: %v", err)
}
E2ELogf(t, "Machines updated with remediation")
waitForNodesToBeReady(t, f, "Failed to wait for nodes to come back up after auto-applying remediation")
E2ELogf(t, "Remediation applied to machines and machines rebooted")
}
func unPauseMachinePoolAndWait(t *testing.T, f *framework.Framework, poolName string) {
if err := unPauseMachinePool(t, f, poolName); err != nil {
E2EFatalf(t, "Could not unpause the MC pool")
}
// When the pool updates, we need to wait for the machines to pick up the new rendered
// config
var lastErr error
timeoutErr := wait.PollImmediate(machineOperationRetryInterval, machineOperationTimeout, func() (bool, error) {
pool := &mcfgv1.MachineConfigPool{}
lastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: poolName}, pool)
if apierrors.IsNotFound(lastErr) {
E2EFatalf(t, "Could not find the pool post update")
} else if lastErr != nil {
// even not found is a hard error here
E2ELogf(t, "Got error while getting MachineConfigPool. Retrying: %s", lastErr)
return false, nil
}
E2ELogf(t, "Will check for update, updated %d/%d unavailable %d",
pool.Status.UpdatedMachineCount, pool.Status.MachineCount,
pool.Status.UnavailableMachineCount)
if pool.Status.UpdatedMachineCount == pool.Status.MachineCount &&
pool.Status.UnavailableMachineCount == 0 {
E2ELogf(t, "The pool has updated")
return true, nil
}
E2ELogf(t, "The pool has not updated yet. updated %d/%d unavailable %d",
pool.Status.UpdatedMachineCount, pool.Status.MachineCount,
pool.Status.UnavailableMachineCount)
return false, nil
})
if lastErr != nil {
E2EFatalf(t, "Got error waiting for MCP unpausing: %s", timeoutErr)
}
if timeoutErr != nil {
E2EFatalf(t, "Timed out waiting for MCP unpausing: %s", timeoutErr)
}
}
func pauseMachinePool(t *testing.T, f *framework.Framework, poolName string) error {
return modMachinePoolPause(t, f, poolName, true)
}
func unPauseMachinePool(t *testing.T, f *framework.Framework, poolName string) error {
return modMachinePoolPause(t, f, poolName, false)
}
func modMachinePoolPause(t *testing.T, f *framework.Framework, poolName string, pause bool) error {
pool := &mcfgv1.MachineConfigPool{}
err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: poolName}, pool)
if err != nil {
E2EErrorf(t, "Could not find the pool to modify")
return err
}
poolCopy := pool.DeepCopy()
poolCopy.Spec.Paused = pause
err = f.Client.Update(goctx.TODO(), poolCopy)
if err != nil {
E2EErrorf(t, "Could not update the pool")
return err
}
return nil
}
func createReadyMachineConfigPoolSubset(t *testing.T, f *framework.Framework, oldPoolName, newPoolName string) (*mcfgv1.MachineConfigPool, error) {
pool, err := createMachineConfigPoolSubset(t, f, oldPoolName, newPoolName)
if err != nil {
return nil, err
}
err = waitForPoolCondition(t, f, mcfgv1.MachineConfigPoolUpdated, newPoolName)
if err != nil {
return nil, err
}
return pool, nil
}
// picks a random machine from an existing pool and creates a subset of the pool with
// one machine
func createMachineConfigPoolSubset(t *testing.T, f *framework.Framework, oldPoolName, newPoolName string) (*mcfgv1.MachineConfigPool, error) {
// retrieve the old pool
oldPool := &mcfgv1.MachineConfigPool{}
getErr := backoff.RetryNotify(
func() error {
err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: oldPoolName}, oldPool)
if apierrors.IsNotFound(err) {
// Can't recover from this
E2EFatalf(t, "Could not find the pool to modify")
}
// might be a transient error
return err
},
defaultBackoff,
func(err error, interval time.Duration) {
E2ELogf(t, "error while getting MachineConfig pool to create sub-pool from: %s. Retrying after %s", err, interval)
})
if getErr != nil {
return nil, fmt.Errorf("couldn't get MachineConfigPool to create sub-pool from: %w", getErr)
}
// list the nodes matching the node selector
poolNodes, getnodesErr := getNodesWithSelector(f, oldPool.Spec.NodeSelector.MatchLabels)
if getnodesErr != nil {
return nil, getnodesErr
}
if len(poolNodes) == 0 {
return nil, errors.New("no nodes found with the old pool selector")
}
// just pick one of them and create the new pool out of that one-item node slice
return createMachineConfigPool(t, f, oldPoolName, newPoolName, poolNodes[:1])
}
// creates a new pool named newPoolName from a list of nodes
func createMachineConfigPool(t *testing.T, f *framework.Framework, oldPoolName, newPoolName string, nodes []corev1.Node) (*mcfgv1.MachineConfigPool, error) {
newPoolNodeLabel := fmt.Sprintf("node-role.kubernetes.io/%s", newPoolName)
err := labelNodes(t, f, newPoolNodeLabel, nodes)
if err != nil {
return nil, err
}
return createMCPObject(t, f, newPoolNodeLabel, oldPoolName, newPoolName)
}
func labelNodes(t *testing.T, f *framework.Framework, newPoolNodeLabel string, nodes []corev1.Node) error {
for _, node := range nodes {
nodeCopy := node.DeepCopy()
nodeCopy.Labels[newPoolNodeLabel] = ""
E2ELogf(t, "Adding label %s to node %s\n", newPoolNodeLabel, nodeCopy.Name)
updateErr := backoff.RetryNotify(
func() error {
return f.Client.Update(goctx.TODO(), nodeCopy)
},
defaultBackoff,
func(err error, interval time.Duration) {
E2ELogf(t, "error while labeling node: %s. Retrying after %s", err, interval)
})
if updateErr != nil {
E2ELogf(t, "Could not label node %s with %s\n", nodeCopy.Name, newPoolNodeLabel)
return fmt.Errorf("couldn't label node: %w", updateErr)
}
}
return nil
}
func unLabelNodes(t *testing.T, f *framework.Framework, rmPoolNodeLabel string, nodes []corev1.Node) error {
for _, node := range nodes {
nodeCopy := node.DeepCopy()
delete(nodeCopy.Labels, rmPoolNodeLabel)
E2ELogf(t, "Removing label %s from node %s\n", rmPoolNodeLabel, nodeCopy.Name)
err := f.Client.Update(goctx.TODO(), nodeCopy)
if err != nil {
E2ELogf(t, "Could not label node %s with %s\n", nodeCopy.Name, rmPoolNodeLabel)
return err
}
}
return nil
}
func createMCPObject(t *testing.T, f *framework.Framework, newPoolNodeLabel, oldPoolName, newPoolName string) (*mcfgv1.MachineConfigPool, error) {
nodeSelectorMatchLabel := make(map[string]string)
nodeSelectorMatchLabel[newPoolNodeLabel] = ""
newPool := &mcfgv1.MachineConfigPool{
ObjectMeta: metav1.ObjectMeta{Name: newPoolName},
Spec: mcfgv1.MachineConfigPoolSpec{
NodeSelector: &metav1.LabelSelector{
MatchLabels: nodeSelectorMatchLabel,
},
MachineConfigSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: mcfgv1.MachineConfigRoleLabelKey,
Operator: metav1.LabelSelectorOpIn,
Values: []string{oldPoolName, newPoolName},
},
},
},
},
}
// We create but don't clean up, we'll call a function for this since we need to
// re-label hosts first.
createErr := backoff.RetryNotify(
func() error {
err := f.Client.Create(goctx.TODO(), newPool, nil)
if apierrors.IsAlreadyExists(err) {
return nil
}
return err
},
defaultBackoff,
func(err error, interval time.Duration) {
E2ELogf(t, "error while labeling node: %s. Retrying after %s", err, interval)
})
if createErr != nil {
return newPool, fmt.Errorf("couldn't create MCP: %w", createErr)
}
return newPool, nil
}
func waitForPoolCondition(t *testing.T, f *framework.Framework, conditionType mcfgv1.MachineConfigPoolConditionType, newPoolName string) error {
return wait.PollImmediate(machineOperationRetryInterval, machineOperationTimeout, func() (bool, error) {
pool := mcfgv1.MachineConfigPool{}
err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: newPoolName}, &pool)
if err != nil {
E2EErrorf(t, "Could not find the pool post update")
return false, err
}
if isMachineConfigPoolConditionTrue(pool.Status.Conditions, conditionType) {
return true, nil
}
E2ELogf(t, "The pool has not updated yet\n")
return false, nil
})
}
// isMachineConfigPoolConditionTrue returns true when the conditionType is present and set to `ConditionTrue`
func isMachineConfigPoolConditionTrue(conditions []mcfgv1.MachineConfigPoolCondition, conditionType mcfgv1.MachineConfigPoolConditionType) bool {
return IsMachineConfigPoolConditionPresentAndEqual(conditions, conditionType, corev1.ConditionTrue)
}
// IsMachineConfigPoolConditionPresentAndEqual returns true when conditionType is present and equal to status.
func IsMachineConfigPoolConditionPresentAndEqual(conditions []mcfgv1.MachineConfigPoolCondition, conditionType mcfgv1.MachineConfigPoolConditionType, status corev1.ConditionStatus) bool {
for _, condition := range conditions {
if condition.Type == conditionType {
return condition.Status == status
}
}
return false
}
func getPoolNodeRoleSelector() map[string]string {
return utils.GetNodeRoleSelector(testPoolName)
}
func assertMustHaveParsedProfiles(f *framework.Framework, name string, productType, productName string) error {
var pl compv1alpha1.ProfileList
lo := &client.ListOptions{
LabelSelector: labels.SelectorFromSet(map[string]string{
compv1alpha1.ProfileBundleOwnerLabel: name,
}),
}
if err := f.Client.List(goctx.TODO(), &pl, lo); err != nil {
return err
}
if len(pl.Items) <= 0 {
return fmt.Errorf("Profiles weren't parsed from the ProfileBundle. Expected more than one, got %d", len(pl.Items))
}
for _, prof := range pl.Items {
if prof.Annotations[compv1alpha1.ProductTypeAnnotation] != productType {
return fmt.Errorf("expected %s to be %s, got %s instead", compv1alpha1.ProductTypeAnnotation, productType, prof.Annotations[compv1alpha1.ProductTypeAnnotation])
}
if prof.Annotations[compv1alpha1.ProductAnnotation] != productName {
return fmt.Errorf("expected %s to be %s, got %s instead", compv1alpha1.ProductAnnotation, productName, prof.Annotations[compv1alpha1.ProductAnnotation])
}
}
return nil
}
func doesRuleExist(f *framework.Framework, namespace, ruleName string) (error, bool) {
return doesObjectExist(f, "Rule", namespace, ruleName)
}
func doesObjectExist(f *framework.Framework, kind, namespace, name string) (error, bool) {
obj := unstructured.Unstructured{}
obj.SetGroupVersionKind(schema.GroupVersionKind{
Group: compv1alpha1.SchemeGroupVersion.Group,
Version: compv1alpha1.SchemeGroupVersion.Version,
Kind: kind,
})
key := types.NamespacedName{Namespace: namespace, Name: name}
err := f.Client.Get(goctx.TODO(), key, &obj)
if apierrors.IsNotFound(err) {
return nil, false
} else if err == nil {
return nil, true
}
return err, false
}
func findRuleReference(profile *compv1alpha1.Profile, ruleName string) bool {
for _, ruleRef := range profile.Rules {
if string(ruleRef) == ruleName {
return true
}
}
return false
}
func waitForDeploymentContentUpdate(t *testing.T, f *framework.Framework, name, imgDigest string) error {
lo := &client.ListOptions{
LabelSelector: labels.SelectorFromSet(map[string]string{
"profile-bundle": name,
"workload": "profileparser",
}),
}
var depls appsv1.DeploymentList
var lastErr error
timeouterr := wait.Poll(retryInterval, timeout, func() (bool, error) {
lastErr = f.Client.List(goctx.TODO(), &depls, lo)
if lastErr != nil {
E2ELogf(t, "Retrying. Got error: %v\n", lastErr)
return false, nil
}
depl := depls.Items[0]
currentImg := depl.Spec.Template.Spec.InitContainers[0].Image
// The image will have a different path, but the digest should be the same
if !strings.HasSuffix(currentImg, imgDigest) {
E2ELogf(t, "Retrying. Content image isn't up-to-date yet in the Deployment\n")
return false, nil
}
return true, nil
})
// Error in function call
if lastErr != nil {
return lastErr
}
// Timeout
if timeouterr != nil {
return timeouterr
}
E2ELogf(t, "Profile parser Deployment updated\n")
var pods corev1.PodList
timeouterr = wait.Poll(retryInterval, timeout, func() (bool, error) {
lastErr = f.Client.List(goctx.TODO(), &pods, lo)
if lastErr != nil {
E2ELogf(t, "Retrying. Got error: %v\n", lastErr)
return false, nil
}
// Deployment updates will trigger a rolling update, so we might have
// more than one pod. We only care about the newest
pod := utils.FindNewestPod(pods.Items)
currentImg := pod.Spec.InitContainers[0].Image
if !strings.HasSuffix(currentImg, imgDigest) {
E2ELogf(t, "Retrying. Content image isn't up-to-date yet in the Pod\n")
return false, nil
}
if len(pod.Status.InitContainerStatuses) != 2 {
E2ELogf(t, "Retrying. Content parsing isn't done yet\n")
return false, nil
}
// The profileparser will take time, so we know it'll be index 1
ppStatus := pod.Status.InitContainerStatuses[1]
if !ppStatus.Ready {
E2ELogf(t, "Retrying. Content parsing isn't done yet (container not ready yet)\n")
return false, nil
}
return true, nil
})
// Error in function call
if lastErr != nil {
return lastErr
}
// Timeout
if timeouterr != nil {
return timeouterr
}
E2ELogf(t, "Profile parser Deployment Done\n")
return nil
}
func assertMustHaveParsedRules(f *framework.Framework, name string) error {
var rl compv1alpha1.RuleList
lo := &client.ListOptions{
LabelSelector: labels.SelectorFromSet(map[string]string{
compv1alpha1.ProfileBundleOwnerLabel: name,
}),
}
if err := f.Client.List(goctx.TODO(), &rl, lo); err != nil {
return err
}
if len(rl.Items) <= 0 {
return fmt.Errorf("Rules weren't parsed from the ProfileBundle. Expected more than one, got %d", len(rl.Items))
}
return nil
}
func scanHasValidPVCReference(f *framework.Framework, namespace, scanName string) error {
scan := &compv1alpha1.ComplianceScan{}
err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: scanName, Namespace: namespace}, scan)
if err != nil {
return err
}
pvc := &corev1.PersistentVolumeClaim{}
pvcName := scan.Status.ResultsStorage.Name
pvcNamespace := scan.Status.ResultsStorage.Namespace
return f.Client.Get(goctx.TODO(), types.NamespacedName{Name: pvcName, Namespace: pvcNamespace}, pvc)
}
func waitForCronJobWithSchedule(t *testing.T, f *framework.Framework, namespace, suiteName, schedule string) error {
job := &batchv1beta1.CronJob{}
jobName := compsuitectrl.GetRerunnerName(suiteName)
var lastErr error
// retry and ignore errors until timeout
timeouterr := wait.Poll(retryInterval, timeout, func() (bool, error) {
lastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: jobName, Namespace: namespace}, job)
if lastErr != nil {
if apierrors.IsNotFound(lastErr) {
E2ELogf(t, "Waiting for availability of %s CronJob\n", jobName)
return false, nil
}
E2ELogf(t, "Retrying. Got error: %v\n", lastErr)
return false, nil
}
if job.Spec.Schedule != schedule {
E2ELogf(t, "Retrying. Schedule in found job (%s) doesn't match excpeted schedule: %s\n",
job.Spec.Schedule, schedule)
return false, nil
}
return true, nil
})
// Error in function call
if lastErr != nil {
return lastErr
}
// Timeout
if timeouterr != nil {
return timeouterr
}
E2ELogf(t, "Found %s CronJob\n", jobName)
return nil
}
func scanHasValidPVCReferenceWithSize(f *framework.Framework, namespace, scanName, size string) error {
scan := &compv1alpha1.ComplianceScan{}
err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: scanName, Namespace: namespace}, scan)
if err != nil {
return err
}
pvc := &corev1.PersistentVolumeClaim{}
pvcName := scan.Status.ResultsStorage.Name
pvcNamespace := scan.Status.ResultsStorage.Namespace
err = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: pvcName, Namespace: pvcNamespace}, pvc)
if err != nil {
return err
}
qty := resource.MustParse(size)
if qty.Cmp(*pvc.Status.Capacity.Storage()) != 0 {
expected := qty.String()
current := pvc.Status.Capacity.Storage().String()
return fmt.Errorf("Error: PVC '%s' storage doesn't match expected value. Has '%s', Expected '%s'", pvc.Name, current, expected)
}
return nil
}
func getRawResultClaimNameFromScan(t *testing.T, f *framework.Framework, namespace, scanName string) (string, error) {
scan := &compv1alpha1.ComplianceScan{}
key := types.NamespacedName{Name: scanName, Namespace: namespace}
E2ELogf(t, "Getting scan to fetch raw storage reference from it: %s/%s", namespace, scanName)
err := f.Client.Get(goctx.TODO(), key, scan)
if err != nil {
return "", err
}
referenceName := scan.Status.ResultsStorage.Name
if referenceName == "" {
return "", fmt.Errorf("ResultStorage reference in scan '%s' was empty", scanName)
}
return referenceName, nil
}
func getRotationCheckerWorkload(namespace, rawResultName string) *corev1.Pod {
return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "rotation-checker",
Namespace: namespace,
},
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyOnFailure,
Containers: []corev1.Container{
{
Name: "checker",
Image: "registry.access.redhat.com/ubi8/ubi-minimal",
Command: []string{"/bin/bash", "-c", "ls /raw-results | grep -v 'lost+found'"},
VolumeMounts: []corev1.VolumeMount{
{
Name: "raw-results",
MountPath: "/raw-results",
ReadOnly: true,
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "raw-results",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: rawResultName,
ReadOnly: true,
},
},
},
},
},
}
}
func assertResultStorageHasExpectedItemsAfterRotation(t *testing.T, f *framework.Framework, expected int, namespace, checkerPodName string) error {
// wait for pod to be ready
pod := &corev1.Pod{}
key := types.NamespacedName{Name: checkerPodName, Namespace: namespace}
E2ELogf(t, "Waiting until the raw result checker workload is done: %s/%s", namespace, checkerPodName)
timeouterr := wait.Poll(retryInterval, timeout, func() (bool, error) {
err := f.Client.Get(goctx.TODO(), key, pod)
if err != nil {
E2ELogf(t, "Got an error while fetching the result checker workload. retrying: %s", err)
return false, nil
}
if pod.Status.Phase == corev1.PodSucceeded {
return true, nil
} else if pod.Status.Phase == corev1.PodFailed {
E2ELogf(t, "Pod failed!")
return true, fmt.Errorf("status checker pod failed unexpectedly: %s", pod.Status.Message)
}
E2ELogf(t, "Pod not done. retrying.")
return false, nil
})
if timeouterr != nil {
return timeouterr
}
logopts := &corev1.PodLogOptions{
Container: "checker",
}
E2ELogf(t, "raw result checker workload is done. Getting logs.")
req := f.KubeClient.CoreV1().Pods(namespace).GetLogs(checkerPodName, logopts)
podLogs, err := req.Stream(goctx.Background())
if err != nil {
return err
}
buf := new(bytes.Buffer)
_, err = io.Copy(buf, podLogs)
if err != nil {
return fmt.Errorf("error in copy information from podLogs to buffer")
}
logs := buf.String()
got := len(strings.Split(strings.Trim(logs, "\n"), "\n"))
if got != expected {
return fmt.Errorf(
"Unexpected number of directories came from the result checker.\n"+
" Expected: %d. Got: %d. Output:\n%s", expected, got, logs)
}
E2ELogf(t, "raw result checker's output matches rotation policy.")
return nil
}
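// Illustrative sketch only, not called anywhere in this suite: the two helpers above
// are meant to be used together. The checker pod lists the raw-results PVC and the
// assertion counts the directories it printed; the variable names below are hypothetical.
//
//	checker := getRotationCheckerWorkload(namespace, rawResultName)
//	if createErr := f.Client.Create(goctx.TODO(), checker, nil); createErr == nil {
//		_ = assertResultStorageHasExpectedItemsAfterRotation(t, f, 3, namespace, checker.Name)
//	}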
// privCommandTuplePodOnHost returns a pod that calls commandPre in an init container, then sleeps for an hour
// and registers commandPost to be run in a PreStop handler.
func privCommandTuplePodOnHost(namespace, name, nodeName, commandPre string, commandPost []string) *corev1.Pod {
runAs := int64(0)
priv := true
return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: corev1.PodSpec{
InitContainers: []corev1.Container{
{
Name: name + "-init",
Image: "busybox",
Command: []string{"/bin/sh"},
Args: []string{"-c", commandPre},
VolumeMounts: []corev1.VolumeMount{
{
Name: "hostroot",
MountPath: "/hostroot",
},
},
SecurityContext: &corev1.SecurityContext{
Privileged: &priv,
RunAsUser: &runAs,
},
},
},
Containers: []corev1.Container{
{
Name: name,
Image: "busybox",
Command: []string{"/bin/sh"},
Args: []string{"-c", "sleep 3600"},
VolumeMounts: []corev1.VolumeMount{
{
Name: "hostroot",
MountPath: "/hostroot",
},
},
SecurityContext: &corev1.SecurityContext{
Privileged: &priv,
RunAsUser: &runAs,
},
Lifecycle: &corev1.Lifecycle{
PreStop: &corev1.Handler{
Exec: &corev1.ExecAction{Command: commandPost},
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "hostroot",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/",
},
},
},
},
RestartPolicy: "Never",
NodeSelector: map[string]string{
corev1.LabelHostname: nodeName,
},
ServiceAccountName: "resultscollector",
},
}
}
// Creates the file /etc/securetty on the host through a privileged init container, then sleeps. The function returns
// the pod, which the caller can later delete; at that point the PreStop handler removes the file again.
func createAndRemoveEtcSecurettyPod(namespace, name, nodeName string) *corev1.Pod {
return privCommandTuplePodOnHost(namespace, name, nodeName, "touch /hostroot/etc/securetty", []string{"rm", "-f", "/hostroot/etc/securetty"})
}
func waitForPod(podCallback wait.ConditionFunc) error {
return wait.PollImmediate(retryInterval, timeout, podCallback)
}
// initContainerCompleted returns a ConditionFunc that passes if all init containers have succeeded
func initContainerCompleted(t *testing.T, c kubernetes.Interface, name, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(goctx.TODO(), name, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
if apierrors.IsNotFound(err) {
E2ELogf(t, "Pod %s not found yet", name)
return false, nil
}
for _, initStatus := range pod.Status.InitContainerStatuses {
E2ELog(t, initStatus)
// the init container must have passed the readiness probe
if !initStatus.Ready {
E2ELog(t, "Init container not ready yet")
return false, nil
}
// the init container must have terminated
if initStatus.State.Terminated == nil {
E2ELog(t, "Init container did not terminate yet")
return false, nil
}
if initStatus.State.Terminated.ExitCode != 0 {
return true, errors.New("the init container failed")
} else {
E2ELogf(t, "init container in pod %s has finished", name)
return true, nil
}
}
E2ELogf(t, "init container in pod %s not finished yet", name)
return false, nil
}
}
func runPod(t *testing.T, f *framework.Framework, namespace string, podToRun *corev1.Pod) (*corev1.Pod, error) {
pod, err := f.KubeClient.CoreV1().Pods(namespace).Create(goctx.TODO(), podToRun, metav1.CreateOptions{})
if err != nil {
return nil, err
}
if err := waitForPod(initContainerCompleted(t, f.KubeClient, pod.Name, namespace)); err != nil {
return nil, err
}
return pod, nil
}
// createAndRemoveEtcSecurettyOnNode creates a pod that creates the file /etc/securetty on the node and returns the pod
// object; when the caller deletes the pod, it removes the file before exiting
func createAndRemoveEtcSecurettyOnNode(t *testing.T, f *framework.Framework, namespace, name, nodeName string) (*corev1.Pod, error) {
return runPod(t, f, namespace, createAndRemoveEtcSecurettyPod(namespace, name, nodeName))
}
func taintNode(t *testing.T, f *framework.Framework, node *corev1.Node, taint corev1.Taint) error {
taintedNode := node.DeepCopy()
if taintedNode.Spec.Taints == nil {
taintedNode.Spec.Taints = []corev1.Taint{}
}
taintedNode.Spec.Taints = append(taintedNode.Spec.Taints, taint)
E2ELogf(t, "Tainting node: %s", taintedNode.Name)
return f.Client.Update(goctx.TODO(), taintedNode)
}
func removeNodeTaint(t *testing.T, f *framework.Framework, nodeName, taintKey string) error {
taintedNode := &corev1.Node{}
nodeKey := types.NamespacedName{Name: nodeName}
if err := f.Client.Get(goctx.TODO(), nodeKey, taintedNode); err != nil {
E2ELogf(t, "Couldn't get node: %s", nodeName)
return err
}
untaintedNode := taintedNode.DeepCopy()
untaintedNode.Spec.Taints = []corev1.Taint{}
for _, taint := range taintedNode.Spec.Taints {
if taint.Key != taintKey {
untaintedNode.Spec.Taints = append(untaintedNode.Spec.Taints, taint)
}
}
E2ELogf(t, "Removing taint from node: %s", nodeName)
return f.Client.Update(goctx.TODO(), untaintedNode)
}
func getReadyProfileBundle(t *testing.T, f *framework.Framework, name, namespace string) (*compv1alpha1.ProfileBundle, error) {
if err := waitForProfileBundleStatus(t, f, namespace, name, compv1alpha1.DataStreamValid); err != nil {
return nil, err
}
pb := &compv1alpha1.ProfileBundle{}
if err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, pb); err != nil {
return nil, err
}
return pb, nil
}
func writeToArtifactsDir(dir, scan, pod, container, log string) error {
logPath := path.Join(dir, fmt.Sprintf("%s_%s_%s.log", scan, pod, container))
logFile, err := os.Create(logPath)
if err != nil {
return err
}
// #nosec G307
defer logFile.Close()
_, err = io.WriteString(logFile, log)
if err != nil {
return err
}
logFile.Sync()
return nil
}
func logContainerOutput(t *testing.T, f *framework.Framework, namespace, name string) {
if !shouldLogContainerOutput {
return
}
// Try all container/init variants for each pod and the pod itself (self), log nothing if the container is not applicable.
containers := []string{"self", "api-resource-collector", "log-collector", "scanner", "content-container"}
artifacts := os.Getenv("ARTIFACT_DIR")
if artifacts == "" {
return
}
pods, err := getPodsForScan(f, name)
if err != nil {
E2ELogf(t, "Warning: Error getting pods for container logging: %s", err)
} else {
for _, pod := range pods {
for _, con := range containers {
logOpts := &corev1.PodLogOptions{}
if con != "self" {
logOpts.Container = con
}
req := f.KubeClient.CoreV1().Pods(namespace).GetLogs(pod.Name, logOpts)
podLogs, err := req.Stream(goctx.TODO())
if err != nil {
// Silence this error if the container is not valid for the pod
if !apierrors.IsBadRequest(err) {
E2ELogf(t, "error getting logs for %s/%s: reason: %v, err: %v", pod.Name, con, apierrors.ReasonForError(err), err)
}
continue
}
buf := new(bytes.Buffer)
_, err = io.Copy(buf, podLogs)
if err != nil {
E2ELogf(t, "error copying logs for %s/%s: %v", pod.Name, con, err)
continue
}
logs := buf.String()
if len(logs) == 0 {
E2ELogf(t, "no logs for %s/%s", pod.Name, con)
} else {
err := writeToArtifactsDir(artifacts, name, pod.Name, con, logs)
if err != nil {
E2ELogf(t, "error writing logs for %s/%s: %v", pod.Name, con, err)
} else {
E2ELogf(t, "wrote logs for %s/%s", pod.Name, con)
}
}
}
}
}
}
func reRunScan(t *testing.T, f *framework.Framework, scanName, namespace string) error {
scanKey := types.NamespacedName{Name: scanName, Namespace: namespace}
err := backoff.Retry(func() error {
foundScan := &compv1alpha1.ComplianceScan{}
geterr := f.Client.Get(goctx.TODO(), scanKey, foundScan)
if geterr != nil {
return geterr
}
scapCopy := foundScan.DeepCopy()
if scapCopy.Annotations == nil {
scapCopy.Annotations = make(map[string]string)
}
scapCopy.Annotations[compv1alpha1.ComplianceScanRescanAnnotation] = ""
return f.Client.Update(goctx.TODO(), scapCopy)
}, defaultBackoff)
if err != nil {
return fmt.Errorf("couldn't update scan to re-launch it: %w", err)
}
E2ELogf(t, "Scan re-launched")
return nil
}
func createImageStream(f *framework.Framework, ctx *framework.Context, iSName, ns, imgPath string) error {
stream := &imagev1.ImageStream{
TypeMeta: metav1.TypeMeta{APIVersion: imagev1.SchemeGroupVersion.String(), Kind: "ImageStream"},
ObjectMeta: metav1.ObjectMeta{Name: iSName, Namespace: ns},
Spec: imagev1.ImageStreamSpec{
Tags: []imagev1.TagReference{
{
Name: "latest",
From: &corev1.ObjectReference{
Kind: "DockerImage",
Name: imgPath,
},
ReferencePolicy: imagev1.TagReferencePolicy{
Type: imagev1.LocalTagReferencePolicy,
},
},
},
},
}
return f.Client.Create(goctx.TODO(), stream, getCleanupOpts(ctx))
}
func updateImageStreamTag(f *framework.Framework, iSName, ns, imgPath string) error {
foundstream := &imagev1.ImageStream{}
key := types.NamespacedName{Name: iSName, Namespace: ns}
if err := f.Client.Get(goctx.TODO(), key, foundstream); err != nil {
return err
}
stream := foundstream.DeepCopy()
// Update the tracked image reference
stream.Spec.Tags[0].From.Name = imgPath
return f.Client.Update(goctx.TODO(), stream)
}
func updateSuiteContentImage(t *testing.T, f *framework.Framework, newImg, suiteName, suiteNs string) error {
var lastErr error
timeoutErr := wait.Poll(retryInterval, timeout, func() (bool, error) {
suite := &compv1alpha1.ComplianceSuite{}
// Now update the suite with a different image that contains different remediations
lastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: suiteName, Namespace: suiteNs}, suite)
if lastErr != nil {
E2ELogf(t, "Got error while trying to get suite %s. Retrying... - %s", suiteName, lastErr)
return false, nil
}
modSuite := suite.DeepCopy()
modSuite.Spec.Scans[0].ContentImage = newImg
lastErr = f.Client.Update(goctx.TODO(), modSuite)
if lastErr != nil {
E2ELogf(t, "Got error while trying to update suite %s. Retrying... - %s", suiteName, lastErr)
return false, nil
}
return true, nil
})
if timeoutErr != nil {
return fmt.Errorf("couldn't update suite's content image. Timed out: %w", timeoutErr)
}
if lastErr != nil {
return fmt.Errorf("couldn't update suite's content image. Errored out: %w", lastErr)
}
return nil
}
func assertNoErrorNorTimeout(t *testing.T, err, timeoutErr error, message string) {
if finalErr := processErrorOrTimeout(err, timeoutErr, message); finalErr != nil {
E2EFatalf(t, "%s", finalErr)
}
}
func processErrorOrTimeout(err, timeoutErr error, message string) error {
// Error in function call
if err != nil {
return fmt.Errorf("Got error when %s: %w", message, err)
}
// Timeout
if timeoutErr != nil {
return fmt.Errorf("Timed out when %s: %w", message, timeoutErr)
}
return nil
}
|
[
"\"CONTENT_IMAGE\"",
"\"LOG_CONTAINER_OUTPUT\"",
"\"BROKEN_CONTENT_IMAGE\"",
"\"ARTIFACT_DIR\""
] |
[] |
[
"BROKEN_CONTENT_IMAGE",
"CONTENT_IMAGE",
"ARTIFACT_DIR",
"LOG_CONTAINER_OUTPUT"
] |
[]
|
["BROKEN_CONTENT_IMAGE", "CONTENT_IMAGE", "ARTIFACT_DIR", "LOG_CONTAINER_OUTPUT"]
|
go
| 4 | 0 | |
main.go
|
package main
import (
"context"
"fmt"
"log"
"os"
"os/signal"
"strings"
"syscall"
"cloud.google.com/go/translate"
"github.com/bwmarrin/discordgo"
"github.com/joho/godotenv"
"golang.org/x/text/language"
)
var (
ctx context.Context
client *translate.Client
)
func init() {
err := godotenv.Load(".env")
if err != nil {
log.Fatal(err)
}
ctx = context.Background()
client, err = translate.NewClient(ctx)
if err != nil {
log.Fatal(err)
}
// Do not defer client.Close() here: init returns immediately, and the client
// must stay open for the lifetime of the bot.
}
func main() {
botToken := os.Getenv("DISCORD_BOT_TOKEN")
if botToken == "" {
log.Fatal("DISCORD_BOT_TOKEN is not set.")
}
dg, err := discordgo.New("Bot " + botToken)
if err != nil {
log.Fatal(err)
}
defer dg.Close()
dg.AddHandler(messageCreate)
dg.Identify.Intents = discordgo.IntentsGuildMessages
err = dg.Open()
if err != nil {
log.Fatal(err)
}
fmt.Println("Bot is now running. Press CTRL-C to exit.")
sc := make(chan os.Signal, 1)
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM) // os.Kill cannot be trapped; SIGINT already covers os.Interrupt
<-sc
}
func messageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {
if m.Author.ID == s.State.User.ID {
return
}
roles, err := s.GuildRoles(m.GuildID)
if err != nil {
return
}
var detectedRole *discordgo.Role
for _, role := range roles {
if role.Name == "translation" {
detectedRole = role
break
}
}
if detectedRole == nil {
// No "translation" role exists in this guild, so there is nothing to translate.
return
}
channel, err := s.Channel(m.ChannelID)
if err != nil {
return
}
isSendable := false
for _, permission := range channel.PermissionOverwrites {
if permission.ID == detectedRole.ID && permission.Type == discordgo.PermissionOverwriteTypeRole && permission.Allow == 2048 {
isSendable = true
break
}
}
if !isSendable {
return
}
mes := m.Content
lang, err := DetectLanguage(mes)
if err != nil {
return
}
trans := Translate(mes, lang)
s.ChannelMessageSend(m.ChannelID, trans)
}
func DetectLanguage(mes string) (language.Tag, error) {
lang, err := client.DetectLanguage(ctx, []string{mes})
if err != nil {
// Return the error instead of terminating the whole bot on a transient API failure.
return language.English, err
}
if len(lang) == 0 || len(lang[0]) == 0 {
return language.English, fmt.Errorf("DetectLanguage return value empty")
}
return lang[0][0].Language, nil
}
func Translate(mes string, lang language.Tag) string {
targetLang := language.English
option := &translate.Options{
Source: language.Japanese,
Format: translate.Text,
}
if lang != language.Japanese {
targetLang = language.Japanese
option.Source = lang
}
res, err := client.Translate(ctx, []string{mes}, targetLang, option)
if err != nil {
log.Fatal(err)
}
trans := []string{}
for _, r := range res {
trans = append(trans, r.Text)
}
return strings.Join(trans, "\n")
}
|
[
"\"DISCORD_BOT_TOKEN\""
] |
[] |
[
"DISCORD_BOT_TOKEN"
] |
[]
|
["DISCORD_BOT_TOKEN"]
|
go
| 1 | 0 | |
kweetservice/kweetservice/wsgi.py
|
"""
WSGI config for kweetservice project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kweetservice.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
wsgi.py
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/nginx/integration/integration_suite_test.go
|
package integration_test
import (
"encoding/json"
"flag"
"fmt"
"net/url"
"os"
"os/exec"
"path/filepath"
"time"
"github.com/cloudfoundry/libbuildpack/cutlass"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
var bpDir string
var buildpackVersion string
var packagedBuildpack cutlass.VersionedBuildpackPackage
func init() {
flag.StringVar(&buildpackVersion, "version", "", "version to use (builds if empty)")
flag.BoolVar(&cutlass.Cached, "cached", true, "cached buildpack")
flag.StringVar(&cutlass.DefaultMemory, "memory", "64M", "default memory for pushed apps")
flag.StringVar(&cutlass.DefaultDisk, "disk", "64M", "default disk for pushed apps")
flag.Parse()
}
var _ = SynchronizedBeforeSuite(func() []byte {
// Run once
if buildpackVersion == "" {
packagedBuildpack, err := cutlass.PackageUniquelyVersionedBuildpack(os.Getenv("CF_STACK"), ApiHasStackAssociation())
Expect(err).NotTo(HaveOccurred())
data, err := json.Marshal(packagedBuildpack)
Expect(err).NotTo(HaveOccurred())
return data
}
return []byte{}
}, func(data []byte) {
// Run on all nodes
var err error
if len(data) > 0 {
err = json.Unmarshal(data, &packagedBuildpack)
Expect(err).NotTo(HaveOccurred())
buildpackVersion = packagedBuildpack.Version
}
bpDir, err = cutlass.FindRoot()
Expect(err).NotTo(HaveOccurred())
Expect(cutlass.CopyCfHome()).To(Succeed())
cutlass.SeedRandom()
cutlass.DefaultStdoutStderr = GinkgoWriter
SetDefaultEventuallyTimeout(10 * time.Second)
})
var _ = SynchronizedAfterSuite(func() {
// Run on all nodes
}, func() {
// Run once
Expect(cutlass.RemovePackagedBuildpack(packagedBuildpack)).To(Succeed())
Expect(cutlass.DeleteOrphanedRoutes()).To(Succeed())
})
func TestIntegration(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Integration Suite")
}
func PushAppAndConfirm(app *cutlass.App) {
Expect(app.Push()).To(Succeed())
Eventually(func() ([]string, error) { return app.InstanceStates() }, 20*time.Second).Should(Equal([]string{"RUNNING"}))
Expect(app.ConfirmBuildpack(buildpackVersion)).To(Succeed())
}
func Restart(app *cutlass.App) {
Expect(app.Restart()).To(Succeed())
Eventually(func() ([]string, error) { return app.InstanceStates() }, 20*time.Second).Should(Equal([]string{"RUNNING"}))
}
func ApiHasTask() bool {
supported, err := cutlass.ApiGreaterThan("2.75.0")
Expect(err).NotTo(HaveOccurred())
return supported
}
func ApiHasMultiBuildpack() bool {
supported, err := cutlass.ApiGreaterThan("2.90.0")
Expect(err).NotTo(HaveOccurred())
return supported
}
func ApiHasStackAssociation() bool {
supported, err := cutlass.ApiGreaterThan("2.113.0")
Expect(err).NotTo(HaveOccurred())
return supported
}
func AssertUsesProxyDuringStagingIfPresent(fixtureName string) {
Context("with an uncached buildpack", func() {
BeforeEach(func() {
if cutlass.Cached {
Skip("Running cached tests")
}
})
It("uses a proxy during staging if present", func() {
proxy, err := cutlass.NewProxy()
Expect(err).To(BeNil())
defer proxy.Close()
bpFile := filepath.Join(bpDir, buildpackVersion+"tmp")
cmd := exec.Command("cp", packagedBuildpack.File, bpFile)
err = cmd.Run()
Expect(err).To(BeNil())
defer os.Remove(bpFile)
traffic, built, _, err := cutlass.InternetTraffic(
bpDir,
filepath.Join("fixtures", fixtureName),
bpFile,
[]string{"HTTP_PROXY=" + proxy.URL, "HTTPS_PROXY=" + proxy.URL},
)
Expect(err).To(BeNil())
Expect(built).To(BeTrue())
destUrl, err := url.Parse(proxy.URL)
Expect(err).To(BeNil())
Expect(cutlass.UniqueDestination(
traffic, fmt.Sprintf("%s.%s", destUrl.Hostname(), destUrl.Port()),
)).To(BeNil())
})
})
}
func AssertNoInternetTraffic(fixtureName string) {
It("has no traffic", func() {
if !cutlass.Cached {
Skip("Running uncached tests")
}
bpFile := filepath.Join(bpDir, buildpackVersion+"tmp")
cmd := exec.Command("cp", packagedBuildpack.File, bpFile)
err := cmd.Run()
Expect(err).To(BeNil())
defer os.Remove(bpFile)
traffic, built, _, err := cutlass.InternetTraffic(
bpDir,
filepath.Join("fixtures", fixtureName),
bpFile,
[]string{},
)
Expect(err).To(BeNil())
Expect(built).To(BeTrue())
Expect(traffic).To(BeEmpty())
})
}
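// Illustrative sketch only (the fixture name is hypothetical): these shared helpers
// are intended to be invoked from the individual spec files of this suite, e.g.
//
//	var _ = Describe("deploying a fixture app", func() {
//		AssertUsesProxyDuringStagingIfPresent("mockapp")
//		AssertNoInternetTraffic("mockapp")
//	})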
|
[
"\"CF_STACK\""
] |
[] |
[
"CF_STACK"
] |
[]
|
["CF_STACK"]
|
go
| 1 | 0 | |
pkg/tests/helpers.go
|
package tests
import (
"fmt"
"io"
"io/ioutil"
"os"
"strings"
)
func IsDebugLog() bool {
return strings.ToLower(os.Getenv("JX_TEST_DEBUG")) == "true"
}
func Debugf(message string, args ...interface{}) {
if IsDebugLog() {
fmt.Printf(message, args...)
}
}
// Output returns the output to use for tests
func Output() io.Writer {
if IsDebugLog() {
return os.Stdout
}
return ioutil.Discard
}
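// debugUsageSketch is illustrative only and not part of this package's API: it shows
// how Debugf and Output are meant to be combined so that verbose test output is
// printed only when debug logging is enabled.
func debugUsageSketch(resourceCount int) {
	Debugf("checked %d resources\n", resourceCount)
	fmt.Fprintln(Output(), "verbose details; discarded unless debug logging is enabled")
}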
|
[
"\"JX_TEST_DEBUG\""
] |
[] |
[
"JX_TEST_DEBUG"
] |
[]
|
["JX_TEST_DEBUG"]
|
go
| 1 | 0 | |
dds_cli/user.py
|
"""User module."""
###############################################################################
# IMPORTS ########################################################### IMPORTS #
###############################################################################
# Standard library
import datetime
import getpass
import logging
import os
import pathlib
import requests
import simplejson
import stat
import subprocess
# Installed
import rich
# Own modules
import dds_cli
from dds_cli import exceptions
from dds_cli.utils import get_token_expiration_time, readable_timedelta
import dds_cli.utils
###############################################################################
# START LOGGING CONFIG ################################# START LOGGING CONFIG #
###############################################################################
LOG = logging.getLogger(__name__)
###############################################################################
# CLASSES ########################################################### CLASSES #
###############################################################################
class User:
"""Represents a DDS user.
When instantiating, an authentication token will be read from a file or
renewed from the DDS API if the saved token is not found or has expired."""
def __init__(
self,
force_renew_token: bool = False,
no_prompt: bool = False,
token_path: str = None,
):
self.force_renew_token = force_renew_token
self.no_prompt = no_prompt
self.token = None
self.token_path = token_path
# Fetch encrypted JWT token or authenticate against API
self.__retrieve_token()
@property
def token_dict(self):
"""Get token as authorization dict for requests."""
return {"Authorization": f"Bearer {self.token}"}
# Private methods ######################### Private methods #
def __retrieve_token(self):
"""Fetch saved token from file otherwise authenticate user and saves the new token."""
token_file = TokenFile(token_path=self.token_path)
if not self.force_renew_token:
LOG.debug("Retrieving token.")
# Get token from file
try:
LOG.debug("Checking if token file exists.")
self.token = token_file.read_token()
except dds_cli.exceptions.TokenNotFoundError:
self.token = None
# Authenticate user and save token
if not self.token:
if not self.force_renew_token:
LOG.info(
"No saved token found, or token has expired, proceeding with authentication"
)
else:
LOG.info("Attempting to create the session token")
self.token = self.__authenticate_user()
token_file.save_token(self.token)
def __authenticate_user(self):
"""Authenticates the username and password via a call to the API."""
LOG.debug("Starting authentication on the API.")
if self.no_prompt:
raise exceptions.AuthenticationError(
message=(
"Authentication not possible when running with --no-prompt. "
"Please run the `dds auth login` command and authenticate interactively."
)
)
username = rich.prompt.Prompt.ask("DDS username")
password = getpass.getpass(prompt="DDS password: ")
if password == "":
raise exceptions.AuthenticationError(
message="Non-empty password needed to be able to authenticate."
)
response_json = dds_cli.utils.perform_request(
dds_cli.DDSEndpoint.ENCRYPTED_TOKEN,
headers=None,
method="get",
auth=(username, password),
error_message="Failed to authenticate user",
)
# Token received from API needs to be completed with a mfa timestamp
partial_auth_token = response_json.get("token")
# Verify 2fa email token
LOG.info(
"Please enter the one-time authentication code sent "
"to your email address (leave empty to exit):"
)
done = False
while not done:
entered_one_time_code = rich.prompt.Prompt.ask("Authentication one-time code")
if entered_one_time_code == "":
raise exceptions.AuthenticationError(
message="Exited due to no one-time authentication code entered."
)
if not entered_one_time_code.isdigit():
LOG.info("Please enter a valid one-time code. It should consist of only digits.")
continue
if len(entered_one_time_code) != 8:
LOG.info(
"Please enter a valid one-time code. It should consist of 8 digits "
f"(you entered {len(entered_one_time_code)} digits)."
)
continue
response_json = dds_cli.utils.perform_request(
dds_cli.DDSEndpoint.SECOND_FACTOR,
method="get",
headers={"Authorization": f"Bearer {partial_auth_token}"},
json={"HOTP": entered_one_time_code},
error_message="Failed to authenticate with second factor",
)
# Step out of the while-loop
done = True
# Get token from response
token = response_json.get("token")
if not token:
raise exceptions.AuthenticationError(
message="Missing token in authentication response."
)
LOG.debug(f"User {username} granted access to the DDS")
return token
@staticmethod
def get_user_name_if_logged_in(token_path=None):
"""Returns a user name if logged in, otherwise None"""
tokenfile = TokenFile(token_path=token_path)
username = None
if tokenfile.file_exists():
token = tokenfile.read_token()
if token and not tokenfile.token_expired(token=token):
try:
response_json = dds_cli.utils.perform_request(
dds_cli.DDSEndpoint.DISPLAY_USER_INFO,
method="get",
headers={"Authorization": f"Bearer {token}"},
error_message="Failed to get a username",
)
# Get response
username = response_json["info"]["username"]
except Exception:
pass
return username
class TokenFile:
"""A class to manage the saved token."""
def __init__(self, token_path=None):
if token_path is None:
self.token_file = dds_cli.TOKEN_FILE
else:
self.token_file = pathlib.Path(os.path.expanduser(token_path))
def read_token(self):
"""Attempts to fetch a valid token from the token file.
Returns None if no valid token can be found."""
if not self.file_exists():
LOG.debug(f"Token file {self.token_file} does not exist.")
return None
self.check_token_file_permissions()
# Read token from file
with self.token_file.open(mode="r") as file:
token = file.read()
if not token:
raise exceptions.TokenNotFoundError(message="Token file is empty.")
if self.token_expired(token=token):
LOG.debug("No token retrieved from file, will fetch new token from API")
return None
LOG.debug("Token retrieved from file.")
return token
def file_exists(self):
"""Returns True if the token file exists."""
return self.token_file.is_file()
def save_token(self, token):
"""Saves the token to the token file."""
if not self.token_file.is_file():
self.token_file.touch(mode=0o600)
self.check_token_file_permissions()
with self.token_file.open("w") as file:
file.write(token)
if os.name == "nt":
cli_username = os.environ.get("USERNAME")
try:
subprocess.check_call(
[
"icacls.exe",
str(self.token_file),
"/inheritance:r",
"/grant",
f"{cli_username}:(R,W)",
],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
except subprocess.CalledProcessError as exc:
LOG.error("Failed to set token file permissions")
LOG.debug("New token saved to file.")
def delete_token(self):
"""Deletes the token file."""
if self.file_exists():
self.token_file.unlink()
def check_token_file_permissions(self):
"""Verify permissions for token file. Raises dds_cli.exceptions.DDSCLIException if
permissions are not properly set.
Returns None otherwise.
"""
if os.name != "nt":
st_mode = os.stat(self.token_file).st_mode
permissions_octal = oct(stat.S_IMODE(st_mode))
permissions_readable = stat.filemode(st_mode)
if permissions_octal != "0o600":
raise exceptions.DDSCLIException(
message=f"Token file permissions are not properly set, (got {permissions_readable} instead of required '-rw-------'). Please remove {self.token_file} and rerun the command."
)
else:
LOG.info("Unable to confirm whether file permissions are correct on Windows.")
def token_expired(self, token):
"""Check if the token has expired or is about to expire soon based on the UTC time.
:param token: The DDS token that is obtained after successful basic and two-factor authentication.
Token is already obtained before coming here, so not expected to be None.
Returns True if the token has expired, False otherwise.
"""
expiration_time = self.__token_dates(token=token)
time_to_expire = expiration_time - datetime.datetime.utcnow()
if expiration_time <= datetime.datetime.utcnow():
LOG.debug("Token has expired. Now deleting it and fetching new token.")
self.delete_token()
return True
elif time_to_expire < dds_cli.TOKEN_EXPIRATION_WARNING_THRESHOLD:
LOG.warning(
f"Saved token will expire in {readable_timedelta(time_to_expire)}, "
f"please consider renewing the session using the 'dds auth login' command."
)
return False
def token_report(self, token):
"""Produce report of token status.
:param token: The DDS token that is obtained after successful basic and two-factor authentication.
Token is already obtained before coming here, so not expected to be None.
"""
expiration_time = self.__token_dates(token=token)
time_to_expire = expiration_time - datetime.datetime.utcnow()
expiration_message = f"Token will expire in {readable_timedelta(time_to_expire)}!"
if expiration_time <= datetime.datetime.utcnow():
markup_color = "red"
sign = ":no_entry_sign:"
message = "Token has expired!"
elif time_to_expire < dds_cli.TOKEN_EXPIRATION_WARNING_THRESHOLD:
markup_color = "yellow"
sign = ":warning-emoji:"
message = ""
else:
markup_color = "green"
sign = ":white_check_mark:"
message = "Token is OK!"
if message:
LOG.info(f"[{markup_color}]{sign} {message} {sign} [/{markup_color}]")
LOG.info(f"[{markup_color}]{sign} {expiration_message} {sign} [/{markup_color}]")
# Private methods ############################################################ Private methods #
def __token_dates(self, token):
"""Returns the expiration time in UTC that is extracted from the token jose header."""
expiration_time = get_token_expiration_time(token=token)
if expiration_time:
return datetime.datetime.fromisoformat(expiration_time)
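# Illustrative sketch only, not part of dds_cli: a minimal example of how the classes
# above are meant to interact; the token path below is a hypothetical placeholder.
def _example_authorization_headers(token_path="~/.dds_example_token"):
    """Authenticate (or reuse a saved token) and return request headers."""
    user = User(token_path=token_path)
    return user.token_dict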
|
[] |
[] |
[
"USERNAME"
] |
[]
|
["USERNAME"]
|
python
| 1 | 0 | |
modules/orchestrator/scheduler/scheduler_utils.go
|
// Copyright (c) 2021 Terminus, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scheduler
import (
"bytes"
"encoding/json"
"os"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/erda-project/erda/pkg/http/httpclient"
)
// GetDCOSTokenAuthPeriodically refreshes the DC/OS auth token in a loop; call it as a goroutine.
func GetDCOSTokenAuthPeriodically() {
client := httpclient.New()
waitTime := 10 * time.Millisecond
for {
select {
case <-time.After(waitTime):
token, err := getTokenAuthAndSetEnv(client)
if err != nil {
waitTime = 2 * time.Minute
os.Setenv("AUTH_TOKEN", "")
logrus.Errorf("get auth token error: %v", err)
break
}
// Update every 24 hours
waitTime = 24 * time.Hour
if len(token) > 0 {
os.Setenv("AUTH_TOKEN", token)
logrus.Debugf("get auth token: %s", token)
} else {
// If err is nil and token is empty, it means that the user has not set token auth
os.Unsetenv("AUTH_TOKEN")
logrus.Debugf("clear auth token")
}
}
}
}
func getTokenAuthAndSetEnv(client *httpclient.HTTPClient) (string, error) {
dcosAddr := os.Getenv("DCOS_ADDR")
id := os.Getenv("DCOS_UID")
password := os.Getenv("DCOS_PASSWORD")
// uid and password required
if len(id) == 0 || len(password) == 0 {
return "", nil
}
// dcosAddr is optional, default is internal dcos cluster addr
if len(dcosAddr) == 0 {
dcosAddr = "master.mesos"
}
logrus.Debugf("id: %v, password: %v, dcosAddr: %v", id, password, dcosAddr)
var b bytes.Buffer
type IdAndPassword struct {
Uid string `json:"uid"`
Password string `json:"password"`
}
requestIdAndPwd := IdAndPassword{
Uid: id,
Password: password,
}
type Token struct {
AuthToken string `json:"token,omitempty"`
}
var token Token
resp, err := client.Post(dcosAddr).
Path("/acs/api/v1/auth/login").
JSONBody(&requestIdAndPwd).
Header("Content-Type", "application/json").
Do().
Body(&b)
if err != nil {
return "", err
}
if !resp.IsOK() {
return "", errors.Errorf("get token auth status code: %v, body: %v", resp.StatusCode(), b.String())
}
r := bytes.NewReader(b.Bytes())
if err := json.NewDecoder(r).Decode(&token); err != nil {
return "", err
}
return token.AuthToken, nil
}
|
[
"\"DCOS_ADDR\"",
"\"DCOS_UID\"",
"\"DCOS_PASSWORD\""
] |
[] |
[
"DCOS_ADDR",
"DCOS_PASSWORD",
"DCOS_UID"
] |
[]
|
["DCOS_ADDR", "DCOS_PASSWORD", "DCOS_UID"]
|
go
| 3 | 0 | |
test/integration/maincluster_test.go
|
package integration
import (
"fmt"
"log"
"os"
"reflect"
"regexp"
"strings"
"testing"
"github.com/kubernetes-incubator/kube-aws/cfnstack"
controlplane_config "github.com/kubernetes-incubator/kube-aws/core/controlplane/config"
"github.com/kubernetes-incubator/kube-aws/core/root"
"github.com/kubernetes-incubator/kube-aws/core/root/config"
"github.com/kubernetes-incubator/kube-aws/model"
"github.com/kubernetes-incubator/kube-aws/plugin/pluginmodel"
"github.com/kubernetes-incubator/kube-aws/test/helper"
)
type ConfigTester func(c *config.Config, t *testing.T)
type ClusterTester func(c root.Cluster, t *testing.T)
// Integration testing with real AWS services including S3, KMS, CloudFormation
func TestMainClusterConfig(t *testing.T) {
kubeAwsSettings := newKubeAwsSettingsFromEnv(t)
s3URI, s3URIExists := os.LookupEnv("KUBE_AWS_S3_DIR_URI")
if !s3URIExists || s3URI == "" {
s3URI = "s3://mybucket/mydir"
t.Logf(`Falling back s3URI to a stub value "%s" for tests of validating stack templates. No assets will actually be uploaded to S3`, s3URI)
} else {
log.Printf("s3URI is %s", s3URI)
}
s3Loc, err := cfnstack.S3URIFromString(s3URI)
if err != nil {
t.Errorf("failed to parse s3 uri: %v", err)
t.FailNow()
}
s3Bucket := s3Loc.Bucket()
s3Dir := s3Loc.PathComponents()[0]
firstAz := kubeAwsSettings.region + "c"
hasDefaultEtcdSettings := func(c *config.Config, t *testing.T) {
subnet1 := model.NewPublicSubnet(firstAz, "10.0.0.0/24")
subnet1.Name = "Subnet0"
expected := controlplane_config.EtcdSettings{
Etcd: model.Etcd{
EC2Instance: model.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
Tenancy: "default",
RootVolume: model.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
},
DataVolume: model.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Subnets: model.Subnets{
subnet1,
},
},
}
actual := c.EtcdSettings
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"EtcdSettings didn't match: expected=%v actual=%v",
expected,
actual,
)
}
}
hasDefaultExperimentalFeatures := func(c *config.Config, t *testing.T) {
expected := controlplane_config.Experimental{
Admission: controlplane_config.Admission{
PodSecurityPolicy: controlplane_config.PodSecurityPolicy{
Enabled: false,
},
AlwaysPullImages: controlplane_config.AlwaysPullImages{
Enabled: false,
},
DenyEscalatingExec: controlplane_config.DenyEscalatingExec{
Enabled: false,
},
Priority: controlplane_config.Priority{
Enabled: false,
},
MutatingAdmissionWebhook: controlplane_config.MutatingAdmissionWebhook{
Enabled: false,
},
ValidatingAdmissionWebhook: controlplane_config.ValidatingAdmissionWebhook{
Enabled: false,
},
PersistentVolumeClaimResize: controlplane_config.PersistentVolumeClaimResize{
Enabled: false,
},
},
AuditLog: controlplane_config.AuditLog{
Enabled: false,
MaxAge: 30,
LogPath: "/var/log/kube-apiserver-audit.log",
},
Authentication: controlplane_config.Authentication{
Webhook: controlplane_config.Webhook{
Enabled: false,
CacheTTL: "5m0s",
Config: "",
},
},
AwsEnvironment: controlplane_config.AwsEnvironment{
Enabled: false,
},
AwsNodeLabels: controlplane_config.AwsNodeLabels{
Enabled: false,
},
ClusterAutoscalerSupport: model.ClusterAutoscalerSupport{
Enabled: true,
},
TLSBootstrap: controlplane_config.TLSBootstrap{
Enabled: false,
},
EphemeralImageStorage: controlplane_config.EphemeralImageStorage{
Enabled: false,
Disk: "xvdb",
Filesystem: "xfs",
},
KIAMSupport: controlplane_config.KIAMSupport{
Enabled: false,
},
Kube2IamSupport: controlplane_config.Kube2IamSupport{
Enabled: false,
},
GpuSupport: controlplane_config.GpuSupport{
Enabled: false,
Version: "",
InstallImage: "shelmangroup/coreos-nvidia-driver-installer:latest",
},
LoadBalancer: controlplane_config.LoadBalancer{
Enabled: false,
},
Oidc: model.Oidc{
Enabled: false,
IssuerUrl: "https://accounts.google.com",
ClientId: "kubernetes",
UsernameClaim: "email",
GroupsClaim: "groups",
},
NodeDrainer: model.NodeDrainer{
Enabled: false,
DrainTimeout: 5,
},
}
actual := c.Experimental
if !reflect.DeepEqual(expected, actual) {
t.Errorf("experimental settings didn't match :\nexpected=%v\nactual=%v", expected, actual)
}
if !c.WaitSignal.Enabled() {
t.Errorf("waitSignal should be enabled but was not: %v", c.WaitSignal)
}
if c.WaitSignal.MaxBatchSize() != 1 {
t.Errorf("waitSignal.maxBatchSize should be 1 but was %d: %v", c.WaitSignal.MaxBatchSize(), c.WaitSignal)
}
if len(c.NodePools) > 0 && c.NodePools[0].ClusterAutoscalerSupport.Enabled {
t.Errorf("ClusterAutoscalerSupport must be disabled by default on node pools")
}
}
everyPublicSubnetHasRouteToIGW := func(c *config.Config, t *testing.T) {
for i, s := range c.PublicSubnets() {
if !s.ManageRouteToInternet() {
t.Errorf("Public subnet %d should have a route to the IGW but it doesn't: %+v", i, s)
}
}
}
hasDefaultLaunchSpecifications := func(c *config.Config, t *testing.T) {
expected := []model.LaunchSpecification{
{
WeightedCapacity: 1,
InstanceType: "c4.large",
SpotPrice: "",
RootVolume: model.NewGp2RootVolume(30),
},
{
WeightedCapacity: 2,
InstanceType: "c4.xlarge",
SpotPrice: "",
RootVolume: model.NewGp2RootVolume(60),
},
}
p := c.NodePools[0]
actual := p.NodePoolConfig.SpotFleet.LaunchSpecifications
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"LaunchSpecifications didn't match: expected=%v actual=%v",
expected,
actual,
)
}
globalSpotPrice := p.NodePoolConfig.SpotFleet.SpotPrice
if globalSpotPrice != "0.06" {
t.Errorf("Default spot price is expected to be 0.06 but was: %s", globalSpotPrice)
}
}
spotFleetBasedNodePoolHasWaitSignalDisabled := func(c *config.Config, t *testing.T) {
p := c.NodePools[0]
if !p.SpotFleet.Enabled() {
t.Errorf("1st node pool is expected to be a spot fleet based one but was not: %+v", p)
}
if p.WaitSignal.Enabled() {
t.Errorf(
"WaitSignal should be enabled but was not: %v",
p.WaitSignal,
)
}
}
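// asgBasedNodePoolHasWaitSignalEnabled asserts that the first node pool is ASG based and therefore has its wait signal enabled.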
asgBasedNodePoolHasWaitSignalEnabled := func(c *config.Config, t *testing.T) {
p := c.NodePools[0]
if p.SpotFleet.Enabled() {
t.Errorf("1st node pool is expected to be an asg-based one but was not: %+v", p)
}
if !p.WaitSignal.Enabled() {
t.Errorf(
"WaitSignal should be disabled but was not: %v",
p.WaitSignal,
)
}
}
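// hasPrivateSubnetsWithManagedNGWs returns a ConfigTester asserting that every private subnet gets a kube-aws-managed NAT gateway and no route to the IGW.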
hasPrivateSubnetsWithManagedNGWs := func(numExpectedNum int) func(c *config.Config, t *testing.T) {
return func(c *config.Config, t *testing.T) {
for i, s := range c.PrivateSubnets() {
if !s.ManageNATGateway() {
t.Errorf("NAT gateway for the existing private subnet #%d should be created by kube-aws but was not", i)
}
if s.ManageRouteToInternet() {
t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
}
}
}
}
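// hasSpecificNumOfManagedNGWsWithUnmanagedEIPs returns a ConfigTester asserting the expected number of NAT gateways,
// each managed by kube-aws with managed routes but a pre-existing, unmanaged EIP.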
hasSpecificNumOfManagedNGWsWithUnmanagedEIPs := func(ngwExpectedNum int) func(c *config.Config, t *testing.T) {
return func(c *config.Config, t *testing.T) {
ngwActualNum := len(c.NATGateways())
if ngwActualNum != ngwExpectedNum {
t.Errorf("Number of NAT gateways(%d) doesn't match with the expexted one: %d", ngwActualNum, ngwExpectedNum)
}
for i, n := range c.NATGateways() {
if !n.ManageNATGateway() {
t.Errorf("NGW #%d is expected to be managed by kube-aws but was not: %+v", i, n)
}
if n.ManageEIP() {
t.Errorf("EIP for NGW #%d is expected to be unmanaged by kube-aws but was not: %+v", i, n)
}
if !n.ManageRoute() {
t.Errorf("Routes for NGW #%d is expected to be managed by kube-aws but was not: %+v", i, n)
}
}
}
}
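// hasSpecificNumOfManagedNGWsAndEIPs returns a ConfigTester asserting the expected number of NAT gateways,
// each with its NGW, EIP and routes all managed by kube-aws.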
hasSpecificNumOfManagedNGWsAndEIPs := func(ngwExpectedNum int) func(c *config.Config, t *testing.T) {
return func(c *config.Config, t *testing.T) {
ngwActualNum := len(c.NATGateways())
if ngwActualNum != ngwExpectedNum {
t.Errorf("Number of NAT gateways(%d) doesn't match with the expexted one: %d", ngwActualNum, ngwExpectedNum)
}
for i, n := range c.NATGateways() {
if !n.ManageNATGateway() {
t.Errorf("NGW #%d is expected to be managed by kube-aws but was not: %+v", i, n)
}
if !n.ManageEIP() {
t.Errorf("EIP for NGW #%d is expected to be managed by kube-aws but was not: %+v", i, n)
}
if !n.ManageRoute() {
t.Errorf("Routes for NGW #%d is expected to be managed by kube-aws but was not: %+v", i, n)
}
}
}
}
hasTwoManagedNGWsAndEIPs := hasSpecificNumOfManagedNGWsAndEIPs(2)
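// hasNoManagedNGWsButSpecificNumOfRoutesToUnmanagedNGWs returns a ConfigTester asserting the expected number of
// pre-existing NAT gateways whose NGWs and EIPs are unmanaged while the routes to them are still managed by kube-aws.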
hasNoManagedNGWsButSpecificNumOfRoutesToUnmanagedNGWs := func(ngwExpectedNum int) func(c *config.Config, t *testing.T) {
return func(c *config.Config, t *testing.T) {
ngwActualNum := len(c.NATGateways())
if ngwActualNum != ngwExpectedNum {
t.Errorf("Number of NAT gateways(%d) doesn't match with the expexted one: %d", ngwActualNum, ngwExpectedNum)
}
for i, n := range c.NATGateways() {
if n.ManageNATGateway() {
t.Errorf("NGW #%d is expected to be unmanaged by kube-aws but was not: %+v", i, n)
}
if n.ManageEIP() {
t.Errorf("EIP for NGW #%d is expected to be unmanaged by kube-aws but was not: %+v", i, n)
}
if !n.ManageRoute() {
t.Errorf("Routes for NGW #%d is expected to be managed by kube-aws but was not: %+v", i, n)
}
}
}
}
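// hasNoNGWsOrEIPsOrRoutes asserts that the cluster defines no NAT gateways at all.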
hasNoNGWsOrEIPsOrRoutes := func(c *config.Config, t *testing.T) {
ngwActualNum := len(c.NATGateways())
ngwExpectedNum := 0
if ngwActualNum != ngwExpectedNum {
t.Errorf("Number of NAT gateways(%d) doesn't match with the expexted one: %d", ngwActualNum, ngwExpectedNum)
}
}
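// hasDefaultCluster asserts that the generated assets contain the root and control-plane stack templates
// uploaded under the expected S3 keys.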
hasDefaultCluster := func(c root.Cluster, t *testing.T) {
assets, err := c.Assets()
if err != nil {
t.Errorf("failed to list assets: %v", err)
t.FailNow()
}
t.Run("Assets/RootStackTemplate", func(t *testing.T) {
cluster := kubeAwsSettings.clusterName
stack := kubeAwsSettings.clusterName
file := "stack.json"
expected := model.Asset{
Content: "",
AssetLocation: model.AssetLocation{
ID: model.NewAssetID(stack, file),
Bucket: s3Bucket,
Key: s3Dir + "/kube-aws/clusters/" + cluster + "/exported/stacks/" + stack + "/" + file,
Path: stack + "/stack.json",
},
}
actual, err := assets.FindAssetByStackAndFileName(stack, file)
if err != nil {
t.Errorf("failed to find asset: %v", err)
}
if expected.ID != actual.ID {
t.Errorf(
"Asset id didn't match: expected=%v actual=%v",
expected.ID,
actual.ID,
)
}
if expected.Key != actual.Key {
t.Errorf(
"Asset key didn't match: expected=%v actual=%v",
expected.Key,
actual.Key,
)
}
})
t.Run("Assets/ControlplaneStackTemplate", func(t *testing.T) {
cluster := kubeAwsSettings.clusterName
stack := "control-plane"
file := "stack.json"
expected := model.Asset{
Content: string(controlplane_config.StackTemplateTemplate),
AssetLocation: model.AssetLocation{
ID: model.NewAssetID(stack, file),
Bucket: s3Bucket,
Key: s3Dir + "/kube-aws/clusters/" + cluster + "/exported/stacks/" + stack + "/" + file,
Path: stack + "/stack.json",
},
}
actual, err := assets.FindAssetByStackAndFileName(stack, file)
if err != nil {
t.Errorf("failed to find asset: %v", err)
}
if expected.ID != actual.ID {
t.Errorf(
"Asset id didn't match: expected=%v actual=%v",
expected.ID,
actual.ID,
)
}
if expected.Key != actual.Key {
t.Errorf(
"Asset key didn't match: expected=%v actual=%v",
expected.Key,
actual.Key,
)
}
})
}
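// Base cluster.yaml fixtures shared by the test cases below.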
mainClusterYaml := kubeAwsSettings.mainClusterYaml()
minimalValidConfigYaml := kubeAwsSettings.minimumValidClusterYamlWithAZ("c")
configYamlWithoutExernalDNSName := kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
availabilityZone: us-west-1c
`
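// validCases enumerates cluster.yaml variations that must parse and validate successfully.
// Each case can assert on the parsed config (assertConfig) and/or on the generated cluster (assertCluster).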
validCases := []struct {
context string
configYaml string
assertConfig []ConfigTester
assertCluster []ClusterTester
}{
{
context: "WithAddons",
configYaml: minimalValidConfigYaml + `
addons:
rescheduler:
enabled: true
clusterAutoscaler:
enabled: true
metricsServer:
enabled: true
worker:
nodePools:
- name: pool1
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
asgBasedNodePoolHasWaitSignalEnabled,
func(c *config.Config, t *testing.T) {
expected := model.Addons{
Rescheduler: model.Rescheduler{
Enabled: true,
},
ClusterAutoscaler: model.ClusterAutoscalerSupport{
Enabled: true,
},
MetricsServer: model.MetricsServer{
Enabled: true,
},
}
actual := c.Addons
if !reflect.DeepEqual(expected, actual) {
t.Errorf("addons didn't match : expected=%+v actual=%+v", expected, actual)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithAutoscalingByClusterAutoscaler",
configYaml: minimalValidConfigYaml + `
addons:
clusterAutoscaler:
enabled: true
worker:
nodePools:
- name: pool1
autoscaling:
clusterAutoscaler:
enabled: true
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
asgBasedNodePoolHasWaitSignalEnabled,
func(c *config.Config, t *testing.T) {
p := c.NodePools[0]
expected := true
actual := p.Autoscaling.ClusterAutoscaler.Enabled
if !reflect.DeepEqual(expected, actual) {
t.Errorf("autoscaling.clusterAutoscaler.enabled didn't match : expected=%v actual=%v", expected, actual)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithAPIEndpointLBAPIAccessAllowedSourceCIDRsSpecified",
configYaml: configYamlWithoutExernalDNSName + `
apiEndpoints:
- name: default
dnsName: k8s.example.com
loadBalancer:
apiAccessAllowedSourceCIDRs:
- 1.2.3.255/32
hostedZone:
id: a1b2c4
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
l := len(c.APIEndpointConfigs[0].LoadBalancer.APIAccessAllowedSourceCIDRs)
if l != 1 {
t.Errorf("unexpected size of apiEndpoints[0].loadBalancer.apiAccessAllowedSourceCIDRs: %d", l)
t.FailNow()
}
actual := c.APIEndpointConfigs[0].LoadBalancer.APIAccessAllowedSourceCIDRs[0].String()
expected := "1.2.3.255/32"
if actual != expected {
t.Errorf("unexpected cidr in apiEndpoints[0].loadBalancer.apiAccessAllowedSourceCIDRs[0]. expected = %s, actual = %s", expected, actual)
}
},
},
},
{
context: "WithAPIEndpointLBAPIAccessAllowedSourceCIDRsOmitted",
configYaml: configYamlWithoutExernalDNSName + `
apiEndpoints:
- name: default
dnsName: k8s.example.com
loadBalancer:
hostedZone:
id: a1b2c4
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
l := len(c.APIEndpointConfigs[0].LoadBalancer.APIAccessAllowedSourceCIDRs)
if l != 1 {
t.Errorf("unexpected size of apiEndpoints[0].loadBalancer.apiAccessAllowedSourceCIDRs: %d", l)
t.FailNow()
}
actual := c.APIEndpointConfigs[0].LoadBalancer.APIAccessAllowedSourceCIDRs[0].String()
expected := "0.0.0.0/0"
if actual != expected {
t.Errorf("unexpected cidr in apiEndpoints[0].loadBalancer.apiAccessAllowedSourceCIDRs[0]. expected = %s, actual = %s", expected, actual)
}
},
},
},
{
context: "WithKubeProxyIPVSModeDisabledByDefault",
configYaml: minimalValidConfigYaml,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
if c.KubeProxy.IPVSMode.Enabled != false {
t.Errorf("kube-proxy IPVS mode must be disabled by default")
}
expectedScheduler := "rr"
if c.KubeProxy.IPVSMode.Scheduler != expectedScheduler {
t.Errorf("IPVS scheduler should be by default set to: %s (actual = %s)", expectedScheduler, c.KubeProxy.IPVSMode.Scheduler)
}
expectedSyncPeriod := "60s"
if c.KubeProxy.IPVSMode.SyncPeriod != expectedSyncPeriod {
t.Errorf("Sync period should be by default set to: %s (actual = %s)", expectedSyncPeriod, c.KubeProxy.IPVSMode.SyncPeriod)
}
expectedMinSyncPeriod := "10s"
if c.KubeProxy.IPVSMode.MinSyncPeriod != expectedMinSyncPeriod {
t.Errorf("Minimal sync period should be by default set to: %s (actual = %s)", expectedMinSyncPeriod, c.KubeProxy.IPVSMode.MinSyncPeriod)
}
},
},
},
{
context: "WithKubeProxyIPVSModeEnabled",
configYaml: minimalValidConfigYaml + `
kubeProxy:
ipvsMode:
enabled: true
scheduler: lc
syncPeriod: 90s
minSyncPeriod: 15s
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
if c.KubeProxy.IPVSMode.Enabled != true {
t.Errorf("kube-proxy IPVS mode must be enabled")
}
expectedScheduler := "lc"
if c.KubeProxy.IPVSMode.Scheduler != expectedScheduler {
t.Errorf("IPVS scheduler should be set to: %s (actual = %s)", expectedScheduler, c.KubeProxy.IPVSMode.Scheduler)
}
expectedSyncPeriod := "90s"
if c.KubeProxy.IPVSMode.SyncPeriod != expectedSyncPeriod {
t.Errorf("Sync period should be set to: %s (actual = %s)", expectedSyncPeriod, c.KubeProxy.IPVSMode.SyncPeriod)
}
expectedMinSyncPeriod := "15s"
if c.KubeProxy.IPVSMode.MinSyncPeriod != expectedMinSyncPeriod {
t.Errorf("Minimal sync period should be set to: %s (actual = %s)", expectedMinSyncPeriod, c.KubeProxy.IPVSMode.MinSyncPeriod)
}
},
},
},
{
// See https://github.com/kubernetes-incubator/kube-aws/issues/365
context: "WithClusterNameContainsHyphens",
configYaml: kubeAwsSettings.withClusterName("my-cluster").minimumValidClusterYaml(),
},
{
context: "WithCustomSettings",
configYaml: minimalValidConfigYaml + `
customSettings:
stack-type: control-plane
worker:
nodePools:
- name: pool1
customSettings:
stack-type: node-pool
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
asgBasedNodePoolHasWaitSignalEnabled,
func(c *config.Config, t *testing.T) {
p := c.NodePools[0]
{
expected := map[string]interface{}{
"stack-type": "control-plane",
}
actual := c.CustomSettings
if !reflect.DeepEqual(expected, actual) {
t.Errorf("customSettings didn't match : expected=%v actual=%v", expected, actual)
}
}
{
expected := map[string]interface{}{
"stack-type": "node-pool",
}
actual := p.CustomSettings
if !reflect.DeepEqual(expected, actual) {
t.Errorf("customSettings didn't match : expected=%v actual=%v", expected, actual)
}
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithDifferentReleaseChannels",
configYaml: minimalValidConfigYaml + `
releaseChannel: stable
worker:
nodePools:
- name: pool1
releaseChannel: alpha
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
asgBasedNodePoolHasWaitSignalEnabled,
},
assertCluster: []ClusterTester{
func(c root.Cluster, t *testing.T) {
cp := c.ControlPlane().StackConfig.AMI
np := c.NodePools()[0].AMI
if cp == "" {
t.Error("the default AMI ID should not be empty but it was")
}
if np == "" {
t.Error("the AMI ID for the node pool should not be empty but it was")
}
if cp == np {
t.Errorf("the default AMI ID and the AMI ID for the node pool should not match but they did: default=%s, nodepool=%s", cp, np)
}
},
},
},
{
context: "WithElasticFileSystemId",
configYaml: minimalValidConfigYaml + `
elasticFileSystemId: efs-12345
worker:
nodePools:
- name: pool1
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
if c.NodePools[0].ElasticFileSystemID != "efs-12345" {
t.Errorf("The value of worker.nodePools[0].elasticFileSystemId should match the one for the top-leve elasticFileSystemId, but it wan't: worker.nodePools[0].elasticFileSystemId=%s", c.NodePools[0].ElasticFileSystemID)
}
},
},
},
{
context: "WithElasticFileSystemIdInSpecificNodePool",
configYaml: mainClusterYaml + `
subnets:
- name: existing1
id: subnet-12345
availabilityZone: us-west-1a
worker:
nodePools:
- name: pool1
subnets:
- name: existing1
elasticFileSystemId: efs-12345
- name: pool2
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
if c.NodePools[0].ElasticFileSystemID != "efs-12345" {
t.Errorf("Unexpected worker.nodePools[0].elasticFileSystemId: %s", c.NodePools[0].ElasticFileSystemID)
}
if c.NodePools[1].ElasticFileSystemID != "" {
t.Errorf("Unexpected worker.nodePools[1].elasticFileSystemId: %s", c.NodePools[1].ElasticFileSystemID)
}
},
},
},
{
context: "WithEtcdDataVolumeEncrypted",
configYaml: minimalValidConfigYaml + `
etcd:
dataVolume:
encrypted: true
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
if !c.Etcd.DataVolume.Encrypted {
t.Errorf("Etcd data volume should be encrypted but was not: %v", c.Etcd)
}
},
},
},
{
context: "WithEtcdDataVolumeEncryptedKMSKeyARN",
configYaml: minimalValidConfigYaml + `
etcd:
dataVolume:
encrypted: true
kmsKeyArn: arn:aws:kms:eu-west-1:XXX:key/XXX
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
expected := "arn:aws:kms:eu-west-1:XXX:key/XXX"
if c.Etcd.KMSKeyARN() != expected {
t.Errorf("Etcd data volume KMS Key ARN didn't match : expected=%v actual=%v", expected, c.Etcd.KMSKeyARN())
}
if !c.Etcd.DataVolume.Encrypted {
t.Error("Etcd data volume should be encrypted but was not")
}
},
},
},
{
context: "WithEtcdMemberIdentityProviderEIP",
configYaml: minimalValidConfigYaml + `
etcd:
memberIdentityProvider: eip
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
subnet1 := model.NewPublicSubnet(firstAz, "10.0.0.0/24")
subnet1.Name = "Subnet0"
expected := controlplane_config.EtcdSettings{
Etcd: model.Etcd{
Cluster: model.EtcdCluster{
MemberIdentityProvider: "eip",
},
EC2Instance: model.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
Tenancy: "default",
RootVolume: model.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
},
DataVolume: model.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Subnets: model.Subnets{
subnet1,
},
},
}
actual := c.EtcdSettings
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"EtcdSettings didn't match: expected=%v actual=%v",
expected,
actual,
)
}
if !actual.NodeShouldHaveEIP() {
t.Errorf(
"NodeShouldHaveEIP returned unexpected value: %v",
actual.NodeShouldHaveEIP(),
)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithEtcdMemberIdentityProviderENI",
configYaml: minimalValidConfigYaml + `
etcd:
memberIdentityProvider: eni
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
subnet1 := model.NewPublicSubnet(firstAz, "10.0.0.0/24")
subnet1.Name = "Subnet0"
expected := controlplane_config.EtcdSettings{
Etcd: model.Etcd{
EC2Instance: model.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
RootVolume: model.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
Tenancy: "default",
},
DataVolume: model.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Cluster: model.EtcdCluster{
MemberIdentityProvider: "eni",
},
Subnets: model.Subnets{
subnet1,
},
},
}
actual := c.EtcdSettings
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"EtcdSettings didn't match: expected=%v actual=%v",
expected,
actual,
)
}
if !actual.NodeShouldHaveSecondaryENI() {
t.Errorf(
"NodeShouldHaveSecondaryENI returned unexpected value: %v",
actual.NodeShouldHaveSecondaryENI(),
)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithEtcdMemberIdentityProviderENIWithCustomDomain",
configYaml: minimalValidConfigYaml + `
etcd:
memberIdentityProvider: eni
internalDomainName: internal.example.com
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
subnet1 := model.NewPublicSubnet(firstAz, "10.0.0.0/24")
subnet1.Name = "Subnet0"
expected := controlplane_config.EtcdSettings{
Etcd: model.Etcd{
Cluster: model.EtcdCluster{
MemberIdentityProvider: "eni",
InternalDomainName: "internal.example.com",
},
EC2Instance: model.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
RootVolume: model.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
Tenancy: "default",
},
DataVolume: model.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Subnets: model.Subnets{
subnet1,
},
},
}
actual := c.EtcdSettings
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"EtcdSettings didn't match: expected=%v actual=%v",
expected,
actual,
)
}
if !actual.NodeShouldHaveSecondaryENI() {
t.Errorf(
"NodeShouldHaveSecondaryENI returned unexpected value: %v",
actual.NodeShouldHaveSecondaryENI(),
)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithEtcdMemberIdentityProviderENIWithCustomFQDNs",
configYaml: minimalValidConfigYaml + `
etcd:
memberIdentityProvider: eni
internalDomainName: internal.example.com
nodes:
- fqdn: etcd1a.internal.example.com
- fqdn: etcd1b.internal.example.com
- fqdn: etcd1c.internal.example.com
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
subnet1 := model.NewPublicSubnet(firstAz, "10.0.0.0/24")
subnet1.Name = "Subnet0"
expected := controlplane_config.EtcdSettings{
Etcd: model.Etcd{
Cluster: model.EtcdCluster{
MemberIdentityProvider: "eni",
InternalDomainName: "internal.example.com",
},
EC2Instance: model.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
RootVolume: model.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
Tenancy: "default",
},
DataVolume: model.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Nodes: []model.EtcdNode{
model.EtcdNode{
FQDN: "etcd1a.internal.example.com",
},
model.EtcdNode{
FQDN: "etcd1b.internal.example.com",
},
model.EtcdNode{
FQDN: "etcd1c.internal.example.com",
},
},
Subnets: model.Subnets{
subnet1,
},
},
}
actual := c.EtcdSettings
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"EtcdSettings didn't match: expected=%v actual=%v",
expected,
actual,
)
}
if !actual.NodeShouldHaveSecondaryENI() {
t.Errorf(
"NodeShouldHaveSecondaryENI returned unexpected value: %v",
actual.NodeShouldHaveSecondaryENI(),
)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithEtcdMemberIdentityProviderENIWithCustomNames",
configYaml: minimalValidConfigYaml + `
etcd:
memberIdentityProvider: eni
internalDomainName: internal.example.com
nodes:
- name: etcd1a
- name: etcd1b
- name: etcd1c
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
subnet1 := model.NewPublicSubnet(firstAz, "10.0.0.0/24")
subnet1.Name = "Subnet0"
expected := controlplane_config.EtcdSettings{
Etcd: model.Etcd{
Cluster: model.EtcdCluster{
MemberIdentityProvider: "eni",
InternalDomainName: "internal.example.com",
},
EC2Instance: model.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
RootVolume: model.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
Tenancy: "default",
},
DataVolume: model.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Nodes: []model.EtcdNode{
model.EtcdNode{
Name: "etcd1a",
},
model.EtcdNode{
Name: "etcd1b",
},
model.EtcdNode{
Name: "etcd1c",
},
},
Subnets: model.Subnets{
subnet1,
},
},
}
actual := c.EtcdSettings
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"EtcdSettings didn't match: expected=%v actual=%v",
expected,
actual,
)
}
if !actual.NodeShouldHaveSecondaryENI() {
t.Errorf(
"NodeShouldHaveSecondaryENI returned unexpected value: %v",
actual.NodeShouldHaveSecondaryENI(),
)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithEtcdMemberIdentityProviderENIWithoutRecordSets",
configYaml: minimalValidConfigYaml + `
etcd:
memberIdentityProvider: eni
internalDomainName: internal.example.com
manageRecordSets: false
nodes:
- name: etcd1a
- name: etcd1b
- name: etcd1c
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
subnet1 := model.NewPublicSubnet(firstAz, "10.0.0.0/24")
subnet1.Name = "Subnet0"
manageRecordSets := false
expected := controlplane_config.EtcdSettings{
Etcd: model.Etcd{
Cluster: model.EtcdCluster{
ManageRecordSets: &manageRecordSets,
MemberIdentityProvider: "eni",
InternalDomainName: "internal.example.com",
},
EC2Instance: model.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
RootVolume: model.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
Tenancy: "default",
},
DataVolume: model.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Nodes: []model.EtcdNode{
model.EtcdNode{
Name: "etcd1a",
},
model.EtcdNode{
Name: "etcd1b",
},
model.EtcdNode{
Name: "etcd1c",
},
},
Subnets: model.Subnets{
subnet1,
},
},
}
actual := c.EtcdSettings
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"EtcdSettings didn't match: expected=%v actual=%v",
expected,
actual,
)
}
if !actual.NodeShouldHaveSecondaryENI() {
t.Errorf(
"NodeShouldHaveSecondaryENI returned unexpected value: %v",
actual.NodeShouldHaveSecondaryENI(),
)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithEtcdMemberIdentityProviderENIWithHostedZoneID",
configYaml: minimalValidConfigYaml + `
etcd:
memberIdentityProvider: eni
internalDomainName: internal.example.com
hostedZone:
id: hostedzone-abcdefg
nodes:
- name: etcd1a
- name: etcd1b
- name: etcd1c
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
subnet1 := model.NewPublicSubnet(firstAz, "10.0.0.0/24")
subnet1.Name = "Subnet0"
expected := controlplane_config.EtcdSettings{
Etcd: model.Etcd{
Cluster: model.EtcdCluster{
HostedZone: model.Identifier{ID: "hostedzone-abcdefg"},
MemberIdentityProvider: "eni",
InternalDomainName: "internal.example.com",
},
EC2Instance: model.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
RootVolume: model.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
Tenancy: "default",
},
DataVolume: model.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Nodes: []model.EtcdNode{
model.EtcdNode{
Name: "etcd1a",
},
model.EtcdNode{
Name: "etcd1b",
},
model.EtcdNode{
Name: "etcd1c",
},
},
Subnets: model.Subnets{
subnet1,
},
},
}
actual := c.EtcdSettings
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"EtcdSettings didn't match: expected=%v actual=%v",
expected,
actual,
)
}
if !actual.NodeShouldHaveSecondaryENI() {
t.Errorf(
"NodeShouldHaveSecondaryENI returned unexpected value: %v",
actual.NodeShouldHaveSecondaryENI(),
)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithExperimentalFeatures",
configYaml: minimalValidConfigYaml + `
experimental:
admission:
podSecurityPolicy:
enabled: true
denyEscalatingExec:
enabled: true
alwaysPullImages:
enabled: true
priority:
enabled: true
mutatingAdmissionWebhook:
enabled: true
validatingAdmissionWebhook:
enabled: true
persistentVolumeClaimResize:
enabled: true
auditLog:
enabled: true
maxage: 100
logpath: "/var/log/audit.log"
authentication:
webhook:
enabled: true
cacheTTL: "1234s"
configBase64: "e30k"
awsEnvironment:
enabled: true
environment:
CFNSTACK: '{ "Ref" : "AWS::StackId" }'
awsNodeLabels:
enabled: true
tlsBootstrap:
enabled: true
ephemeralImageStorage:
enabled: true
kiamSupport:
enabled: true
kube2IamSupport:
enabled: true
gpuSupport:
enabled: true
version: "375.66"
installImage: "shelmangroup/coreos-nvidia-driver-installer:latest"
kubeletOpts: '--image-gc-low-threshold 60 --image-gc-high-threshold 70'
loadBalancer:
enabled: true
names:
- manuallymanagedlb
securityGroupIds:
- sg-12345678
targetGroup:
enabled: true
arns:
- arn:aws:elasticloadbalancing:eu-west-1:xxxxxxxxxxxx:targetgroup/manuallymanagedetg/xxxxxxxxxxxxxxxx
securityGroupIds:
- sg-12345678
oidc:
enabled: true
oidc-issuer-url: "https://accounts.google.com"
oidc-client-id: "kubernetes"
oidc-username-claim: "email"
oidc-groups-claim: "groups"
nodeDrainer:
enabled: true
drainTimeout: 3
cloudWatchLogging:
enabled: true
amazonSsmAgent:
enabled: true
worker:
nodePools:
- name: pool1
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
asgBasedNodePoolHasWaitSignalEnabled,
func(c *config.Config, t *testing.T) {
expected := controlplane_config.Experimental{
Admission: controlplane_config.Admission{
PodSecurityPolicy: controlplane_config.PodSecurityPolicy{
Enabled: true,
},
AlwaysPullImages: controlplane_config.AlwaysPullImages{
Enabled: true,
},
DenyEscalatingExec: controlplane_config.DenyEscalatingExec{
Enabled: true,
},
Priority: controlplane_config.Priority{
Enabled: true,
},
MutatingAdmissionWebhook: controlplane_config.MutatingAdmissionWebhook{
Enabled: true,
},
ValidatingAdmissionWebhook: controlplane_config.ValidatingAdmissionWebhook{
Enabled: true,
},
PersistentVolumeClaimResize: controlplane_config.PersistentVolumeClaimResize{
Enabled: true,
},
},
AuditLog: controlplane_config.AuditLog{
Enabled: true,
MaxAge: 100,
LogPath: "/var/log/audit.log",
},
Authentication: controlplane_config.Authentication{
Webhook: controlplane_config.Webhook{
Enabled: true,
CacheTTL: "1234s",
Config: "e30k",
},
},
AwsEnvironment: controlplane_config.AwsEnvironment{
Enabled: true,
Environment: map[string]string{
"CFNSTACK": `{ "Ref" : "AWS::StackId" }`,
},
},
AwsNodeLabels: controlplane_config.AwsNodeLabels{
Enabled: true,
},
ClusterAutoscalerSupport: model.ClusterAutoscalerSupport{
Enabled: true,
},
TLSBootstrap: controlplane_config.TLSBootstrap{
Enabled: true,
},
EphemeralImageStorage: controlplane_config.EphemeralImageStorage{
Enabled: true,
Disk: "xvdb",
Filesystem: "xfs",
},
KIAMSupport: controlplane_config.KIAMSupport{
Enabled: true,
},
Kube2IamSupport: controlplane_config.Kube2IamSupport{
Enabled: true,
},
GpuSupport: controlplane_config.GpuSupport{
Enabled: true,
Version: "375.66",
InstallImage: "shelmangroup/coreos-nvidia-driver-installer:latest",
},
KubeletOpts: "--image-gc-low-threshold 60 --image-gc-high-threshold 70",
LoadBalancer: controlplane_config.LoadBalancer{
Enabled: true,
Names: []string{"manuallymanagedlb"},
SecurityGroupIds: []string{"sg-12345678"},
},
TargetGroup: controlplane_config.TargetGroup{
Enabled: true,
Arns: []string{"arn:aws:elasticloadbalancing:eu-west-1:xxxxxxxxxxxx:targetgroup/manuallymanagedetg/xxxxxxxxxxxxxxxx"},
SecurityGroupIds: []string{"sg-12345678"},
},
Oidc: model.Oidc{
Enabled: true,
IssuerUrl: "https://accounts.google.com",
ClientId: "kubernetes",
UsernameClaim: "email",
GroupsClaim: "groups",
},
NodeDrainer: model.NodeDrainer{
Enabled: true,
DrainTimeout: 3,
},
}
actual := c.Experimental
if !reflect.DeepEqual(expected, actual) {
t.Errorf("experimental settings didn't match : expected=%+v actual=%+v", expected, actual)
}
p := c.NodePools[0]
if reflect.DeepEqual(expected, p.Experimental) {
t.Errorf("experimental settings shouldn't be inherited to a node pool but it did : toplevel=%v nodepool=%v", expected, p.Experimental)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
func(c root.Cluster, t *testing.T) {
cp := c.ControlPlane()
controllerUserdataS3Part := cp.UserDataController.Parts[model.USERDATA_S3].Asset.Content
if !strings.Contains(controllerUserdataS3Part, `--feature-gates=PodPriority=true`) {
t.Error("missing controller feature gate: PodPriority=true")
}
if !strings.Contains(controllerUserdataS3Part, `scheduling.k8s.io/v1alpha1=true`) {
t.Error("missing controller runtime config: scheduling.k8s.io/v1alpha1=true")
}
re := regexp.MustCompile("--admission-control=[a-zA-Z,]*,Priority")
if len(re.FindString(controllerUserdataS3Part)) == 0 {
t.Error("missing controller --admission-control config: Priority")
}
},
},
},
{
context: "WithExperimentalFeaturesForWorkerNodePool",
configYaml: minimalValidConfigYaml + `
addons:
clusterAutoscaler:
enabled: true
worker:
nodePools:
- name: pool1
admission:
podSecurityPolicy:
enabled: true
auditLog:
enabled: true
maxage: 100
logpath: "/var/log/audit.log"
awsEnvironment:
enabled: true
environment:
CFNSTACK: '{ "Ref" : "AWS::StackId" }'
awsNodeLabels:
enabled: true
clusterAutoscalerSupport:
enabled: true
tlsBootstrap:
enabled: true # Must be ignored, value is synced with the one from control plane
ephemeralImageStorage:
enabled: true
kube2IamSupport:
enabled: true
loadBalancer:
enabled: true
names:
- manuallymanagedlb
securityGroupIds:
- sg-12345678
targetGroup:
enabled: true
arns:
- arn:aws:elasticloadbalancing:eu-west-1:xxxxxxxxxxxx:targetgroup/manuallymanagedetg/xxxxxxxxxxxxxxxx
securityGroupIds:
- sg-12345678
# Ignored, uses global setting
nodeDrainer:
enabled: true
drainTimeout: 5
nodeLabels:
kube-aws.coreos.com/role: worker
taints:
- key: reservation
value: spot
effect: NoSchedule
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
asgBasedNodePoolHasWaitSignalEnabled,
func(c *config.Config, t *testing.T) {
expected := controlplane_config.Experimental{
AwsEnvironment: controlplane_config.AwsEnvironment{
Enabled: true,
Environment: map[string]string{
"CFNSTACK": `{ "Ref" : "AWS::StackId" }`,
},
},
AwsNodeLabels: controlplane_config.AwsNodeLabels{
Enabled: true,
},
ClusterAutoscalerSupport: model.ClusterAutoscalerSupport{
Enabled: true,
},
TLSBootstrap: controlplane_config.TLSBootstrap{
Enabled: false,
},
EphemeralImageStorage: controlplane_config.EphemeralImageStorage{
Enabled: true,
Disk: "xvdb",
Filesystem: "xfs",
},
Kube2IamSupport: controlplane_config.Kube2IamSupport{
Enabled: true,
},
LoadBalancer: controlplane_config.LoadBalancer{
Enabled: true,
Names: []string{"manuallymanagedlb"},
SecurityGroupIds: []string{"sg-12345678"},
},
TargetGroup: controlplane_config.TargetGroup{
Enabled: true,
Arns: []string{"arn:aws:elasticloadbalancing:eu-west-1:xxxxxxxxxxxx:targetgroup/manuallymanagedetg/xxxxxxxxxxxxxxxx"},
SecurityGroupIds: []string{"sg-12345678"},
},
NodeDrainer: model.NodeDrainer{
Enabled: false,
DrainTimeout: 0,
},
}
p := c.NodePools[0]
if !reflect.DeepEqual(expected, p.Experimental) {
t.Errorf("experimental settings for node pool didn't match: expected=%v actual=%v", expected, p.Experimental)
}
expectedNodeLabels := model.NodeLabels{
"kube-aws.coreos.com/cluster-autoscaler-supported": "true",
"kube-aws.coreos.com/role": "worker",
}
actualNodeLabels := c.NodePools[0].NodeLabels()
if !reflect.DeepEqual(expectedNodeLabels, actualNodeLabels) {
t.Errorf("worker node labels didn't match: expected=%v, actual=%v", expectedNodeLabels, actualNodeLabels)
}
expectedTaints := model.Taints{
{Key: "reservation", Value: "spot", Effect: "NoSchedule"},
}
actualTaints := c.NodePools[0].Taints
if !reflect.DeepEqual(expectedTaints, actualTaints) {
t.Errorf("worker node taints didn't match: expected=%v, actual=%v", expectedTaints, actualTaints)
}
},
},
},
{
context: "WithKube2IamSupport",
configYaml: minimalValidConfigYaml + `
controller:
iam:
role:
name: myrole1
experimental:
kube2IamSupport:
enabled: true
worker:
nodePools:
- name: pool1
iam:
role:
name: myrole2
kube2IamSupport:
enabled: true
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
asgBasedNodePoolHasWaitSignalEnabled,
func(c *config.Config, t *testing.T) {
expectedControllerRoleName := "myrole1"
expectedWorkerRoleName := "myrole2"
if expectedControllerRoleName != c.Controller.IAMConfig.Role.Name {
t.Errorf("controller's iam.role.name didn't match : expected=%v actual=%v", expectedControllerRoleName, c.Controller.IAMConfig.Role.Name)
}
if !c.Experimental.Kube2IamSupport.Enabled {
t.Errorf("controller's experimental.kube2IamSupport should be enabled but was not: %+v", c.Experimental)
}
p := c.NodePools[0]
if expectedWorkerRoleName != p.IAMConfig.Role.Name {
t.Errorf("worker node pool's iam.role.name didn't match : expected=%v actual=%v", expectedWorkerRoleName, p.IAMConfig.Role.Name)
}
if !p.Kube2IamSupport.Enabled {
t.Errorf("worker node pool's kube2IamSupport should be enabled but was not: %+v", p.Experimental)
}
},
},
},
{
context: "WithWaitSignalDisabled",
configYaml: minimalValidConfigYaml + `
waitSignal:
enabled: false
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
func(c *config.Config, t *testing.T) {
if c.WaitSignal.Enabled() {
t.Errorf("waitSignal should be disabled but was not: %v", c.WaitSignal)
}
},
},
},
{
context: "WithWaitSignalEnabled",
configYaml: minimalValidConfigYaml + `
waitSignal:
enabled: true
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
func(c *config.Config, t *testing.T) {
if !c.WaitSignal.Enabled() {
t.Errorf("waitSignal should be enabled but was not: %v", c.WaitSignal)
}
},
},
},
{
context: "WithNodePoolWithWaitSignalDisabled",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
waitSignal:
enabled: false
- name: pool2
waitSignal:
enabled: false
maxBatchSize: 2
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
func(c *config.Config, t *testing.T) {
if c.NodePools[0].WaitSignal.Enabled() {
t.Errorf("waitSignal should be disabled for node pool at index %d but was not", 0)
}
if c.NodePools[1].WaitSignal.Enabled() {
t.Errorf("waitSignal should be disabled for node pool at index %d but was not", 1)
}
},
},
},
{
context: "WithNodePoolWithWaitSignalEnabled",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
waitSignal:
enabled: true
- name: pool2
waitSignal:
enabled: true
maxBatchSize: 2
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
func(c *config.Config, t *testing.T) {
if !c.NodePools[0].WaitSignal.Enabled() {
t.Errorf("waitSignal should be enabled for node pool at index %d but was not", 0)
}
if c.NodePools[0].WaitSignal.MaxBatchSize() != 1 {
t.Errorf("waitSignal.maxBatchSize should be 1 for node pool at index %d but was %d", 0, c.NodePools[0].WaitSignal.MaxBatchSize())
}
if !c.NodePools[1].WaitSignal.Enabled() {
t.Errorf("waitSignal should be enabled for node pool at index %d but was not", 1)
}
if c.NodePools[1].WaitSignal.MaxBatchSize() != 2 {
t.Errorf("waitSignal.maxBatchSize should be 2 for node pool at index %d but was %d", 1, c.NodePools[1].WaitSignal.MaxBatchSize())
}
},
},
},
{
context: "WithMinimalValidConfig",
configYaml: minimalValidConfigYaml,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
},
},
{
context: "WithVaryingWorkerCountPerNodePool",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
- name: pool2
count: 2
- name: pool3
count: 0
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
if c.NodePools[0].Count != 1 {
t.Errorf("default worker count should be 1 but was: %d", c.NodePools[0].Count)
}
if c.NodePools[1].Count != 2 {
t.Errorf("worker count should be set to 2 but was: %d", c.NodePools[1].Count)
}
if c.NodePools[2].Count != 0 {
t.Errorf("worker count should be be set to 0 but was: %d", c.NodePools[2].Count)
}
},
},
},
{
context: "WithVaryingWorkerASGSizePerNodePool",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
- name: pool2
count: 2
- name: pool3
autoScalingGroup:
minSize: 0
maxSize: 10
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
if c.NodePools[0].MaxCount() != 1 {
t.Errorf("worker max count should be 1 but was: %d", c.NodePools[0].MaxCount())
}
if c.NodePools[0].MinCount() != 1 {
t.Errorf("worker min count should be 1 but was: %d", c.NodePools[0].MinCount())
}
if c.NodePools[1].MaxCount() != 2 {
t.Errorf("worker max count should be 2 but was: %d", c.NodePools[1].MaxCount())
}
if c.NodePools[1].MinCount() != 2 {
t.Errorf("worker min count should be 2 but was: %d", c.NodePools[1].MinCount())
}
if c.NodePools[2].MaxCount() != 10 {
t.Errorf("worker max count should be 10 but was: %d", c.NodePools[2].MaxCount())
}
if c.NodePools[2].MinCount() != 0 {
t.Errorf("worker min count should be 0 but was: %d", c.NodePools[2].MinCount())
}
},
},
},
{
context: "WithMultiAPIEndpoints",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: privateSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
private: true
- name: privateSubnet2
availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
private: true
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.3.0/24"
- name: publicSubnet2
availabilityZone: us-west-1b
instanceCIDR: "10.0.4.0/24"
worker:
# cant be possibly "unversioned" one w/ existing elb because doing so would result in a worker kubelet has chances to
# connect to multiple masters from different clusters!
apiEndpointName: versionedPrivate
# apiEndpointName could be defaulted to the sole private/public managed (hence unstable/possibly versioned, but not stable/unversioned)
# elb/round-robin if and only if there is exactly one. However, we don't do that kind of complex defaulting for now.
adminAPIEndpointName: versionedPublic
apiEndpoints:
- name: unversionedPublic
dnsName: api.example.com
loadBalancer:
id: elb-internet-facing
##you can't configure an existing elb like below
#private: true
#subnets:
#- name: privateSubnet1
##hostedZone must be omitted when elb id is specified.
##in other words, it is your responsibility to create an alias record for the elb
#hostedZone:
# id: hostedzone-private
- name: unversionedPrivate
dnsName: api.internal.example.com
loadBalancer:
id: elb-internal
- name: versionedPublic
dnsName: v1api.example.com
loadBalancer:
subnets:
- name: publicSubnet1
hostedZone:
id: hostedzone-public
- name: versionedPrivate
dnsName: v1api.internal.example.com
loadBalancer:
private: true
subnets:
- name: privateSubnet1
hostedZone:
id: hostedzone-private
- name: versionedPublicAlt
dnsName: v1apialt.example.com
loadBalancer:
# "private: false" implies all the private subnets defined in the top-level "subnets"
#subnets:
#- name: publicSubnet1
#- name: publicSubnet2
hostedZone:
id: hostedzone-public
- name: versionedPrivateAlt
dnsName: v1apialt.internal.example.com
loadBalancer:
private: true
# "private: true" implies all the private subnets defined in the top-level "subnets"
#subnets:
#- name: privateSubnet1
#- name: privateSubnet2
hostedZone:
id: hostedzone-private
- name: addedToCertCommonNames
dnsName: api-alt.example.com
loadBalancer:
managed: false
- name: elbOnly
dnsName: registerme.example.com
loadBalancer:
recordSetManaged: false
`,
assertCluster: []ClusterTester{
func(rootCluster root.Cluster, t *testing.T) {
c := rootCluster.ControlPlane()
private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24")
private1.Name = "privateSubnet1"
private2 := model.NewPrivateSubnet("us-west-1b", "10.0.2.0/24")
private2.Name = "privateSubnet2"
public1 := model.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
public1.Name = "publicSubnet1"
public2 := model.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
public2.Name = "publicSubnet2"
subnets := model.Subnets{
private1,
private2,
public1,
public2,
}
if !reflect.DeepEqual(c.AllSubnets(), subnets) {
t.Errorf("Managed subnets didn't match: expected=%+v actual=%+v", subnets, c.AllSubnets())
}
publicSubnets := model.Subnets{
public1,
public2,
}
privateSubnets := model.Subnets{
private1,
private2,
}
unversionedPublic := c.APIEndpoints["unversionedPublic"]
unversionedPrivate := c.APIEndpoints["unversionedPrivate"]
versionedPublic := c.APIEndpoints["versionedPublic"]
versionedPrivate := c.APIEndpoints["versionedPrivate"]
versionedPublicAlt := c.APIEndpoints["versionedPublicAlt"]
versionedPrivateAlt := c.APIEndpoints["versionedPrivateAlt"]
addedToCertCommonNames := c.APIEndpoints["addedToCertCommonNames"]
elbOnly := c.APIEndpoints["elbOnly"]
if len(unversionedPublic.LoadBalancer.Subnets) != 0 {
t.Errorf("unversionedPublic: subnets shuold be empty but was not: actual=%+v", unversionedPublic.LoadBalancer.Subnets)
}
if !unversionedPublic.LoadBalancer.Enabled() {
t.Errorf("unversionedPublic: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", unversionedPublic.LoadBalancer)
}
if len(unversionedPrivate.LoadBalancer.Subnets) != 0 {
t.Errorf("unversionedPrivate: subnets shuold be empty but was not: actual=%+v", unversionedPrivate.LoadBalancer.Subnets)
}
if !unversionedPrivate.LoadBalancer.Enabled() {
t.Errorf("unversionedPrivate: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", unversionedPrivate.LoadBalancer)
}
if !reflect.DeepEqual(versionedPublic.LoadBalancer.Subnets, model.Subnets{public1}) {
t.Errorf("versionedPublic: subnets didn't match: expected=%+v actual=%+v", model.Subnets{public1}, versionedPublic.LoadBalancer.Subnets)
}
if !versionedPublic.LoadBalancer.Enabled() {
t.Errorf("versionedPublic: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", versionedPublic.LoadBalancer)
}
if !reflect.DeepEqual(versionedPrivate.LoadBalancer.Subnets, model.Subnets{private1}) {
t.Errorf("versionedPrivate: subnets didn't match: expected=%+v actual=%+v", model.Subnets{private1}, versionedPrivate.LoadBalancer.Subnets)
}
if !versionedPrivate.LoadBalancer.Enabled() {
t.Errorf("versionedPrivate: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", versionedPrivate.LoadBalancer)
}
if !reflect.DeepEqual(versionedPublicAlt.LoadBalancer.Subnets, publicSubnets) {
t.Errorf("versionedPublicAlt: subnets didn't match: expected=%+v actual=%+v", publicSubnets, versionedPublicAlt.LoadBalancer.Subnets)
}
if !versionedPublicAlt.LoadBalancer.Enabled() {
t.Errorf("versionedPublicAlt: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", versionedPublicAlt.LoadBalancer)
}
if !reflect.DeepEqual(versionedPrivateAlt.LoadBalancer.Subnets, privateSubnets) {
t.Errorf("versionedPrivateAlt: subnets didn't match: expected=%+v actual=%+v", privateSubnets, versionedPrivateAlt.LoadBalancer.Subnets)
}
if !versionedPrivateAlt.LoadBalancer.Enabled() {
t.Errorf("versionedPrivateAlt: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", versionedPrivateAlt.LoadBalancer)
}
if len(addedToCertCommonNames.LoadBalancer.Subnets) != 0 {
t.Errorf("addedToCertCommonNames: subnets should be empty but was not: actual=%+v", addedToCertCommonNames.LoadBalancer.Subnets)
}
if addedToCertCommonNames.LoadBalancer.Enabled() {
t.Errorf("addedToCertCommonNames: it should not be enabled as the lb to which controller nodes are added, but it was: loadBalancer=%+v", addedToCertCommonNames.LoadBalancer)
}
if !reflect.DeepEqual(elbOnly.LoadBalancer.Subnets, publicSubnets) {
t.Errorf("elbOnly: subnets didn't match: expected=%+v actual=%+v", publicSubnets, elbOnly.LoadBalancer.Subnets)
}
if !elbOnly.LoadBalancer.Enabled() {
t.Errorf("elbOnly: it should be enabled but it was not: loadBalancer=%+v", elbOnly.LoadBalancer)
}
if elbOnly.LoadBalancer.ManageELBRecordSet() {
t.Errorf("elbOnly: record set should not be managed but it was: loadBalancer=%+v", elbOnly.LoadBalancer)
}
if !reflect.DeepEqual(c.ExternalDNSNames(), []string{"api-alt.example.com", "api.example.com", "api.internal.example.com", "registerme.example.com", "v1api.example.com", "v1api.internal.example.com", "v1apialt.example.com", "v1apialt.internal.example.com"}) {
t.Errorf("unexpected external DNS names: %s", strings.Join(c.ExternalDNSNames(), ", "))
}
if !reflect.DeepEqual(c.APIEndpoints.ManagedELBLogicalNames(), []string{"APIEndpointElbOnlyELB", "APIEndpointVersionedPrivateAltELB", "APIEndpointVersionedPrivateELB", "APIEndpointVersionedPublicAltELB", "APIEndpointVersionedPublicELB"}) {
t.Errorf("unexpected managed ELB logical names: %s", strings.Join(c.APIEndpoints.ManagedELBLogicalNames(), ", "))
}
},
},
},
{
context: "WithNetworkTopologyExplicitSubnets",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
# routeTableId must be omitted
# See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-275962332
# routeTableId: rtb-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
private: true
- name: private2
availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
private: true
- name: public1
availabilityZone: us-west-1a
instanceCIDR: "10.0.3.0/24"
- name: public2
availabilityZone: us-west-1b
instanceCIDR: "10.0.4.0/24"
controller:
subnets:
- name: private1
- name: private2
loadBalancer:
subnets:
- name: public1
- name: public2
private: false
etcd:
subnets:
- name: private1
- name: private2
worker:
nodePools:
- name: pool1
subnets:
- name: public1
- name: public2
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
everyPublicSubnetHasRouteToIGW,
hasTwoManagedNGWsAndEIPs,
func(c *config.Config, t *testing.T) {
private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24")
private1.Name = "private1"
private2 := model.NewPrivateSubnet("us-west-1b", "10.0.2.0/24")
private2.Name = "private2"
public1 := model.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
public1.Name = "public1"
public2 := model.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
public2.Name = "public2"
subnets := model.Subnets{
private1,
private2,
public1,
public2,
}
if !reflect.DeepEqual(c.AllSubnets(), subnets) {
t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
}
publicSubnets := model.Subnets{
public1,
public2,
}
importedPublicSubnets := model.Subnets{
model.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${ControlPlaneStackName}-Public1"}}`),
model.NewPublicSubnetFromFn("us-west-1b", `{"Fn::ImportValue":{"Fn::Sub":"${ControlPlaneStackName}-Public2"}}`),
}
p := c.NodePools[0]
if !reflect.DeepEqual(p.Subnets, importedPublicSubnets) {
t.Errorf("Worker subnets didn't match: expected=%v actual=%v", importedPublicSubnets, p.Subnets)
}
privateSubnets := model.Subnets{
private1,
private2,
}
if !reflect.DeepEqual(c.Controller.Subnets, privateSubnets) {
t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets)
}
if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) {
t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", publicSubnets, c.Controller.LoadBalancer.Subnets)
}
if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) {
t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets)
}
for i, s := range c.PrivateSubnets() {
if !s.ManageNATGateway() {
t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i)
}
if s.ManageRouteToInternet() {
t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
}
}
},
},
},
{
context: "WithNetworkTopologyImplicitSubnets",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
# routeTableId must be omitted
# See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-275962332
# routeTableId: rtb-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
private: true
- name: private2
availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
private: true
- name: public1
availabilityZone: us-west-1a
instanceCIDR: "10.0.3.0/24"
- name: public2
availabilityZone: us-west-1b
instanceCIDR: "10.0.4.0/24"
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
everyPublicSubnetHasRouteToIGW,
hasTwoManagedNGWsAndEIPs,
func(c *config.Config, t *testing.T) {
private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24")
private1.Name = "private1"
private2 := model.NewPrivateSubnet("us-west-1b", "10.0.2.0/24")
private2.Name = "private2"
public1 := model.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
public1.Name = "public1"
public2 := model.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
public2.Name = "public2"
subnets := model.Subnets{
private1,
private2,
public1,
public2,
}
if !reflect.DeepEqual(c.AllSubnets(), subnets) {
t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
}
publicSubnets := model.Subnets{
public1,
public2,
}
if !reflect.DeepEqual(c.Controller.Subnets, publicSubnets) {
t.Errorf("Controller subnets didn't match: expected=%v actual=%v", publicSubnets, c.Controller.Subnets)
}
if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) {
t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", publicSubnets, c.Controller.LoadBalancer.Subnets)
}
if !reflect.DeepEqual(c.Etcd.Subnets, publicSubnets) {
t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", publicSubnets, c.Etcd.Subnets)
}
for i, s := range c.PrivateSubnets() {
if !s.ManageNATGateway() {
t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i)
}
if s.ManageRouteToInternet() {
t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
}
}
},
},
},
{
context: "WithNetworkTopologyControllerPrivateLB",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
# routeTableId must be omitted
# See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-275962332
# routeTableId: rtb-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
private: true
- name: private2
availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
private: true
- name: public1
availabilityZone: us-west-1a
instanceCIDR: "10.0.3.0/24"
- name: public2
availabilityZone: us-west-1b
instanceCIDR: "10.0.4.0/24"
controller:
subnets:
- name: private1
- name: private2
loadBalancer:
private: true
etcd:
subnets:
- name: private1
- name: private2
worker:
nodePools:
- name: pool1
subnets:
- name: public1
- name: public2
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
everyPublicSubnetHasRouteToIGW,
hasTwoManagedNGWsAndEIPs,
func(c *config.Config, t *testing.T) {
private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24")
private1.Name = "private1"
private2 := model.NewPrivateSubnet("us-west-1b", "10.0.2.0/24")
private2.Name = "private2"
public1 := model.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
public1.Name = "public1"
public2 := model.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
public2.Name = "public2"
subnets := model.Subnets{
private1,
private2,
public1,
public2,
}
if !reflect.DeepEqual(c.AllSubnets(), subnets) {
t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
}
importedPublicSubnets := model.Subnets{
model.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${ControlPlaneStackName}-Public1"}}`),
model.NewPublicSubnetFromFn("us-west-1b", `{"Fn::ImportValue":{"Fn::Sub":"${ControlPlaneStackName}-Public2"}}`),
}
p := c.NodePools[0]
if !reflect.DeepEqual(p.Subnets, importedPublicSubnets) {
t.Errorf("Worker subnets didn't match: expected=%v actual=%v", importedPublicSubnets, p.Subnets)
}
privateSubnets := model.Subnets{
private1,
private2,
}
if !reflect.DeepEqual(c.Controller.Subnets, privateSubnets) {
t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets)
}
if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, privateSubnets) {
t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.LoadBalancer.Subnets)
}
if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) {
t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets)
}
for i, s := range c.PrivateSubnets() {
if !s.ManageNATGateway() {
t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i)
}
if s.ManageRouteToInternet() {
t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
}
}
},
},
},
{
context: "WithNetworkTopologyControllerPublicLB",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
# routeTableId must be omitted
# See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-275962332
# routeTableId: rtb-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
private: true
- name: private2
availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
private: true
- name: public1
availabilityZone: us-west-1a
instanceCIDR: "10.0.3.0/24"
- name: public2
availabilityZone: us-west-1b
instanceCIDR: "10.0.4.0/24"
controller:
loadBalancer:
private: false
etcd:
subnets:
- name: private1
- name: private2
worker:
nodePools:
- name: pool1
subnets:
- name: public1
- name: public2
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
everyPublicSubnetHasRouteToIGW,
hasTwoManagedNGWsAndEIPs,
func(c *config.Config, t *testing.T) {
private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24")
private1.Name = "private1"
private2 := model.NewPrivateSubnet("us-west-1b", "10.0.2.0/24")
private2.Name = "private2"
public1 := model.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
public1.Name = "public1"
public2 := model.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
public2.Name = "public2"
subnets := model.Subnets{
private1,
private2,
public1,
public2,
}
publicSubnets := model.Subnets{
public1,
public2,
}
privateSubnets := model.Subnets{
private1,
private2,
}
importedPublicSubnets := model.Subnets{
model.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${ControlPlaneStackName}-Public1"}}`),
model.NewPublicSubnetFromFn("us-west-1b", `{"Fn::ImportValue":{"Fn::Sub":"${ControlPlaneStackName}-Public2"}}`),
}
if !reflect.DeepEqual(c.AllSubnets(), subnets) {
t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
}
p := c.NodePools[0]
if !reflect.DeepEqual(p.Subnets, importedPublicSubnets) {
t.Errorf("Worker subnets didn't match: expected=%v actual=%v", importedPublicSubnets, p.Subnets)
}
if !reflect.DeepEqual(c.Controller.Subnets, publicSubnets) {
t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets)
}
if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) {
t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.LoadBalancer.Subnets)
}
if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) {
t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets)
}
for i, s := range c.PrivateSubnets() {
if !s.ManageNATGateway() {
t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i)
}
if s.ManageRouteToInternet() {
t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
}
}
},
},
},
{
context: "WithNetworkTopologyExistingVaryingSubnets",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
id: subnet-1
private: true
- name: private2
availabilityZone: us-west-1b
idFromStackOutput: mycluster-private-subnet-1
private: true
- name: public1
availabilityZone: us-west-1a
id: subnet-2
- name: public2
availabilityZone: us-west-1b
idFromStackOutput: mycluster-public-subnet-1
controller:
loadBalancer:
private: false
etcd:
subnets:
- name: private1
- name: private2
worker:
nodePools:
- name: pool1
subnets:
- name: public1
- name: public2
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
hasNoNGWsOrEIPsOrRoutes,
func(c *config.Config, t *testing.T) {
private1 := model.NewExistingPrivateSubnet("us-west-1a", "subnet-1")
private1.Name = "private1"
private2 := model.NewImportedPrivateSubnet("us-west-1b", "mycluster-private-subnet-1")
private2.Name = "private2"
public1 := model.NewExistingPublicSubnet("us-west-1a", "subnet-2")
public1.Name = "public1"
public2 := model.NewImportedPublicSubnet("us-west-1b", "mycluster-public-subnet-1")
public2.Name = "public2"
subnets := model.Subnets{
private1,
private2,
public1,
public2,
}
publicSubnets := model.Subnets{
public1,
public2,
}
privateSubnets := model.Subnets{
private1,
private2,
}
if !reflect.DeepEqual(c.AllSubnets(), subnets) {
t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
}
p := c.NodePools[0]
if !reflect.DeepEqual(p.Subnets, publicSubnets) {
t.Errorf("Worker subnets didn't match: expected=%v actual=%v", publicSubnets, p.Subnets)
}
if !reflect.DeepEqual(c.Controller.Subnets, publicSubnets) {
t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets)
}
if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) {
t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.LoadBalancer.Subnets)
}
if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) {
t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets)
}
for i, s := range c.PrivateSubnets() {
if s.ManageNATGateway() {
t.Errorf("NAT gateway for the existing private subnet #%d should not be created by kube-aws", i)
}
if s.ManageRouteToInternet() {
t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
}
}
},
},
},
{
context: "WithNetworkTopologyAllExistingPrivateSubnets",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + fmt.Sprintf(`
vpc:
id: vpc-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
id: subnet-1
private: true
- name: private2
availabilityZone: us-west-1b
idFromStackOutput: mycluster-private-subnet-1
private: true
controller:
subnets:
- name: private1
- name: private2
etcd:
subnets:
- name: private1
- name: private2
worker:
nodePools:
- name: pool1
subnets:
- name: private1
- name: private2
apiEndpoints:
- name: public
dnsName: "%s"
loadBalancer:
hostedZone:
id: hostedzone-xxxx
private: true
`, kubeAwsSettings.externalDNSName),
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
hasNoNGWsOrEIPsOrRoutes,
},
},
{
context: "WithNetworkTopologyAllExistingPublicSubnets",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
subnets:
- name: public1
availabilityZone: us-west-1a
id: subnet-2
- name: public2
availabilityZone: us-west-1b
idFromStackOutput: mycluster-public-subnet-1
etcd:
subnets:
- name: public1
- name: public2
worker:
nodePools:
- name: pool1
subnets:
- name: public1
- name: public2
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
hasNoNGWsOrEIPsOrRoutes,
},
},
{
context: "WithNetworkTopologyExistingNATGateways",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
private: true
natGateway:
id: ngw-11111111
- name: private2
availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
private: true
natGateway:
id: ngw-22222222
- name: public1
availabilityZone: us-west-1a
instanceCIDR: "10.0.3.0/24"
- name: public2
availabilityZone: us-west-1b
instanceCIDR: "10.0.4.0/24"
etcd:
subnets:
- name: private1
- name: private2
worker:
nodePools:
- name: pool1
subnets:
- name: public1
- name: public2
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
hasNoManagedNGWsButSpecificNumOfRoutesToUnmanagedNGWs(2),
func(c *config.Config, t *testing.T) {
private1 := model.NewPrivateSubnetWithPreconfiguredNATGateway("us-west-1a", "10.0.1.0/24", "ngw-11111111")
private1.Name = "private1"
private2 := model.NewPrivateSubnetWithPreconfiguredNATGateway("us-west-1b", "10.0.2.0/24", "ngw-22222222")
private2.Name = "private2"
public1 := model.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
public1.Name = "public1"
public2 := model.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
public2.Name = "public2"
subnets := model.Subnets{
private1,
private2,
public1,
public2,
}
publicSubnets := model.Subnets{
public1,
public2,
}
privateSubnets := model.Subnets{
private1,
private2,
}
importedPublicSubnets := model.Subnets{
model.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${ControlPlaneStackName}-Public1"}}`),
model.NewPublicSubnetFromFn("us-west-1b", `{"Fn::ImportValue":{"Fn::Sub":"${ControlPlaneStackName}-Public2"}}`),
}
if !reflect.DeepEqual(c.AllSubnets(), subnets) {
t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
}
p := c.NodePools[0]
if !reflect.DeepEqual(p.Subnets, importedPublicSubnets) {
t.Errorf("Worker subnets didn't match: expected=%v actual=%v", importedPublicSubnets, p.Subnets)
}
if !reflect.DeepEqual(c.Controller.Subnets, publicSubnets) {
t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets)
}
if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) {
t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.LoadBalancer.Subnets)
}
if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) {
t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets)
}
for i, s := range c.PrivateSubnets() {
if s.ManageNATGateway() {
t.Errorf("NAT gateway for the existing private subnet #%d should not be created by kube-aws", i)
}
if s.ManageRouteToInternet() {
t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
}
}
},
},
},
{
context: "WithNetworkTopologyExistingNATGatewayEIPs",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
private: true
natGateway:
eipAllocationId: eipalloc-11111111
- name: private2
availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
private: true
natGateway:
eipAllocationId: eipalloc-22222222
- name: public1
availabilityZone: us-west-1a
instanceCIDR: "10.0.3.0/24"
- name: public2
availabilityZone: us-west-1b
instanceCIDR: "10.0.4.0/24"
etcd:
subnets:
- name: private1
- name: private2
worker:
nodePools:
- name: pool1
subnets:
- name: public1
- name: public2
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
hasSpecificNumOfManagedNGWsWithUnmanagedEIPs(2),
hasPrivateSubnetsWithManagedNGWs(2),
func(c *config.Config, t *testing.T) {
private1 := model.NewPrivateSubnetWithPreconfiguredNATGatewayEIP("us-west-1a", "10.0.1.0/24", "eipalloc-11111111")
private1.Name = "private1"
private2 := model.NewPrivateSubnetWithPreconfiguredNATGatewayEIP("us-west-1b", "10.0.2.0/24", "eipalloc-22222222")
private2.Name = "private2"
public1 := model.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
public1.Name = "public1"
public2 := model.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
public2.Name = "public2"
subnets := model.Subnets{
private1,
private2,
public1,
public2,
}
publicSubnets := model.Subnets{
public1,
public2,
}
privateSubnets := model.Subnets{
private1,
private2,
}
importedPublicSubnets := model.Subnets{
model.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${ControlPlaneStackName}-Public1"}}`),
model.NewPublicSubnetFromFn("us-west-1b", `{"Fn::ImportValue":{"Fn::Sub":"${ControlPlaneStackName}-Public2"}}`),
}
if !reflect.DeepEqual(c.AllSubnets(), subnets) {
t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
}
p := c.NodePools[0]
if !reflect.DeepEqual(p.Subnets, importedPublicSubnets) {
t.Errorf("Worker subnets didn't match: expected=%+v actual=%+v", importedPublicSubnets, p.Subnets)
}
if !reflect.DeepEqual(c.Controller.Subnets, publicSubnets) {
t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets)
}
if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) {
t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.LoadBalancer.Subnets)
}
if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) {
t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets)
}
},
},
},
{
context: "WithNetworkTopologyVaryingPublicSubnets",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
#required only for the managed subnet "public1"
# "public2" is assumed to have an existing route table and an igw already associated to it
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: public1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
- name: public2
availabilityZone: us-west-1b
id: subnet-2
controller:
loadBalancer:
private: false
etcd:
subnets:
- name: public1
- name: public2
worker:
nodePools:
- name: pool1
subnets:
- name: public1
- name: public2
`,
assertConfig: []ConfigTester{},
},
{
context: "WithSpotFleetEnabled",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
spotFleet:
targetCapacity: 10
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
hasDefaultLaunchSpecifications,
spotFleetBasedNodePoolHasWaitSignalDisabled,
},
},
{
context: "WithSpotFleetEnabledWithCustomIamRole",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
spotFleet:
targetCapacity: 10
iamFleetRoleArn: custom-iam-role
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
hasDefaultLaunchSpecifications,
spotFleetBasedNodePoolHasWaitSignalDisabled,
},
},
{
context: "WithSpotFleetWithCustomGp2RootVolumeSettings",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
spotFleet:
targetCapacity: 10
unitRootVolumeSize: 40
launchSpecifications:
- weightedCapacity: 1
instanceType: c4.large
- weightedCapacity: 2
instanceType: c4.xlarge
rootVolume:
size: 100
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
spotFleetBasedNodePoolHasWaitSignalDisabled,
func(c *config.Config, t *testing.T) {
expected := []model.LaunchSpecification{
{
WeightedCapacity: 1,
InstanceType: "c4.large",
SpotPrice: "",
// RootVolumeSize was not specified in the configYaml but should default to workerRootVolumeSize * weightedCapacity
// RootVolumeType was not specified in the configYaml but should default to "gp2"
RootVolume: model.NewGp2RootVolume(40),
},
{
WeightedCapacity: 2,
InstanceType: "c4.xlarge",
SpotPrice: "",
RootVolume: model.NewGp2RootVolume(100),
},
}
p := c.NodePools[0]
actual := p.NodePoolConfig.SpotFleet.LaunchSpecifications
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"LaunchSpecifications didn't match: expected=%v actual=%v",
expected,
actual,
)
}
},
},
},
{
context: "WithSpotFleetWithCustomInstanceTypes",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
spotFleet:
targetCapacity: 10
unitRootVolumeSize: 40
launchSpecifications:
- weightedCapacity: 1
instanceType: m4.large
- weightedCapacity: 2
instanceType: m4.xlarge
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
spotFleetBasedNodePoolHasWaitSignalDisabled,
func(c *config.Config, t *testing.T) {
expected := []model.LaunchSpecification{
{
WeightedCapacity: 1,
InstanceType: "m4.large",
SpotPrice: "",
// RootVolumeType was not specified in the configYaml but should default to "gp2"
RootVolume: model.NewGp2RootVolume(40),
},
{
WeightedCapacity: 2,
InstanceType: "m4.xlarge",
SpotPrice: "",
RootVolume: model.NewGp2RootVolume(80),
},
}
p := c.NodePools[0]
actual := p.NodePoolConfig.SpotFleet.LaunchSpecifications
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"LaunchSpecifications didn't match: expected=%v actual=%v",
expected,
actual,
)
}
},
},
},
{
context: "WithSpotFleetWithCustomIo1RootVolumeSettings",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
spotFleet:
targetCapacity: 10
rootVolumeType: io1
unitRootVolumeSize: 40
unitRootVolumeIOPS: 100
launchSpecifications:
- weightedCapacity: 1
instanceType: c4.large
- weightedCapacity: 2
instanceType: c4.xlarge
rootVolume:
iops: 500
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
spotFleetBasedNodePoolHasWaitSignalDisabled,
func(c *config.Config, t *testing.T) {
expected := []model.LaunchSpecification{
{
WeightedCapacity: 1,
InstanceType: "c4.large",
SpotPrice: "",
// RootVolumeSize was not specified in the configYaml but should default to workerRootVolumeSize * weightedCapacity
// RootVolumeIOPS was not specified in the configYaml but should default to workerRootVolumeIOPS * weightedCapacity
// RootVolumeType was not specified in the configYaml but should default to "io1"
RootVolume: model.NewIo1RootVolume(40, 100),
},
{
WeightedCapacity: 2,
InstanceType: "c4.xlarge",
SpotPrice: "",
// RootVolumeType was not specified in the configYaml but should default to "io1"
RootVolume: model.NewIo1RootVolume(80, 500),
},
}
p := c.NodePools[0]
actual := p.NodePoolConfig.SpotFleet.LaunchSpecifications
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"LaunchSpecifications didn't match: expected=%v actual=%v",
expected,
actual,
)
}
},
},
},
{
context: "WithVpcIdSpecified",
configYaml: minimalValidConfigYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
vpcId := "vpc-1a2b3c4d"
if c.VPC.ID != vpcId {
t.Errorf("vpc id didn't match: expected=%v actual=%v", vpcId, c.VPC.ID)
}
igwId := "igw-1a2b3c4d"
if c.InternetGateway.ID != igwId {
t.Errorf("internet gateway id didn't match: expected=%v actual=%v", igwId, c.InternetGateway.ID)
}
},
},
},
{
context: "WithLegacyVpcAndIGWIdSpecified",
configYaml: minimalValidConfigYaml + `
vpcId: vpc-1a2b3c4d
internetGatewayId: igw-1a2b3c4d
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
vpcId := "vpc-1a2b3c4d"
if c.VPC.ID != vpcId {
t.Errorf("vpc id didn't match: expected=%v actual=%v", vpcId, c.VPC.ID)
}
igwId := "igw-1a2b3c4d"
if c.InternetGateway.ID != igwId {
t.Errorf("internet gateway id didn't match: expected=%v actual=%v", igwId, c.InternetGateway.ID)
}
},
},
},
{
context: "WithVpcIdAndRouteTableIdSpecified",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
subnets:
- name: Subnet0
availabilityZone: ` + firstAz + `
instanceCIDR: "10.0.0.0/24"
routeTable:
id: rtb-1a2b3c4d
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
subnet1 := model.NewPublicSubnetWithPreconfiguredRouteTable(firstAz, "10.0.0.0/24", "rtb-1a2b3c4d")
subnet1.Name = "Subnet0"
subnets := model.Subnets{
subnet1,
}
expected := controlplane_config.EtcdSettings{
Etcd: model.Etcd{
EC2Instance: model.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
RootVolume: model.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
Tenancy: "default",
},
DataVolume: model.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Subnets: subnets,
},
}
actual := c.EtcdSettings
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"EtcdSettings didn't match: expected=%v actual=%v",
expected,
actual,
)
}
},
},
},
{
context: "WithWorkerManagedIamRoleName",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
iam:
role:
name: "myManagedRole"
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
if c.NodePools[0].IAMConfig.Role.Name != "myManagedRole" {
t.Errorf("iam.role.name: expected=myManagedRole actual=%s", c.NodePools[0].IAMConfig.Role.Name)
}
},
},
},
{
context: "WithWorkerManagedPolicies",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
iam:
role:
managedPolicies:
- arn: "arn:aws:iam::aws:policy/AdministratorAccess"
- arn: "arn:aws:iam::000000000000:policy/myManagedPolicy"
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
if len(c.NodePools[0].IAMConfig.Role.ManagedPolicies) < 2 {
t.Errorf("iam.role.managedPolicies: incorrect number of policies expected=2 actual=%d", len(c.NodePools[0].IAMConfig.Role.ManagedPolicies))
}
if c.NodePools[0].IAMConfig.Role.ManagedPolicies[0].Arn != "arn:aws:iam::aws:policy/AdministratorAccess" {
t.Errorf("iam.role.managedPolicies: expected=arn:aws:iam::aws:policy/AdministratorAccess actual=%s", c.NodePools[0].IAMConfig.Role.ManagedPolicies[0].Arn)
}
if c.NodePools[0].IAMConfig.Role.ManagedPolicies[1].Arn != "arn:aws:iam::000000000000:policy/myManagedPolicy" {
t.Errorf("iam.role.managedPolicies: expected=arn:aws:iam::000000000000:policy/myManagedPolicy actual=%s", c.NodePools[0].IAMConfig.Role.ManagedPolicies[1].Arn)
}
},
},
},
{
context: "WithWorkerExistingInstanceProfile",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
iam:
instanceProfile:
arn: "arn:aws:iam::000000000000:instance-profile/myInstanceProfile"
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
if c.NodePools[0].IAMConfig.InstanceProfile.Arn != "arn:aws:iam::000000000000:instance-profile/myInstanceProfile" {
t.Errorf("existingInstanceProfile: expected=arn:aws:iam::000000000000:instance-profile/myInstanceProfile actual=%s", c.NodePools[0].IAMConfig.InstanceProfile.Arn)
}
},
},
},
{
context: "WithWorkerAndControllerExistingInstanceProfile",
configYaml: minimalValidConfigYaml + `
controller:
iam:
instanceProfile:
arn: "arn:aws:iam::000000000000:instance-profile/myControllerInstanceProfile"
worker:
nodePools:
- name: pool1
iam:
instanceProfile:
arn: "arn:aws:iam::000000000000:instance-profile/myInstanceProfile"
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
if c.Controller.IAMConfig.InstanceProfile.Arn != "arn:aws:iam::000000000000:instance-profile/myControllerInstanceProfile" {
t.Errorf("existingInstanceProfile: expected=arn:aws:iam::000000000000:instance-profile/myControllerInstanceProfile actual=%s", c.Controller.IAMConfig.InstanceProfile.Arn)
}
if c.NodePools[0].IAMConfig.InstanceProfile.Arn != "arn:aws:iam::000000000000:instance-profile/myInstanceProfile" {
t.Errorf("existingInstanceProfile: expected=arn:aws:iam::000000000000:instance-profile/myInstanceProfile actual=%s", c.NodePools[0].IAMConfig.InstanceProfile.Arn)
}
},
},
},
{
context: "WithWorkerSecurityGroupIds",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
securityGroupIds:
- sg-12345678
- sg-abcdefab
- sg-23456789
- sg-bcdefabc
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
p := c.NodePools[0]
expectedWorkerSecurityGroupIds := []string{
`sg-12345678`, `sg-abcdefab`, `sg-23456789`, `sg-bcdefabc`,
}
if !reflect.DeepEqual(p.SecurityGroupIds, expectedWorkerSecurityGroupIds) {
t.Errorf("WorkerSecurityGroupIds didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupIds, p.SecurityGroupIds)
}
expectedWorkerSecurityGroupRefs := []string{
`"sg-12345678"`, `"sg-abcdefab"`, `"sg-23456789"`, `"sg-bcdefabc"`,
`{"Fn::ImportValue" : {"Fn::Sub" : "${ControlPlaneStackName}-WorkerSecurityGroup"}}`,
}
if !reflect.DeepEqual(p.SecurityGroupRefs(), expectedWorkerSecurityGroupRefs) {
t.Errorf("SecurityGroupRefs didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupRefs, p.SecurityGroupRefs())
}
},
},
},
{
context: "WithWorkerAndLBSecurityGroupIds",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
securityGroupIds:
- sg-12345678
- sg-abcdefab
loadBalancer:
enabled: true
securityGroupIds:
- sg-23456789
- sg-bcdefabc
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
func(c *config.Config, t *testing.T) {
p := c.NodePools[0]
expectedWorkerSecurityGroupIds := []string{
`sg-12345678`, `sg-abcdefab`,
}
if !reflect.DeepEqual(p.SecurityGroupIds, expectedWorkerSecurityGroupIds) {
t.Errorf("WorkerSecurityGroupIds didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupIds, p.SecurityGroupIds)
}
expectedLBSecurityGroupIds := []string{
`sg-23456789`, `sg-bcdefabc`,
}
if !reflect.DeepEqual(p.LoadBalancer.SecurityGroupIds, expectedLBSecurityGroupIds) {
t.Errorf("LBSecurityGroupIds didn't match: expected=%v actual=%v", expectedLBSecurityGroupIds, p.LoadBalancer.SecurityGroupIds)
}
expectedWorkerSecurityGroupRefs := []string{
`"sg-23456789"`, `"sg-bcdefabc"`, `"sg-12345678"`, `"sg-abcdefab"`,
`{"Fn::ImportValue" : {"Fn::Sub" : "${ControlPlaneStackName}-WorkerSecurityGroup"}}`,
}
if !reflect.DeepEqual(p.SecurityGroupRefs(), expectedWorkerSecurityGroupRefs) {
t.Errorf("SecurityGroupRefs didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupRefs, p.SecurityGroupRefs())
}
},
},
},
{
context: "WithWorkerAndALBSecurityGroupIds",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
securityGroupIds:
- sg-12345678
- sg-abcdefab
targetGroup:
enabled: true
securityGroupIds:
- sg-23456789
- sg-bcdefabc
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
func(c *config.Config, t *testing.T) {
p := c.NodePools[0]
expectedWorkerSecurityGroupIds := []string{
`sg-12345678`, `sg-abcdefab`,
}
if !reflect.DeepEqual(p.SecurityGroupIds, expectedWorkerSecurityGroupIds) {
t.Errorf("WorkerSecurityGroupIds didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupIds, p.SecurityGroupIds)
}
expectedALBSecurityGroupIds := []string{
`sg-23456789`, `sg-bcdefabc`,
}
if !reflect.DeepEqual(p.TargetGroup.SecurityGroupIds, expectedALBSecurityGroupIds) {
t.Errorf("LBSecurityGroupIds didn't match: expected=%v actual=%v", expectedALBSecurityGroupIds, p.TargetGroup.SecurityGroupIds)
}
expectedWorkerSecurityGroupRefs := []string{
`"sg-23456789"`, `"sg-bcdefabc"`, `"sg-12345678"`, `"sg-abcdefab"`,
`{"Fn::ImportValue" : {"Fn::Sub" : "${ControlPlaneStackName}-WorkerSecurityGroup"}}`,
}
if !reflect.DeepEqual(p.SecurityGroupRefs(), expectedWorkerSecurityGroupRefs) {
t.Errorf("SecurityGroupRefs didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupRefs, p.SecurityGroupRefs())
}
},
},
},
{
context: "WithDedicatedInstanceTenancy",
configYaml: minimalValidConfigYaml + `
workerTenancy: dedicated
controller:
tenancy: dedicated
etcd:
tenancy: dedicated
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
if c.Etcd.Tenancy != "dedicated" {
t.Errorf("Etcd.Tenancy didn't match: expected=dedicated actual=%s", c.Etcd.Tenancy)
}
if c.WorkerTenancy != "dedicated" {
t.Errorf("WorkerTenancy didn't match: expected=dedicated actual=%s", c.WorkerTenancy)
}
if c.Controller.Tenancy != "dedicated" {
t.Errorf("Controller.Tenancy didn't match: expected=dedicated actual=%s", c.Controller.Tenancy)
}
},
},
},
{
context: "WithControllerNodeLabels",
configYaml: minimalValidConfigYaml + `
controller:
nodeLabels:
kube-aws.coreos.com/role: controller
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
expected := model.NodeLabels{"kube-aws.coreos.com/role": "controller"}
actual := c.NodeLabels()
if !reflect.DeepEqual(expected, actual) {
t.Errorf("unexpected controller node labels: expected=%v, actual=%v", expected, actual)
}
},
},
},
{
context: "WithSSHAccessAllowedSourceCIDRsSpecified",
configYaml: minimalValidConfigYaml + `
sshAccessAllowedSourceCIDRs:
- 1.2.3.255/32
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
l := len(c.SSHAccessAllowedSourceCIDRs)
if l != 1 {
t.Errorf("unexpected size of sshAccessAllowedSouceCIDRs: %d", l)
t.FailNow()
}
actual := c.SSHAccessAllowedSourceCIDRs[0].String()
expected := "1.2.3.255/32"
if actual != expected {
t.Errorf("unexpected cidr in sshAccessAllowedSourecCIDRs[0]. expected = %s, actual = %s", expected, actual)
}
},
},
},
{
context: "WithSSHAccessAllowedSourceCIDRsOmitted",
configYaml: minimalValidConfigYaml,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
l := len(c.SSHAccessAllowedSourceCIDRs)
if l != 1 {
t.Errorf("unexpected size of sshAccessAllowedSouceCIDRs: %d", l)
t.FailNow()
}
actual := c.SSHAccessAllowedSourceCIDRs[0].String()
expected := "0.0.0.0/0"
if actual != expected {
t.Errorf("unexpected cidr in sshAccessAllowedSourecCIDRs[0]. expected = %s, actual = %s", expected, actual)
}
},
},
},
{
context: "WithSSHAccessAllowedSourceCIDRsEmptied",
configYaml: minimalValidConfigYaml + `
sshAccessAllowedSourceCIDRs:
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
l := len(c.SSHAccessAllowedSourceCIDRs)
if l != 0 {
t.Errorf("unexpected size of sshAccessAllowedSouceCIDRs: %d", l)
t.FailNow()
}
},
},
},
{
context: "WithWorkerWithoutGPUSettings",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
enabled := c.NodePools[0].Gpu.Nvidia.Enabled
if enabled {
t.Errorf("unexpected enabled of gpu.nvidia: %v. its default value should be false", enabled)
t.FailNow()
}
},
},
},
{
context: "WithGPUEnabledWorker",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
instanceType: p2.xlarge
gpu:
nvidia:
enabled: true
version: "123.45"
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
enabled := c.NodePools[0].Gpu.Nvidia.Enabled
version := c.NodePools[0].Gpu.Nvidia.Version
if !enabled {
t.Errorf("unexpected enabled value of gpu.nvidia: %v.", enabled)
t.FailNow()
}
if version != "123.45" {
t.Errorf("unexpected version value of gpu.nvidia: %v.", version)
t.FailNow()
}
},
},
},
{
context: "WithGPUDisabledWorker",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
gpu:
nvidia:
enabled: false
version: "123.45"
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
enabled := c.NodePools[0].Gpu.Nvidia.Enabled
version := c.NodePools[0].Gpu.Nvidia.Version
if enabled {
t.Errorf("unexpected enabled value of gpu.nvidia: %v.", enabled)
t.FailNow()
}
if version != "123.45" {
t.Errorf("unexpected version value of gpu.nvidia: %v.", version)
t.FailNow()
}
},
},
},
}
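// For each valid case: parse the YAML into a config, run the per-case config
// assertions, then render and validate every stack template using dummy assets.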
for _, validCase := range validCases {
t.Run(validCase.context, func(t *testing.T) {
configBytes := validCase.configYaml
// TODO Allow including plugins in test data?
plugins := []*pluginmodel.Plugin{}
providedConfig, err := config.ConfigFromBytesWithEncryptService([]byte(configBytes), plugins, helper.DummyEncryptService{})
if err != nil {
t.Errorf("failed to parse config %s: %v", configBytes, err)
t.FailNow()
}
t.Run("AssertConfig", func(t *testing.T) {
for _, assertion := range validCase.assertConfig {
assertion(providedConfig, t)
}
})
helper.WithDummyCredentials(func(dummyAssetsDir string) {
var stackTemplateOptions = root.NewOptions(false, false)
stackTemplateOptions.AssetsDir = dummyAssetsDir
stackTemplateOptions.ControllerTmplFile = "../../core/controlplane/config/templates/cloud-config-controller"
stackTemplateOptions.WorkerTmplFile = "../../core/controlplane/config/templates/cloud-config-worker"
stackTemplateOptions.EtcdTmplFile = "../../core/controlplane/config/templates/cloud-config-etcd"
stackTemplateOptions.RootStackTemplateTmplFile = "../../core/root/config/templates/stack-template.json"
stackTemplateOptions.NodePoolStackTemplateTmplFile = "../../core/nodepool/config/templates/stack-template.json"
stackTemplateOptions.ControlPlaneStackTemplateTmplFile = "../../core/controlplane/config/templates/stack-template.json"
cluster, err := root.ClusterFromConfig(providedConfig, stackTemplateOptions, false)
if err != nil {
t.Errorf("failed to create cluster driver : %v", err)
t.FailNow()
}
t.Run("AssertCluster", func(t *testing.T) {
for _, assertion := range validCase.assertCluster {
assertion(cluster, t)
}
})
t.Run("ValidateTemplates", func(t *testing.T) {
if err := cluster.ValidateTemplates(); err != nil {
t.Errorf("failed to render stack template: %v", err)
}
})
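// ValidateStack needs real AWS access (and KUBE_AWS_S3_DIR_URI), so it only
// runs when KUBE_AWS_INTEGRATION_TEST is set.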
if os.Getenv("KUBE_AWS_INTEGRATION_TEST") == "" {
t.Skipf("`export KUBE_AWS_INTEGRATION_TEST=1` is required to run integration tests. Skipping.")
} else {
t.Run("ValidateStack", func(t *testing.T) {
if !s3URIExists {
t.Errorf("failed to obtain value for KUBE_AWS_S3_DIR_URI")
t.FailNow()
}
report, err := cluster.ValidateStack()
if err != nil {
t.Errorf("failed to validate stack: %s %v", report, err)
}
})
}
})
})
}
parseErrorCases := []struct {
context string
configYaml string
expectedErrorMessage string
}{
{
context: "WithAPIEndpointLBAPIAccessAllowedSourceCIDRsEmptied",
configYaml: configYamlWithoutExernalDNSName + `
apiEndpoints:
- name: default
dnsName: k8s.example.com
loadBalancer:
apiAccessAllowedSourceCIDRs:
hostedZone:
id: a1b2c4
`,
expectedErrorMessage: `invalid cluster: invalid apiEndpoint "default" at index 0: invalid loadBalancer: either apiAccessAllowedSourceCIDRs or securityGroupIds must be present. Try not to explicitly empty apiAccessAllowedSourceCIDRs or set one or more securityGroupIDs`,
},
{
context: "WithAutoscalingEnabledButClusterAutoscalerIsDefault",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
autoscaling:
clusterAutoscaler:
enabled: true
`,
expectedErrorMessage: "Autoscaling with cluster-autoscaler can't be enabled for node pools because " +
"you didn't enabled the cluster-autoscaler addon. Enable it by turning on `addons.clusterAutoscaler.enabled`",
},
{
context: "WithAutoscalingEnabledButClusterAutoscalerIsNot",
configYaml: minimalValidConfigYaml + `
addons:
clusterAutoscaler:
enabled: false
worker:
nodePools:
- name: pool1
autoscaling:
clusterAutoscaler:
enabled: true
`,
expectedErrorMessage: "Autoscaling with cluster-autoscaler can't be enabled for node pools because " +
"you didn't enabled the cluster-autoscaler addon. Enable it by turning on `addons.clusterAutoscaler.enabled`",
},
{
context: "WithClusterAutoscalerEnabledForControlPlane",
configYaml: minimalValidConfigYaml + `
controller:
autoscaling:
clusterAutoscaler:
enabled: true
`,
expectedErrorMessage: "cluster-autoscaler can't be enabled for a control plane because " +
"allowing so for a group of controller nodes spreading over 2 or more availability zones " +
"results in unreliability while scaling nodes out.",
},
{
// See https://github.com/kubernetes-incubator/kube-aws/issues/365
context: "WithClusterNameContainsDots",
configYaml: kubeAwsSettings.withClusterName("my.cluster").minimumValidClusterYaml(),
expectedErrorMessage: "clusterName(=my.cluster) is malformed. It must consist only of alphanumeric characters, colons, or hyphens",
},
{
context: "WithControllerTaint",
configYaml: minimalValidConfigYaml + `
controller:
taints:
- key: foo
value: bar
effect: NoSchedule
`,
expectedErrorMessage: "`controller.taints` must not be specified because tainting controller nodes breaks the cluster",
},
{
context: "WithElasticFileSystemIdInSpecificNodePoolWithManagedSubnets",
configYaml: mainClusterYaml + `
subnets:
- name: managed1
availabilityZone: us-west-1a
instanceCIDR: 10.0.1.0/24
worker:
nodePools:
- name: pool1
subnets:
- name: managed1
elasticFileSystemId: efs-12345
- name: pool2
`,
expectedErrorMessage: "invalid node pool at index 0: elasticFileSystemId cannot be specified for a node pool in managed subnet(s), but was: efs-12345",
},
{
context: "WithEtcdAutomatedDisasterRecoveryRequiresAutomatedSnapshot",
configYaml: minimalValidConfigYaml + `
etcd:
version: 3
snapshot:
automated: false
disasterRecovery:
automated: true
`,
expectedErrorMessage: "`etcd.disasterRecovery.automated` is set to true but `etcd.snapshot.automated` is not - automated disaster recovery requires snapshot to be also automated",
},
{
context: "WithEtcdAutomatedDisasterRecoveryDoesntSupportEtcd2",
configYaml: minimalValidConfigYaml + `
etcd:
version: 2
snapshot:
automated: true
disasterRecovery:
automated: false
`,
expectedErrorMessage: "`etcd.snapshot.automated` is set to true for enabling automated snapshot. However the feature is available only for etcd version 3",
},
{
context: "WithEtcdAutomatedSnapshotDoesntSupportEtcd2",
configYaml: minimalValidConfigYaml + `
etcd:
version: 2
snapshot:
automated: false
disasterRecovery:
automated: true
`,
expectedErrorMessage: "`etcd.disasterRecovery.automated` is set to true for enabling automated disaster recovery. However the feature is available only for etcd version 3",
},
{
context: "WithInvalidNodeDrainTimeout",
configYaml: minimalValidConfigYaml + `
experimental:
nodeDrainer:
enabled: true
drainTimeout: 100
`,
expectedErrorMessage: "Drain timeout must be an integer between 1 and 60, but was 100",
},
{
context: "WithInvalidTaint",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
taints:
- key: foo
value: bar
effect: UnknownEffect
`,
expectedErrorMessage: "invalid taint effect: UnknownEffect",
},
{
context: "WithLegacyControllerSettingKeys",
configYaml: minimalValidConfigYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
routeTableId: rtb-1a2b3c4d
controllerCount: 2
controllerCreateTimeout: PT10M
controllerInstanceType: t2.large
controllerRootVolumeSize: 101
controllerRootVolumeType: io1
controllerRootVolumeIOPS: 102
controllerTenancy: dedicated
`,
expectedErrorMessage: "unknown keys found: controllerCount, controllerCreateTimeout, controllerInstanceType, controllerRootVolumeIOPS, controllerRootVolumeSize, controllerRootVolumeType, controllerTenancy",
},
{
context: "WithLegacyEtcdSettingKeys",
configYaml: minimalValidConfigYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
routeTableId: rtb-1a2b3c4d
etcdCount: 2
etcdTenancy: dedicated
etcdInstanceType: t2.large
etcdRootVolumeSize: 101
etcdRootVolumeType: io1
etcdRootVolumeIOPS: 102
etcdDataVolumeSize: 103
etcdDataVolumeType: io1
etcdDataVolumeIOPS: 104
etcdDataVolumeEncrypted: true
`,
expectedErrorMessage: "unknown keys found: etcdCount, etcdDataVolumeEncrypted, etcdDataVolumeIOPS, etcdDataVolumeSize, etcdDataVolumeType, etcdInstanceType, etcdRootVolumeIOPS, etcdRootVolumeSize, etcdRootVolumeType, etcdTenancy",
},
{
context: "WithAwsNodeLabelEnabledForTooLongClusterNameAndPoolName",
configYaml: minimalValidConfigYaml + `
# clusterName + nodePools[].name must be less than or equal to 25 characters, or the launch configuration name
# "mykubeawsclustername-mynestedstackname-1N2C4K3LLBEDZ-WorkersLC-BC2S9P3JG2QD" exceeds the limit of 63 characters
# See https://kubernetes.io/docs/user-guide/labels/#syntax-and-character-set
clusterName: my-cluster1 # 11 characters
worker:
nodePools:
- name: workernodepool1 # 15 characters
awsNodeLabels:
enabled: true
`,
expectedErrorMessage: "awsNodeLabels can't be enabled for node pool because the total number of characters in clusterName(=\"my-cluster1\") + node pool's name(=\"workernodepool1\") exceeds the limit of 25",
},
{
context: "WithAwsNodeLabelEnabledForTooLongClusterName",
configYaml: minimalValidConfigYaml + `
# clusterName must be less than or equal to 21 characters, or the launch configuration name
# "mykubeawsclustername-mynestedstackname-1N2C4K3LLBEDZ-ControllersLC-BC2S9P3JG2QD" exceeds the limit of 63 characters
# See https://kubernetes.io/docs/user-guide/labels/#syntax-and-character-set
clusterName: mycluster # 9
experimental:
awsNodeLabels:
enabled: true
`,
expectedErrorMessage: "awsNodeLabels can't be enabled for controllers because the total number of characters in clusterName(=\"mycluster\") exceeds the limit of 8",
},
{
context: "WithMultiAPIEndpointsInvalidLB",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
worker:
apiEndpointName: unversionedPublic
apiEndpoints:
- name: unversionedPublic
dnsName: api.example.com
loadBalancer:
id: elb-internet-facing
private: true
subnets:
- name: publicSubnet1
hostedZone:
id: hostedzone-public
`,
expectedErrorMessage: "invalid apiEndpoint \"unversionedPublic\" at index 0: invalid loadBalancer: type, private, subnets, hostedZone must be omitted when id is specified to reuse an existing ELB",
},
{
context: "WithMultiAPIEndpointsInvalidWorkerAPIEndpointName",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
worker:
# no API endpoint with that name exists!
apiEndpointName: unknownEndpoint
adminAPIEndpointName: versionedPublic
apiEndpoints:
- name: unversionedPublic
dnsName: api.example.com
loadBalancer:
subnets:
- name: publicSubnet1
hostedZone:
id: hostedzone-public
- name: versionedPublic
dnsName: apiv1.example.com
loadBalancer:
subnets:
- name: publicSubnet1
hostedZone:
id: hostedzone-public
`,
expectedErrorMessage: "invalid value for worker.apiEndpointName: no API endpoint named \"unknownEndpoint\" found",
},
{
context: "WithMultiAPIEndpointsInvalidWorkerNodePoolAPIEndpointName",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
worker:
# this one is ok but...
apiEndpointName: versionedPublic
nodePools:
- name: pool1
# this one is invalid; no API endpoint with this name exists!
apiEndpointName: unknownEndpoint
adminAPIEndpointName: versionedPublic
apiEndpoints:
- name: unversionedPublic
dnsName: api.example.com
loadBalancer:
subnets:
- name: publicSubnet1
hostedZone:
id: hostedzone-public
- name: versionedPublic
dnsName: apiv1.example.com
loadBalancer:
subnets:
- name: publicSubnet1
hostedZone:
id: hostedzone-public
`,
expectedErrorMessage: "invalid node pool at index 0: failed to find an API endpoint named \"unknownEndpoint\": no API endpoint named \"unknownEndpoint\" defined under the `apiEndpoints[]`",
},
{
context: "WithMultiAPIEndpointsMissingDNSName",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
apiEndpoints:
- name: unversionedPublic
dnsName:
loadBalancer:
hostedZone:
id: hostedzone-public
`,
expectedErrorMessage: "invalid apiEndpoint \"unversionedPublic\" at index 0: dnsName must be set",
},
{
context: "WithMultiAPIEndpointsMissingGlobalAPIEndpointName",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
worker:
nodePools:
- name: pool1
# this one is invalid; no API endpoint with this name exists!
apiEndpointName: unknownEndpoint
- name: pool1
# this one is invalid; missing apiEndpointName
adminAPIEndpointName: versionedPublic
apiEndpoints:
- name: unversionedPublic
dnsName: api.example.com
loadBalancer:
subnets:
- name: publicSubnet1
hostedZone:
id: hostedzone-public
- name: versionedPublic
dnsName: apiv1.example.com
loadBalancer:
subnets:
- name: publicSubnet1
hostedZone:
id: hostedzone-public
`,
expectedErrorMessage: "worker.apiEndpointName must not be empty when there're 2 or more API endpoints under the key `apiEndpoints` and one of worker.nodePools[] are missing apiEndpointName",
},
{
context: "WithMultiAPIEndpointsRecordSetImpliedBySubnetsMissingHostedZoneID",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
worker:
apiEndpointName: unversionedPublic
apiEndpoints:
- name: unversionedPublic
dnsName: api.example.com
loadBalancer:
# an internet-facing (which is the default) LB in the public subnet is going to be created with a corresponding record set
# however no hosted zone for the record set is provided!
subnets:
- name: publicSubnet1
# missing hosted zone id here!
`,
expectedErrorMessage: "invalid apiEndpoint \"unversionedPublic\" at index 0: invalid loadBalancer: missing hostedZone.id",
},
{
context: "WithMultiAPIEndpointsRecordSetImpliedByExplicitPublicMissingHostedZoneID",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
worker:
apiEndpointName: unversionedPublic
apiEndpoints:
- name: unversionedPublic
dnsName: api.example.com
loadBalancer:
# an internet-facing lb is going to be created with a corresponding record set
# however no hosted zone for the record set is provided!
private: false
# missing hosted zone id here!
`,
expectedErrorMessage: "invalid apiEndpoint \"unversionedPublic\" at index 0: invalid loadBalancer: missing hostedZone.id",
},
{
context: "WithMultiAPIEndpointsRecordSetImpliedByExplicitPrivateMissingHostedZoneID",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
- name: privateSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.2.0/24"
worker:
apiEndpointName: unversionedPublic
apiEndpoints:
- name: unversionedPublic
dnsName: api.example.com
loadBalancer:
# an internal lb is going to be created with a corresponding record set
# however no hosted zone for the record set is provided!
private: true
# missing hosted zone id here!
`,
expectedErrorMessage: "invalid apiEndpoint \"unversionedPublic\" at index 0: invalid loadBalancer: missing hostedZone.id",
},
{
context: "WithNetworkTopologyAllExistingPrivateSubnetsRejectingExistingIGW",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
id: subnet-1
private: true
controller:
loadBalancer:
private: true
etcd:
subnets:
- name: private1
worker:
nodePools:
- name: pool1
subnets:
- name: private1
`,
expectedErrorMessage: `internet gateway id can't be specified when all the subnets are existing private subnets`,
},
{
context: "WithNetworkTopologyAllExistingPublicSubnetsRejectingExistingIGW",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: public1
availabilityZone: us-west-1a
id: subnet-1
controller:
loadBalancer:
private: false
etcd:
subnets:
- name: public1
worker:
nodePools:
- name: pool1
subnets:
- name: public1
`,
expectedErrorMessage: `internet gateway id can't be specified when all the public subnets have existing route tables associated. kube-aws doesn't try to modify an exisinting route table to include a route to the internet gateway`,
},
{
context: "WithNetworkTopologyAllManagedPublicSubnetsWithExistingRouteTableRejectingExistingIGW",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: public1
availabilityZone: us-west-1a
instanceCIDR: 10.0.1.0/24
routeTable:
id: subnet-1
controller:
loadBalancer:
private: false
etcd:
subnets:
- name: public1
worker:
nodePools:
- name: pool1
subnets:
- name: public1
`,
expectedErrorMessage: `internet gateway id can't be specified when all the public subnets have existing route tables associated. kube-aws doesn't try to modify an exisinting route table to include a route to the internet gateway`,
},
{
context: "WithNetworkTopologyAllManagedPublicSubnetsMissingExistingIGW",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
# intentionally omitted:
#internetGateway:
# id: igw-1a2b3c4d
subnets:
- name: public1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
controller:
loadBalancer:
private: false
etcd:
subnets:
- name: public1
worker:
nodePools:
- name: pool1
subnets:
- name: public1
`,
expectedErrorMessage: `internet gateway id can't be omitted when there're one or more managed public subnets in an existing VPC`,
},
{
context: "WithNetworkTopologyAllPreconfiguredPrivateDeprecatedAndThenRemoved",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
# This, in combination with mapPublicIPs=false, had implied that the route table contains a route to a preconfigured NAT gateway
# See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-276008202
routeTableId: rtb-1a2b3c4d
# This had implied that all the subnets created by kube-aws should be private
mapPublicIPs: false
subnets:
- availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
# implies
# private: true
# routeTable
# id: rtb-1a2b3c4d
- availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
# implies
# private: true
# routeTable
# id: rtb-1a2b3c4d
`,
expectedErrorMessage: "internet gateway id can't be omitted when there're one or more managed public subnets in an existing VPC",
},
{
context: "WithNetworkTopologyAllPreconfiguredPublicDeprecatedAndThenRemoved",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
# This, in combination with mapPublicIPs=true, had implied that the route table contains a route to a preconfigured internet gateway
# See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-276008202
routeTableId: rtb-1a2b3c4d
# This had implied that all the subnets created by kube-aws should be public
mapPublicIPs: true
# internetGateway.id should be omitted as we assume that the route table specified by routeTableId already contain a route to one
#internetGateway:
# id:
subnets:
- availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
# implies
# private: false
# routeTable
# id: rtb-1a2b3c4d
- availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
# implies
# private: false
# routeTable
# id: rtb-1a2b3c4d
`,
expectedErrorMessage: "internet gateway id can't be omitted when there're one or more managed public subnets in an existing VPC",
},
{
context: "WithVpcIdAndVPCCIDRSpecified",
configYaml: minimalValidConfigYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
# vpcCIDR (10.1.0.0/16) does not contain instanceCIDR (10.0.1.0/24)
vpcCIDR: "10.1.0.0/16"
`,
},
{
context: "WithRouteTableIdSpecified",
configYaml: minimalValidConfigYaml + `
# vpc.id must be specified if routeTableId is specified
routeTableId: rtb-1a2b3c4d
`,
},
{
context: "WithWorkerSecurityGroupIds",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
securityGroupIds:
- sg-12345678
- sg-abcdefab
- sg-23456789
- sg-bcdefabc
- sg-34567890
`,
expectedErrorMessage: "number of user provided security groups must be less than or equal to 4 but was 5",
},
{
context: "WithWorkerAndLBSecurityGroupIds",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
securityGroupIds:
- sg-12345678
- sg-abcdefab
- sg-23456789
loadBalancer:
enabled: true
securityGroupIds:
- sg-bcdefabc
- sg-34567890
`,
expectedErrorMessage: "number of user provided security groups must be less than or equal to 4 but was 5",
},
{
context: "WithWorkerAndALBSecurityGroupIds",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
securityGroupIds:
- sg-12345678
- sg-abcdefab
- sg-23456789
targetGroup:
enabled: true
securityGroupIds:
- sg-bcdefabc
- sg-34567890
`,
expectedErrorMessage: "number of user provided security groups must be less than or equal to 4 but was 5",
},
{
context: "WithUnknownKeyInRoot",
configYaml: minimalValidConfigYaml + `
foo: bar
`,
expectedErrorMessage: "unknown keys found: foo",
},
{
context: "WithUnknownKeyInController",
configYaml: minimalValidConfigYaml + `
controller:
foo: 1
`,
expectedErrorMessage: "unknown keys found in controller: foo",
},
{
context: "WithUnknownKeyInControllerASG",
configYaml: minimalValidConfigYaml + `
controller:
autoScalingGroup:
foo: 1
`,
expectedErrorMessage: "unknown keys found in controller.autoScalingGroup: foo",
},
{
context: "WithUnknownKeyInEtcd",
configYaml: minimalValidConfigYaml + `
etcd:
foo: 1
`,
expectedErrorMessage: "unknown keys found in etcd: foo",
},
{
context: "WithUnknownKeyInWorkerNodePool",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
clusterAutoscaler:
enabled: true
`,
expectedErrorMessage: "unknown keys found in worker.nodePools[0]: clusterAutoscaler",
},
{
context: "WithUnknownKeyInWorkerNodePoolASG",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
autoScalingGroup:
foo: 1
`,
expectedErrorMessage: "unknown keys found in worker.nodePools[0].autoScalingGroup: foo",
},
{
context: "WithUnknownKeyInWorkerNodePoolSpotFleet",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
spotFleet:
bar: 1
`,
expectedErrorMessage: "unknown keys found in worker.nodePools[0].spotFleet: bar",
},
{
context: "WithUnknownKeyInWorkerNodePoolCA",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
autoscaling:
clusterAutoscaler:
baz: 1
`,
expectedErrorMessage: "unknown keys found in worker.nodePools[0].autoscaling.clusterAutoscaler: baz",
},
{
context: "WithUnknownKeyInAddons",
configYaml: minimalValidConfigYaml + `
addons:
blah: 5
`,
expectedErrorMessage: "unknown keys found in addons: blah",
},
{
context: "WithUnknownKeyInReschedulerAddon",
configYaml: minimalValidConfigYaml + `
addons:
rescheduler:
foo: yeah
`,
expectedErrorMessage: "unknown keys found in addons.rescheduler: foo",
},
{
context: "WithUnknownKeyInClusterAutoscalerAddon",
configYaml: minimalValidConfigYaml + `
addons:
clusterAutoscaler:
foo: yeah
`,
expectedErrorMessage: "unknown keys found in addons.clusterAutoscaler: foo",
},
{
context: "WithTooLongControllerIAMRoleName",
configYaml: kubeAwsSettings.withClusterName("kubeaws-it-main").withRegion("ap-northeast-1").minimumValidClusterYaml() + `
controller:
iam:
role:
name: foobarba-foobarba-foobarba-foobarba-foobarba-foobarba
`,
expectedErrorMessage: "IAM role name(=ap-northeast-1-foobarba-foobarba-foobarba-foobarba-foobarba-foobarba) will be 68 characters long. It exceeds the AWS limit of 64 characters: region name(=ap-northeast-1) + managed iam role name(=foobarba-foobarba-foobarba-foobarba-foobarba-foobarba) should be less than or equal to 49",
},
{
context: "WithTooLongWorkerIAMRoleName",
configYaml: kubeAwsSettings.withClusterName("kubeaws-it-main").withRegion("ap-northeast-1").minimumValidClusterYaml() + `
worker:
nodePools:
- name: pool1
iam:
role:
name: foobarba-foobarba-foobarba-foobarba-foobarba-foobarbazzz
`,
expectedErrorMessage: "IAM role name(=ap-northeast-1-foobarba-foobarba-foobarba-foobarba-foobarba-foobarbazzz) will be 71 characters long. It exceeds the AWS limit of 64 characters: region name(=ap-northeast-1) + managed iam role name(=foobarba-foobarba-foobarba-foobarba-foobarba-foobarbazzz) should be less than or equal to 49",
},
{
context: "WithInvalidEtcdInstanceProfileArn",
configYaml: minimalValidConfigYaml + `
etcd:
iam:
instanceProfile:
arn: "badArn"
`,
expectedErrorMessage: "invalid etcd settings: invalid instance profile, your instance profile must match (=arn:aws:iam::YOURACCOUNTID:instance-profile/INSTANCEPROFILENAME), provided (badArn)",
},
{
context: "WithInvalidEtcdManagedPolicyArn",
configYaml: minimalValidConfigYaml + `
etcd:
iam:
role:
managedPolicies:
- arn: "badArn"
`,
expectedErrorMessage: "invalid etcd settings: invalid managed policy arn, your managed policy must match this (=arn:aws:iam::(YOURACCOUNTID|aws):policy/POLICYNAME), provided this (badArn)",
},
{
context: "WithInvalidWorkerInstanceProfileArn",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
iam:
instanceProfile:
arn: "badArn"
`,
expectedErrorMessage: "invalid instance profile, your instance profile must match (=arn:aws:iam::YOURACCOUNTID:instance-profile/INSTANCEPROFILENAME), provided (badArn)",
},
{
context: "WithInvalidWorkerManagedPolicyArn",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
iam:
role:
managedPolicies:
- arn: "badArn"
`,
expectedErrorMessage: "invalid managed policy arn, your managed policy must match this (=arn:aws:iam::(YOURACCOUNTID|aws):policy/POLICYNAME), provided this (badArn)",
},
{
context: "WithGPUEnabledWorkerButEmptyVersion",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
instanceType: p2.xlarge
gpu:
nvidia:
enabled: true
version: ""
`,
expectedErrorMessage: `gpu.nvidia.version must not be empty when gpu.nvidia is enabled.`,
},
{
context: "WithGPUDisabledWorkerButIntallationSupportEnabled",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
instanceType: t2.medium
gpu:
nvidia:
enabled: true
version: ""
`,
expectedErrorMessage: `instance type t2.medium doesn't support GPU. You can enable Nvidia driver intallation support only when use [p2 p3 g2 g3] instance family.`,
},
}
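// Each invalid case must fail to parse, and the returned error must contain
// the case's expectedErrorMessage substring.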
for _, invalidCase := range parseErrorCases {
t.Run(invalidCase.context, func(t *testing.T) {
configBytes := invalidCase.configYaml
// TODO Allow including plugins in test data?
plugins := []*pluginmodel.Plugin{}
providedConfig, err := config.ConfigFromBytes([]byte(configBytes), plugins)
if err == nil {
t.Errorf("expected to fail parsing config %s: %+v", configBytes, *providedConfig)
t.FailNow()
}
errorMsg := fmt.Sprintf("%v", err)
if !strings.Contains(errorMsg, invalidCase.expectedErrorMessage) {
t.Errorf(`expected "%s" to be contained in the error message : %s`, invalidCase.expectedErrorMessage, errorMsg)
}
})
}
}
|
[
"\"KUBE_AWS_INTEGRATION_TEST\""
] |
[] |
[
"KUBE_AWS_INTEGRATION_TEST"
] |
[]
|
["KUBE_AWS_INTEGRATION_TEST"]
|
go
| 1 | 0 | |
meson/post_install.py
|
#!/usr/bin/env python3
import os
import subprocess
prefix = os.environ.get('MESON_INSTALL_PREFIX', '/usr')
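# Meson sets MESON_INSTALL_PREFIX when it runs install scripts; the '/usr'
# fallback only matters if this script is invoked by hand.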
datadir = os.path.join(prefix, 'share')
# Packaging tools define DESTDIR and this isn't needed for them
if 'DESTDIR' not in os.environ:
print('Compiling gsettings schemas...')
schema_dir = os.path.join(datadir, 'glib-2.0/schemas')
subprocess.call(['glib-compile-schemas', schema_dir])
print('Updating icon cache...')
icon_cache_dir = os.path.join(datadir, 'icons')
subprocess.call(['gtk-update-icon-cache', '-qtf', icon_cache_dir])
print('Updating desktop database...')
desktop_database_dir = os.path.join(datadir, 'applications')
subprocess.call(['update-desktop-database', '-q', desktop_database_dir])
|
[] |
[] |
[
"MESON_INSTALL_PREFIX"
] |
[]
|
["MESON_INSTALL_PREFIX"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"os"
)
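// handler returns the NAME environment variable as the HTTP response body.
// Illustrative local invocation (the request payload is ignored here):
//
//   resp, _ := handler(events.APIGatewayProxyRequest{})
//   // resp.Body == os.Getenv("NAME")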
func handler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
name := os.Getenv("NAME")
return events.APIGatewayProxyResponse{
StatusCode: 200,
Body: name,
}, nil
}
func main() {
// Make the handler available for Remote Procedure Call by AWS Lambda
lambda.Start(handler)
}
|
[
"\"NAME\""
] |
[] |
[
"NAME"
] |
[]
|
["NAME"]
|
go
| 1 | 0 | |
flexget/plugin.py
|
""" Plugin Loading & Management.
"""
from __future__ import absolute_import, division, unicode_literals
import logging
import os
import re
import time
import warnings
from itertools import ifilter
from path import Path
from requests import RequestException
from flexget import plugins as plugins_pkg
from flexget import config_schema
from flexget.event import add_event_handler as add_phase_handler
from flexget.event import fire_event, remove_event_handlers
log = logging.getLogger('plugin')
class DependencyError(Exception):
"""Plugin depends on other plugin, but it cannot be loaded.
Args:
issued_by: name of the plugin trying to do the import
missing: name of the plugin or library that is missing
message: user readable error message
All args are optional.
"""
def __init__(self, issued_by=None, missing=None, message=None, silent=False):
super(DependencyError, self).__init__()
self.issued_by = issued_by
self.missing = missing
self._message = message
self.silent = silent
def _get_message(self):
if self._message:
return self._message
else:
return 'Plugin `%s` requires dependency `%s`' % (self.issued_by, self.missing)
def _set_message(self, message):
self._message = message
def has_message(self):
return self._message is not None
message = property(_get_message, _set_message)
def __str__(self):
return '<DependencyError(issued_by=%r,missing=%r,message=%r,silent=%r)>' % \
(self.issued_by, self.missing, self.message, self.silent)
class RegisterException(Exception):
def __init__(self, value):
super(RegisterException, self).__init__()
self.value = value
def __str__(self):
return repr(self.value)
class PluginWarning(Warning):
def __init__(self, value, logger=log, **kwargs):
super(PluginWarning, self).__init__()
self.value = value
self.log = logger
self.kwargs = kwargs
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return self.value
class PluginError(Exception):
def __init__(self, value, logger=log, **kwargs):
super(PluginError, self).__init__()
# Value is expected to be a string
if not isinstance(value, basestring):
value = unicode(value)
self.value = value
self.log = logger
self.kwargs = kwargs
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return unicode(self.value)
# TODO: move to utils or somewhere more appropriate
class internet(object):
"""@internet decorator for plugin phase methods.
Catches all internet related exceptions and raises PluginError with relevant message.
Task handles PluginErrors by aborting the task.
"""
def __init__(self, logger=None):
if logger:
self.log = logger
else:
self.log = logging.getLogger('@internet')
def __call__(self, func):
def wrapped_func(*args, **kwargs):
from httplib import BadStatusLine
import urllib2
try:
return func(*args, **kwargs)
except RequestException as e:
log.debug('decorator caught RequestException. handled traceback:', exc_info=True)
raise PluginError('RequestException: %s' % e)
except urllib2.HTTPError as e:
raise PluginError('HTTPError %s' % e.code, self.log)
except urllib2.URLError as e:
log.debug('decorator caught urlerror. handled traceback:', exc_info=True)
raise PluginError('URLError %s' % e.reason, self.log)
except BadStatusLine:
log.debug('decorator caught badstatusline. handled traceback:', exc_info=True)
raise PluginError('Got BadStatusLine', self.log)
except ValueError as e:
log.debug('decorator caught ValueError. handled traceback:', exc_info=True)
raise PluginError(e)
except IOError as e:
log.debug('decorator caught ioerror. handled traceback:', exc_info=True)
if hasattr(e, 'reason'):
raise PluginError('Failed to reach server. Reason: %s' % e.reason, self.log)
elif hasattr(e, 'code'):
raise PluginError('The server couldn\'t fulfill the request. Error code: %s' % e.code, self.log)
raise PluginError('IOError when connecting to server: %s' % e, self.log)
return wrapped_func
def priority(value):
"""Priority decorator for phase methods"""
def decorator(target):
target.priority = value
return target
return decorator
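# Illustrative usage sketch (hypothetical plugin, not part of this module): phase
# methods are typically decorated with @priority to control execution order and
# wrapped with @internet() so that network failures surface as PluginError.
#
#     class MyExamplePlugin(object):
#         @priority(200)
#         @internet(logging.getLogger('my_example'))
#         def on_task_input(self, task, config):
#             import requests
#             response = requests.get(config['url'])  # may raise RequestException
#             return []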
DEFAULT_PRIORITY = 128
plugin_contexts = ['task', 'root']
# task phases, in order of their execution; note that this can be extended by
# registering new phases at runtime
task_phases = ['start', 'input', 'metainfo', 'filter', 'download', 'modify', 'output', 'learn', 'exit']
# map phase names to method names
phase_methods = {
# task
'abort': 'on_task_abort' # special; not a task phase that gets called normally
}
phase_methods.update((_phase, 'on_task_' + _phase) for _phase in task_phases) # DRY
# Mapping of plugin name to PluginInfo instance (logical singletons)
plugins = {}
# Loading done?
plugins_loaded = False
_loaded_plugins = {}
_plugin_options = []
_new_phase_queue = {}
def register_task_phase(name, before=None, after=None):
"""Adds a new task phase to the available phases."""
if before and after:
raise RegisterException('You can only give either before or after for a phase.')
if not before and not after:
raise RegisterException('You must specify either a before or after phase.')
if name in task_phases or name in _new_phase_queue:
raise RegisterException('Phase %s already exists.' % name)
def add_phase(phase_name, before, after):
if before is not None and before not in task_phases:
return False
if after is not None and after not in task_phases:
return False
# add method name to phase -> method lookup table
phase_methods[phase_name] = 'on_task_' + phase_name
# place phase in phase list
if before is None:
task_phases.insert(task_phases.index(after) + 1, phase_name)
if after is None:
task_phases.insert(task_phases.index(before), phase_name)
return True
# if can't add yet (dependencies) queue addition
if not add_phase(name, before, after):
_new_phase_queue[name] = [before, after]
for phase_name, args in _new_phase_queue.items():
if add_phase(phase_name, *args):
del _new_phase_queue[phase_name]
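# Illustrative sketch (hypothetical phase name, not registered here): a plugin
# module that needs its own phase would typically request it at import time,
# e.g. a phase that runs right after the built-in 'download' phase:
#
#     register_task_phase('postprocess', after='download')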
class PluginInfo(dict):
"""
Allows accessing key/value pairs of this dictionary subclass via
attributes. Also instantiates a plugin and initializes properties.
"""
# Counts duplicate registrations
dupe_counter = 0
def __init__(self, plugin_class, name=None, groups=None, builtin=False, debug=False, api_ver=1,
contexts=None, category=None):
"""
Register a plugin.
:param plugin_class: The plugin factory.
:param string name: Name of the plugin (if not given, default to factory class name in underscore form).
:param list groups: Groups this plugin belongs to.
:param bool builtin: Auto-activated?
:param bool debug: True if plugin is for debugging purposes.
:param int api_ver: Signature of callback hooks (1=task; 2=task,config).
:param list contexts: List of where this plugin is configurable. Can be 'task', 'root', or None
:param string category: The type of plugin. Can be one of the task phases.
Defaults to the package name containing the plugin.
"""
dict.__init__(self)
if groups is None:
groups = []
if name is None:
# Convention is to take camel-case class name and rewrite it to an underscore form,
# e.g. 'PluginName' to 'plugin_name'
name = re.sub('[A-Z]+', lambda i: '_' + i.group(0).lower(), plugin_class.__name__).lstrip('_')
if contexts is None:
contexts = ['task']
elif isinstance(contexts, basestring):
contexts = [contexts]
if category is None and plugin_class.__module__.startswith('flexget.plugins'):
# By default look at the containing package of the plugin.
category = plugin_class.__module__.split('.')[-2]
# Check for unsupported api versions
if api_ver < 2:
warnings.warn('Api versions <2 are no longer supported. Plugin %s' % name, DeprecationWarning, stacklevel=2)
# Set basic info attributes
self.api_ver = api_ver
self.name = name
self.groups = groups
self.builtin = builtin
self.debug = debug
self.contexts = contexts
self.category = category
self.phase_handlers = {}
self.plugin_class = plugin_class
self.instance = None
if self.name in plugins:
PluginInfo.dupe_counter += 1
log.critical('Error while registering plugin %s. A plugin with the same name is already registered' %
self.name)
else:
plugins[self.name] = self
def initialize(self):
if self.instance is not None:
# We already initialized
return
# Create plugin instance
self.instance = self.plugin_class()
self.instance.plugin_info = self # give plugin easy access to its own info
self.instance.log = logging.getLogger(getattr(self.instance, "LOGGER_NAME", None) or self.name)
if hasattr(self.instance, 'schema'):
self.schema = self.instance.schema
elif hasattr(self.instance, 'validator'):
self.schema = self.instance.validator().schema()
else:
# TODO: I think plugins without schemas should not be allowed in config, maybe rethink this
self.schema = {}
if self.schema is not None:
location = '/schema/plugin/%s' % self.name
self.schema['id'] = location
config_schema.register_schema(location, self.schema)
self.build_phase_handlers()
def reset_phase_handlers(self):
"""Temporary utility method"""
self.phase_handlers = {}
self.build_phase_handlers()
# TODO: should unregister events (from flexget.event)
# this method is not used at the moment anywhere ...
raise NotImplementedError
def build_phase_handlers(self):
"""(Re)build phase_handlers in this plugin"""
for phase, method_name in phase_methods.iteritems():
if phase in self.phase_handlers:
continue
if hasattr(self.instance, method_name):
method = getattr(self.instance, method_name)
if not callable(method):
continue
# check for priority decorator
if hasattr(method, 'priority'):
handler_prio = method.priority
else:
handler_prio = DEFAULT_PRIORITY
event = add_phase_handler('plugin.%s.%s' % (self.name, phase), method, handler_prio)
# provides backwards compatibility
event.plugin = self
self.phase_handlers[phase] = event
def __getattr__(self, attr):
if attr in self:
return self[attr]
return dict.__getattribute__(self, attr)
def __setattr__(self, attr, value):
self[attr] = value
def __str__(self):
return '<PluginInfo(name=%s)>' % self.name
__repr__ = __str__
register = PluginInfo
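# Illustrative registration sketch (hypothetical plugin class): plugin modules
# call `register` (i.e. PluginInfo) from a 'plugin.register' event handler,
# which is fired by load_plugins() below.
#
#     from flexget.event import event
#
#     @event('plugin.register')
#     def register_plugin():
#         register(MyExamplePlugin, 'my_example', api_ver=2)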
def _strip_trailing_sep(path):
return path.rstrip("\\/")
def _get_standard_plugins_path():
"""
    :returns: List of directories from which plugins should be loaded.
"""
# Get basic path from environment
paths = []
env_path = os.environ.get('FLEXGET_PLUGIN_PATH')
if env_path:
paths = env_path.split(os.pathsep)
# Add flexget.plugins directory (core plugins)
paths.append(os.path.abspath(os.path.dirname(plugins_pkg.__file__)))
return paths
def _load_plugins_from_dirs(dirs):
"""
    :param list dirs: Directories from which plugins are loaded
"""
log.debug('Trying to load plugins from: %s' % dirs)
dirs = [Path(d) for d in dirs if os.path.isdir(d)]
# add all dirs to plugins_pkg load path so that imports work properly from any of the plugin dirs
plugins_pkg.__path__ = map(_strip_trailing_sep, dirs)
for plugins_dir in dirs:
for plugin_path in plugins_dir.walkfiles('*.py'):
if plugin_path.name == '__init__.py':
continue
# Split the relative path from the plugins dir to current file's parent dir to find subpackage names
plugin_subpackages = filter(None, plugin_path.relpath(plugins_dir).parent.splitall())
module_name = '.'.join([plugins_pkg.__name__] + plugin_subpackages + [plugin_path.namebase])
try:
__import__(module_name)
except DependencyError as e:
if e.has_message():
msg = e.message
else:
msg = 'Plugin `%s` requires `%s` to load.' % (e.issued_by or module_name, e.missing or 'N/A')
if not e.silent:
log.warning(msg)
else:
log.debug(msg)
except ImportError as e:
log.critical('Plugin `%s` failed to import dependencies' % module_name)
log.exception(e)
except ValueError as e:
# Debugging #2755
log.error('ValueError attempting to import `%s` (from %s): %s', module_name, plugin_path, e)
except Exception as e:
log.critical('Exception while loading plugin %s' % module_name)
log.exception(e)
raise
else:
log.trace('Loaded module %s from %s' % (module_name, plugin_path))
if _new_phase_queue:
for phase, args in _new_phase_queue.iteritems():
            log.error('New phase %s could not be created at the requested point '
                      '(before=%s, after=%s). The plugin that requested it will not work properly.' %
                      (phase, args[0], args[1]))
def load_plugins(extra_dirs=None):
"""
Load plugins from the standard plugin paths.
    :param list extra_dirs: Extra directories from which plugins are loaded.
"""
global plugins_loaded
if not extra_dirs:
extra_dirs = []
# Add flexget.plugins directory (core plugins)
extra_dirs.extend(_get_standard_plugins_path())
start_time = time.time()
# Import all the plugins
_load_plugins_from_dirs(extra_dirs)
# Register them
fire_event('plugin.register')
# Plugins should only be registered once, remove their handlers after
remove_event_handlers('plugin.register')
# After they have all been registered, instantiate them
for plugin in plugins.values():
plugin.initialize()
took = time.time() - start_time
plugins_loaded = True
log.debug('Plugins took %.2f seconds to load' % took)
def get_plugins(phase=None, group=None, context=None, category=None, name=None, min_api=None):
"""
    Query plugins by their characteristics.
:param string phase: Require phase
:param string group: Plugin must belong to this group.
    :param string context: Where the plugin is configured, e.g. (root, task)
    :param string category: Type of plugin; one of the task phase names.
:param string name: Name of the plugin.
:param int min_api: Minimum api version.
:return: List of PluginInfo instances.
:rtype: list
"""
def matches(plugin):
if phase is not None and phase not in phase_methods:
raise ValueError('Unknown phase %s' % phase)
if phase and phase not in plugin.phase_handlers:
return False
if group and group not in plugin.groups:
return False
if context and context not in plugin.contexts:
return False
if category and not category == plugin.category:
return False
if name is not None and name != plugin.name:
return False
if min_api is not None and plugin.api_ver < min_api:
return False
return True
return ifilter(matches, plugins.itervalues())
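# Illustrative query sketch: once load_plugins() has run, callers can filter the
# registry, e.g. to get every api_ver>=2 plugin that hooks the 'input' phase:
#
#     input_plugins = list(get_plugins(phase='input', min_api=2))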
def plugin_schemas(**kwargs):
"""Create a dict schema that matches plugins specified by `kwargs`"""
return {'type': 'object',
'properties': dict((p.name, {'$ref': p.schema['id']}) for p in get_plugins(**kwargs)),
'additionalProperties': False,
'error_additionalProperties': '{{message}} Only known plugin names are valid keys.',
'patternProperties': {'^_': {'title': 'Disabled Plugin'}}}
config_schema.register_schema('/schema/plugins', plugin_schemas)
def get_plugins_by_phase(phase):
"""
.. deprecated:: 1.0.3328
Use :func:`get_plugins` instead
Return an iterator over all plugins that hook :phase:
"""
warnings.warn('Deprecated API', DeprecationWarning, stacklevel=2)
if phase not in phase_methods:
raise Exception('Unknown phase %s' % phase)
return get_plugins(phase=phase)
def get_phases_by_plugin(name):
"""Return all phases plugin :name: hooks"""
return list(get_plugin_by_name(name).phase_handlers)
def get_plugins_by_group(group):
"""
.. deprecated:: 1.0.3328
Use :func:`get_plugins` instead
    Return an iterator over all plugins in the specified group.
"""
warnings.warn('Deprecated API', DeprecationWarning, stacklevel=2)
return get_plugins(group=group)
def get_plugin_keywords():
"""Return iterator over all plugin keywords."""
return plugins.iterkeys()
def get_plugin_by_name(name, issued_by='???'):
"""Get plugin by name, preferred way since this structure may be changed at some point."""
if name not in plugins:
raise DependencyError(issued_by=issued_by, missing=name, message='Unknown plugin %s' % name)
return plugins[name]
|
[] |
[] |
[
"FLEXGET_PLUGIN_PATH"
] |
[]
|
["FLEXGET_PLUGIN_PATH"]
|
python
| 1 | 0 | |
src/prefect/cli/run.py
|
import json
import logging
import os
import runpy
import sys
import textwrap
import uuid
import time
from contextlib import contextmanager
from types import ModuleType
from typing import Callable, Dict, List, Union, Any
import click
from click import ClickException
from tabulate import tabulate
import prefect
from prefect.backend.flow import FlowView
from prefect.backend.flow_run import FlowRunView, watch_flow_run
from prefect.backend.execution import execute_flow_run_in_subprocess
from prefect.cli.build_register import (
TerminalError,
handle_terminal_error,
log_exception,
)
from prefect.client import Client
from prefect.utilities.graphql import EnumValue, with_args
from prefect.utilities.importtools import import_object
from prefect.utilities.logging import temporary_logger_config
@contextmanager
def temporary_environ(environ):
"""
Temporarily add environment variables to the current os.environ
The original environment will be restored at context exit
"""
old_environ = os.environ.copy()
os.environ.update(environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(old_environ)
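# Illustrative usage sketch (hypothetical variable name): the variable is only
# set inside the block and the previous environment is restored on exit.
#
#     with temporary_environ({"MY_EXAMPLE_SETTING": "1"}):
#         assert os.environ["MY_EXAMPLE_SETTING"] == "1"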
@contextmanager
def try_error_done(
message: str,
echo: Callable = click.secho,
traceback: bool = False,
skip_done: bool = False,
):
"""
Try to run the code in the context block. On error print "Error" and raise a
    terminal error with the exception string. On success, print "Done".
Args:
message: The first message to display
echo: The function to use to echo. Must support `click.secho` arguments
traceback: Display the exception traceback instead of a short message
        skip_done: Do not display 'Done'; the user of the context should display it manually instead
Example:
>>> with try_error_done("Setting up foo..."):
>>> pass
Setting up foo... Done
>>> with try_error_done("Setting up bar..."):
>>> raise ValueError("no!")
Setting up bar... Error
no!
"""
echo(message, nl=False)
try:
yield
except TerminalError:
echo(" Error", fg="red")
raise
except Exception as exc:
echo(" Error", fg="red")
if traceback and not isinstance(exc, TerminalError):
log_exception(exc, indent=2)
raise TerminalError
else:
raise TerminalError(f"{type(exc).__name__}: {exc}")
else:
if not skip_done:
echo(" Done", fg="green")
def echo_with_log_color(log_level: int, message: str, **kwargs: Any):
if log_level >= logging.ERROR:
kwargs.setdefault("fg", "red")
elif log_level >= logging.WARNING:
kwargs.setdefault("fg", "yellow")
elif log_level <= logging.DEBUG:
kwargs.setdefault("fg", "white")
kwargs.setdefault("dim", True)
else:
kwargs.setdefault("fg", "white")
click.secho(
message,
**kwargs,
)
def load_flows_from_script(path: str) -> "List[prefect.Flow]":
"""Given a file path, load all flows found in the file"""
# TODO: This is copied and slightly modified from `prefect.cli.build_register`
# we should probably abstract this in the future
# Temporarily add the flow's local directory to `sys.path` so that local
# imports work. This ensures that `sys.path` is the same as it would be if
# the flow script was run directly (i.e. `python path/to/flow.py`).
orig_sys_path = sys.path.copy()
sys.path.insert(0, os.path.dirname(os.path.abspath(path)))
try:
with prefect.context({"loading_flow": True, "local_script_path": path}):
namespace = runpy.run_path(path, run_name="<flow>")
except FileNotFoundError as exc:
if path in str(exc): # Only capture it if it's about our file
raise TerminalError(f"File does not exist: {os.path.abspath(path)!r}")
raise
finally:
sys.path[:] = orig_sys_path
flows = [f for f in namespace.values() if isinstance(f, prefect.Flow)]
return flows
def load_flows_from_module(name: str) -> "List[prefect.Flow]":
"""
Given a module name (or full import path to a flow), load all flows found in the
module
"""
# TODO: This is copied and slightly modified from `prefect.cli.build_register`
# we should probably abstract this in the future
try:
with prefect.context({"loading_flow": True}):
mod_or_obj = import_object(name)
except Exception as exc:
# If the requested module isn't found, log without a traceback
# otherwise log a general message with the traceback.
if isinstance(exc, ModuleNotFoundError) and (
name == exc.name
or (name.startswith(exc.name) and name[len(exc.name)] == ".")
):
raise TerminalError(str(exc).capitalize())
elif isinstance(exc, AttributeError):
raise TerminalError(str(exc).capitalize())
else:
raise
if isinstance(mod_or_obj, ModuleType):
flows = [f for f in vars(mod_or_obj).values() if isinstance(f, prefect.Flow)]
elif isinstance(mod_or_obj, prefect.Flow):
flows = [mod_or_obj]
else:
raise TerminalError(
f"Invalid object of type {type(mod_or_obj).__name__!r} found at {name!r}. "
f"Expected Module or Flow."
)
return flows
def get_flow_from_path_or_module(
path: str = None, module: str = None, name: str = None
):
location = path if path is not None else module
flows = load_flows_from_script(path) if path else load_flows_from_module(module)
flows_by_name = {flow.name: flow for flow in flows}
flow_names = ", ".join(map(repr, flows_by_name.keys()))
if not flows:
raise TerminalError(f"Found no flows at {location}.")
if len(flows) > 1 and not name:
raise TerminalError(
f"Found multiple flows at {location!r}: {flow_names}\n\n"
f"Specify a flow name to run."
)
if name:
if name not in flows_by_name:
raise TerminalError(
f"Did not find {name!r} in flows at {location}. Found {flow_names}"
)
flow = flows_by_name[name]
else:
flow = list(flows_by_name.values())[0]
return flow
def get_flow_view(
flow_or_group_id: str = None,
project: str = None,
name: str = None,
) -> "FlowView":
if flow_or_group_id:
# Lookup by flow id then flow group id if that fails
try:
flow_view = FlowView.from_flow_id(flow_or_group_id)
except ValueError:
pass
else:
return flow_view
try:
flow_view = FlowView.from_flow_group_id(flow_or_group_id)
except ValueError:
pass
else:
return flow_view
# Fall through to failure
raise TerminalError(
f"Failed to find flow id or flow group id matching {flow_or_group_id!r}"
)
if project:
if not name:
raise TerminalError(
"Missing required option `--name`. Cannot look up a flow by project "
"without also passing a name."
)
return FlowView.from_flow_name(flow_name=name, project_name=project)
if name:
# If name wasn't provided for use with another lookup, try a global name search
return FlowView.from_flow_name(flow_name=name)
# This line should not be reached
raise RuntimeError("Failed to find matching case for flow lookup.")
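# Illustrative lookup sketch (hypothetical identifiers): the branches above are
# tried in order -- flow id / flow group id first, then project + name, then a
# global name search.
#
#     get_flow_view(flow_or_group_id="9a1cd70c-37d7-4cd4-ab91-d41c2700300d")
#     get_flow_view(project="examples", name="hello-world")
#     get_flow_view(name="hello-world")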
def load_json_key_values(
cli_input: List[str], display_name: str
) -> Dict[str, Union[dict, str, int]]:
"""
Parse a list of strings formatted as "key=value" where the value is loaded as JSON.
    We do our best here to display a helpful JSON parsing error message, e.g.
```
Error: Failed to parse JSON for parameter 'name' with value
foo
JSON Error: Expecting value: line 1 column 1 (char 0)
Did you forget to include quotes? You may need to escape so your shell does not remove them, e.g. \"
```
Args:
cli_input: A list of "key=value" strings to parse
display_name: A name to display in exceptions
Returns:
A mapping of keys -> parsed values
"""
parsed = {}
def cast_value(value: str) -> Any:
"""Cast the value from a string to a valid JSON type; add quotes for the user
if necessary
"""
try:
return json.loads(value)
except ValueError as exc:
if (
"Extra data" in str(exc) or "Expecting value" in str(exc)
) and '"' not in value:
return cast_value(f'"{value}"')
raise exc
for spec in cli_input:
try:
key, value = spec.split("=")
except ValueError:
raise TerminalError(
f"Invalid {display_name} option {spec!r}. Expected format 'key=value'."
)
try:
parsed[key] = cast_value(value)
except ValueError as exc:
indented_value = textwrap.indent(value, prefix="\t")
raise TerminalError(
f"Failed to parse JSON for {display_name} {key!r} with value"
f"\n\n{indented_value}\n\n"
f"JSON Error: {exc}"
)
return parsed
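# Illustrative sketch of the parsing above: values are loaded as JSON and bare
# words are coerced to strings, e.g.
#
#     load_json_key_values(["retries=3", "name=Marvin"], "parameter")
#     # -> {"retries": 3, "name": "Marvin"}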
RUN_EPILOG = """
\bExamples:
\b Run flow in a script locally
\b $ prefect run -p hello-world.py
\b Run flow in a module locally
\b $ prefect run -m prefect.hello_world
\b Run flow with a non-default parameter locally
\b $ prefect run -m prefect.hello_world --param name=Marvin
\b Run registered flow with the backend by flow name and watch execution
\b $ prefect run -n "hello-world" --watch
\b Run registered flow with the backend with custom labels
\b $ prefect run -n "hello-world" --label example --label hello
\b Run registered flow with the backend by flow id and exit after creation
\b $ prefect run -i "9a1cd70c-37d7-4cd4-ab91-d41c2700300d"
\b Run registered flow and pipe flow run id to another program
\b $ prefect run -n "hello-world" --quiet | post_run.sh
\b Run registered flow and execute locally without an agent
\b $ prefect run -n "hello-world" --execute
"""
FLOW_LOOKUP_MSG = """
Look up a flow to run with one of the following option combinations:
--id
--name
--project and --name
--path (and --name if there are multiple flows in the script)
--module (and --name if there are multiple flows in the module)
See `prefect run --help` for more details on the options.
"""
@click.group(invoke_without_command=True, epilog=RUN_EPILOG)
@click.pass_context
# Flow lookup settings -----------------------------------------------------------------
@click.option(
"--id",
"-i",
"flow_or_group_id",
help=(
"The UUID of a flow or flow group to run. If a flow group id is given, "
"the latest flow id will be used for the run."
),
)
@click.option(
"--project",
help="The name of the Prefect project containing the flow to run.",
)
@click.option(
"--path",
"-p",
help="The path to a file containing the flow to run.",
)
@click.option(
"--module",
"-m",
help="The python module name containing the flow to run.",
)
@click.option(
"--name",
"-n",
help=(
"The name of a flow to run from the specified file/module/project. If the "
"source contains multiple flows, this must be provided. "
),
)
# Flow run settings --------------------------------------------------------------------
@click.option(
"--label",
"labels",
help=(
"A label to add to the flow run. May be passed multiple times to specify "
"multiple labels. If not passed, the labels from the flow group will be used."
),
multiple=True,
)
@click.option("--run-name", help="A name to assign to the flow run.", default=None)
@click.option(
"--context",
"context_vars",
help=(
"A key, value pair (key=value) specifying a flow context variable. The value "
"will be interpreted as JSON. May be passed multiple times to specify multiple "
"context values. Nested values may be set by passing a dict."
),
multiple=True,
)
@click.option(
"--param",
"params",
help=(
"A key, value pair (key=value) specifying a flow parameter. The value will be "
"interpreted as JSON. May be passed multiple times to specify multiple "
"parameter values."
),
multiple=True,
)
@click.option(
"--log-level",
help=(
"The log level to set for the flow run. If passed, the level must be a valid "
"Python logging level name. If this option is not passed, the default level "
"for the flow will be used."
),
type=click.Choice(
["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], case_sensitive=False
),
default=None,
)
@click.option(
"--param-file",
help=(
"The path to a JSON file containing parameter keys and values. Any parameters "
"passed with `--param` will take precedence over these values."
),
default=None,
)
@click.option(
"--execute",
help=(
"Execute the flow run in-process without an agent. If this process exits, the "
"flow run will be marked as 'Failed'."
),
is_flag=True,
)
@click.option(
"--schedule",
"-s",
help=(
"Execute the flow run according to the schedule attached to the flow. If this "
"flag is set, this command will wait between scheduled flow runs. If the flow "
"has no schedule, this flag will be ignored. If used with a non-local run, an "
"exception will be thrown."
),
is_flag=True,
)
# Display settings ---------------------------------------------------------------------
@click.option(
"--quiet",
"-q",
help=(
"Disable verbose messaging about the flow run and just print the flow run id."
),
is_flag=True,
)
@click.option(
"--no-logs",
help=(
"Disable streaming logs from the flow run to this terminal. Only state changes "
"will be displayed. Only applicable when `--watch` is set."
),
is_flag=True,
)
@click.option(
"--watch",
"-w",
help="Wait for the flow run to finish executing and display status information.",
is_flag=True,
)
@handle_terminal_error
def run(
ctx,
flow_or_group_id,
project,
path,
module,
name,
labels,
context_vars,
params,
execute,
schedule,
log_level,
param_file,
run_name,
quiet,
no_logs,
watch,
):
"""Run a flow"""
# Since the old command was a subcommand of this, we have to do some
# mucking to smoothly deprecate it. Can be removed with `prefect run flow`
# is removed.
if ctx.invoked_subcommand is not None:
if any([params, no_logs, quiet, flow_or_group_id]):
# These options are not supported by `prefect run flow`
raise ClickException(
"Got unexpected extra argument (%s)" % ctx.invoked_subcommand
)
return
# Define a simple function so we don't have to have a lot of `if not quiet` logic
quiet_echo = (
(lambda *_, **__: None)
if quiet
else lambda *args, **kwargs: click.secho(*args, **kwargs)
)
# Cast labels to a list instead of a tuple so we can extend it
labels = list(labels)
# Ensure that the user has not passed conflicting options
given_lookup_options = {
key
for key, option in {
"--id": flow_or_group_id,
"--project": project,
"--path": path,
"--module": module,
}.items()
if option is not None
}
# Since `name` can be passed in conjunction with several options and also alone
# it requires a special case here
if not given_lookup_options and not name:
raise ClickException(
"Received no options to look up the flow." + FLOW_LOOKUP_MSG
)
if "--id" in given_lookup_options and name:
raise ClickException(
"Received too many options to look up the flow; "
"cannot specifiy both `--name` and `--id`" + FLOW_LOOKUP_MSG
)
if len(given_lookup_options) > 1:
raise ClickException(
"Received too many options to look up the flow: "
f"{', '.join(given_lookup_options)}" + FLOW_LOOKUP_MSG
)
# Load parameters and context ------------------------------------------------------
context_dict = load_json_key_values(context_vars, "context")
file_params = {}
if param_file:
try:
with open(param_file) as fp:
file_params = json.load(fp)
except FileNotFoundError:
raise TerminalError(
f"Parameter file does not exist: {os.path.abspath(param_file)!r}"
)
except ValueError as exc:
raise TerminalError(
f"Failed to parse JSON at {os.path.abspath(param_file)!r}: {exc}"
)
cli_params = load_json_key_values(params, "parameter")
conflicting_keys = set(cli_params.keys()).intersection(file_params.keys())
if conflicting_keys:
quiet_echo(
"The following parameters were specified by file and CLI, the CLI value "
f"will be used: {conflicting_keys}"
)
params_dict = {**file_params, **cli_params}
# Local flow run -------------------------------------------------------------------
if path or module:
# We can load a flow for local execution immediately if given a path or module,
# otherwise, we'll lookup the flow then pull from storage for a local run
with try_error_done("Retrieving local flow...", quiet_echo, traceback=True):
flow = get_flow_from_path_or_module(path=path, module=module, name=name)
# Set the desired log level
if no_logs:
log_level = 100 # CRITICAL is 50 so this should do it
run_info = ""
if params_dict:
run_info += f"└── Parameters: {params_dict}\n"
if context_dict:
run_info += f"└── Context: {context_dict}\n"
if run_info:
quiet_echo("Configured local flow run")
quiet_echo(run_info, nl=False)
quiet_echo("Running flow locally...")
with temporary_logger_config(
level=log_level,
stream_fmt="└── %(asctime)s | %(levelname)-7s | %(message)s",
stream_datefmt="%H:%M:%S",
):
with prefect.context(**context_dict):
try:
result_state = flow.run(
parameters=params_dict, run_on_schedule=schedule
)
except Exception as exc:
quiet_echo("Flow runner encountered an exception!")
log_exception(exc, indent=2)
raise TerminalError("Flow run failed!")
if result_state.is_failed():
quiet_echo("Flow run failed!", fg="red")
sys.exit(1)
else:
quiet_echo("Flow run succeeded!", fg="green")
return
# Backend flow run -----------------------------------------------------------------
if schedule:
raise ClickException("`--schedule` can only be specified for local flow runs")
client = Client()
# Validate the flow look up options we've been given and get the flow from the
# backend
with try_error_done("Looking up flow metadata...", quiet_echo):
flow_view = get_flow_view(
flow_or_group_id=flow_or_group_id,
project=project,
name=name,
)
if log_level:
run_config = flow_view.run_config
if not run_config.env:
run_config.env = {}
run_config.env["PREFECT__LOGGING__LEVEL"] = log_level
else:
run_config = None
if execute:
# Add a random label to prevent an agent from picking up this run
labels.append(f"agentless-run-{str(uuid.uuid4())[:8]}")
try: # Handle keyboard interrupts during creation
flow_run_id = None
# Create a flow run in the backend
with try_error_done(
f"Creating run for flow {flow_view.name!r}...",
quiet_echo,
traceback=True,
# Display 'Done' manually after querying for data to display so there is not
# a lag
skip_done=True,
):
flow_run_id = client.create_flow_run(
flow_id=flow_view.flow_id,
parameters=params_dict,
context=context_dict,
# If labels is an empty list pass `None` to get defaults
# https://github.com/PrefectHQ/server/blob/77c301ce0c8deda4f8771f7e9991b25e7911224a/src/prefect_server/api/runs.py#L136
labels=labels or None,
run_name=run_name,
# We only use the run config for setting logging levels right now
run_config=run_config,
)
if quiet:
# Just display the flow run id in quiet mode
click.echo(flow_run_id)
flow_run = None
else:
# Grab information about the flow run (if quiet we can skip this query)
flow_run = FlowRunView.from_flow_run_id(flow_run_id)
run_url = client.get_cloud_url("flow-run", flow_run_id)
# Display "Done" for creating flow run after pulling the info so there
# isn't a weird lag
quiet_echo(" Done", fg="green")
quiet_echo(
textwrap.dedent(
f"""
└── Name: {flow_run.name}
└── UUID: {flow_run.flow_run_id}
└── Labels: {flow_run.labels}
└── Parameters: {flow_run.parameters}
└── Context: {flow_run.context}
└── URL: {run_url}
"""
).strip()
)
except KeyboardInterrupt:
# If the user interrupts here, they will expect the flow run to be cancelled
quiet_echo("\nKeyboard interrupt detected! Aborting...", fg="yellow")
if flow_run_id:
client.cancel_flow_run(flow_run_id=flow_run_id)
quiet_echo("Cancelled flow run.")
else:
# The flow run was not created so we can just exit
quiet_echo("Aborted.")
return
# Handle agentless execution
if execute:
quiet_echo("Executing flow run...")
try:
with temporary_logger_config(
level=(
100 if no_logs or quiet else log_level
), # Disable logging if asked
stream_fmt="└── %(asctime)s | %(levelname)-7s | %(message)s",
stream_datefmt="%H:%M:%S",
):
execute_flow_run_in_subprocess(flow_run_id)
except KeyboardInterrupt:
quiet_echo("Keyboard interrupt detected! Aborting...", fg="yellow")
pass
elif watch:
try:
quiet_echo("Watching flow run execution...")
for log in watch_flow_run(
flow_run_id=flow_run_id,
stream_logs=not no_logs,
):
level_name = logging.getLevelName(log.level)
timestamp = log.timestamp.in_tz(tz="local")
echo_with_log_color(
log.level,
f"└── {timestamp:%H:%M:%S} | {level_name:<7} | {log.message}",
)
except KeyboardInterrupt:
quiet_echo("Keyboard interrupt detected!", fg="yellow")
try:
cancel = click.confirm(
"On exit, we can leave your flow run executing or cancel it.\n"
"Do you want to cancel this flow run?",
default=True,
)
except click.Abort:
# A second keyboard interrupt will exit without cancellation
pass
else:
if cancel:
client.cancel_flow_run(flow_run_id=flow_run_id)
quiet_echo("Cancelled flow run.", fg="green")
return
quiet_echo("Exiting without cancelling flow run!", fg="yellow")
raise # Re-raise the interrupt
else:
# If not watching or executing, exit without checking state
return
# Get the final flow run state
flow_run = FlowRunView.from_flow_run_id(flow_run_id)
# Wait for the flow run to be done up to 3 seconds
elapsed_time = 0
while not flow_run.state.is_finished() and elapsed_time < 3:
time.sleep(1)
elapsed_time += 1
flow_run = flow_run.get_latest()
# Display the final state
if flow_run.state.is_failed():
quiet_echo("Flow run failed!", fg="red")
sys.exit(1)
elif flow_run.state.is_successful():
quiet_echo("Flow run succeeded!", fg="green")
else:
quiet_echo(f"Flow run is in unexpected state: {flow_run.state}", fg="yellow")
sys.exit(1)
# DEPRECATED: prefect run flow ---------------------------------------------------------
@run.command("flow", hidden=True)
@click.option("--id", help="The UUID of a flow to run.", default=None)
@click.option(
"--version-group-id",
required=False,
help="The id of a flow version group to run.",
hidden=True,
)
@click.option(
"--name", "-n", required=False, help="The name of a flow to run.", hidden=True
)
@click.option(
"--project",
"-p",
required=False,
help="The project that contains the flow.",
hidden=True,
)
@click.option("--version", "-v", type=int, help="A flow version to run.", hidden=True)
@click.option(
"--parameters-file",
"-pf",
help="A parameters JSON file.",
hidden=True,
type=click.Path(exists=True),
)
@click.option(
"--parameters-string", "-ps", help="A parameters JSON string.", hidden=True
)
@click.option("--run-name", "-rn", help="A name to assign for this run.", hidden=True)
@click.option("--context", "-c", help="A context JSON string.", hidden=True)
@click.option(
"--watch",
"-w",
is_flag=True,
help="Watch current state of the flow run.",
hidden=True,
)
@click.option(
"--label",
"labels",
help="A list of labels to apply to the flow run",
hidden=True,
multiple=True,
)
@click.option(
"--logs", "-l", is_flag=True, help="Live logs of the flow run.", hidden=True
)
@click.option(
"--no-url",
is_flag=True,
help="Only output flow run id instead of link.",
hidden=True,
)
def run_flow(
id,
version_group_id,
name,
project,
version,
parameters_file,
parameters_string,
run_name,
context,
watch,
labels,
logs,
no_url,
):
"""
Run a flow that is registered to the Prefect API
DEPRECATED: Use `prefect run` instead of `prefect run flow`
\b
Options:
--id, -i TEXT The ID of a flow to run
--version-group-id TEXT The ID of a flow version group to run
--name, -n TEXT The name of a flow to run
--project, -p TEXT The name of a project that contains the flow
--version, -v INTEGER A flow version to run
--parameters-file, -pf FILE PATH A filepath of a JSON file containing
parameters
--parameters-string, -ps TEXT A string of JSON parameters (note: to ensure these are
parsed correctly, it is best to include the full payload
within single quotes)
--run-name, -rn TEXT A name to assign for this run
--context, -c TEXT A string of JSON key / value pairs to include in context
(note: to ensure these are parsed correctly, it is best
to include the full payload within single quotes)
--watch, -w Watch current state of the flow run, stream
output to stdout
--label TEXT Set labels on the flow run; use multiple times to set
multiple labels.
--logs, -l Get logs of the flow run, stream output to
stdout
--no-url Only output the flow run id instead of a
link
\b
Either `id`, `version-group-id`, or both `name` and `project` must be provided to run a flow.
\b
If both `--parameters-file` and `--parameters-string` are provided then the values
passed in through the string will override the values provided from the file.
\b
e.g.
File contains: {"a": 1, "b": 2}
String: '{"a": 3}'
Parameters passed to the flow run: {"a": 3, "b": 2}
\b
Example:
$ prefect run flow -n "Test-Flow" -p "My Project" -ps '{"my_param": 42}'
Flow Run: https://cloud.prefect.io/myslug/flow-run/2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9
"""
if not id and not (name and project) and not version_group_id:
click.secho(
"A flow ID, version group ID, or a combination of flow name and project must be provided.",
fg="red",
)
return
if sum(map(bool, (id, version_group_id, name))) != 1:
click.secho(
"Only one of flow ID, version group ID, or a name/project combination can be provided.",
fg="red",
)
return
if watch and logs:
click.secho(
"Streaming state and logs not currently supported together.", fg="red"
)
return
if labels == ():
labels = None
client = Client()
flow_id = id
if not flow_id and not version_group_id:
where_clause = {
"_and": {
"name": {"_eq": name},
"version": {"_eq": version},
"project": {"name": {"_eq": project}},
}
}
query = {
"query": {
with_args(
"flow",
{
"where": where_clause,
"order_by": {
"name": EnumValue("asc"),
"version": EnumValue("desc"),
},
"distinct_on": EnumValue("name"),
},
): {"id": True}
}
}
result = client.graphql(query)
flow_data = result.data.flow
if flow_data:
flow_id = flow_data[0].id
else:
click.secho("{} not found".format(name), fg="red")
return
# Load parameters from file if provided
file_params = {}
if parameters_file:
with open(parameters_file) as params_file:
file_params = json.load(params_file)
# Load parameters from string if provided
string_params = {}
if parameters_string:
string_params = json.loads(parameters_string)
if context:
context = json.loads(context)
flow_run_id = client.create_flow_run(
flow_id=flow_id,
version_group_id=version_group_id,
context=context,
labels=labels,
parameters={**file_params, **string_params},
run_name=run_name,
)
if no_url:
click.echo("Flow Run ID: {}".format(flow_run_id))
else:
flow_run_url = client.get_cloud_url("flow-run", flow_run_id)
click.echo("Flow Run: {}".format(flow_run_url))
if watch:
current_states = []
while True:
query = {
"query": {
with_args("flow_run_by_pk", {"id": flow_run_id}): {
with_args(
"states",
{"order_by": {EnumValue("timestamp"): EnumValue("asc")}},
): {"state": True, "timestamp": True}
}
}
}
result = client.graphql(query)
# Filter through retrieved states and output in order
for state_index in result.data.flow_run_by_pk.states:
state = state_index.state
if state not in current_states:
if state != "Success" and state != "Failed":
click.echo("{} -> ".format(state), nl=False)
else:
click.echo(state)
return flow_run_id
current_states.append(state)
time.sleep(3)
if logs:
all_logs = []
log_query = {
with_args(
"logs", {"order_by": {EnumValue("timestamp"): EnumValue("asc")}}
): {"timestamp": True, "message": True, "level": True},
"start_time": True,
"state": True,
}
query = {
"query": {
with_args(
"flow_run",
{
"where": {"id": {"_eq": flow_run_id}},
"order_by": {EnumValue("start_time"): EnumValue("desc")},
},
): log_query
}
}
while True:
result = client.graphql(query)
flow_run = result.data.flow_run
if not flow_run:
click.secho("{} not found".format(flow_run_id), fg="red")
return
new_run = flow_run[0]
logs = new_run.logs
output = []
for i in logs:
if [i.timestamp, i.level, i.message] not in all_logs:
if not len(all_logs):
click.echo(
tabulate(
[[i.timestamp, i.level, i.message]],
headers=["TIMESTAMP", "LEVEL", "MESSAGE"],
tablefmt="plain",
numalign="left",
stralign="left",
)
)
all_logs.append([i.timestamp, i.level, i.message])
continue
output.append([i.timestamp, i.level, i.message])
all_logs.append([i.timestamp, i.level, i.message])
if output:
click.echo(
tabulate(output, tablefmt="plain", numalign="left", stralign="left")
)
if new_run.state == "Success" or new_run.state == "Failed":
return flow_run_id
time.sleep(3)
return flow_run_id
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
open_context_path.py
|
"""Open file paths at the current cursor position."""
import functools
import logging
import os
import re
from itertools import chain
import sublime
import sublime_plugin
platform = sublime.platform()
log = logging.getLogger("OpenContextPath")
class OpenContextPathCommand(sublime_plugin.TextCommand):
"""Open file paths at the current cursor position."""
# the regex to split a text into individual parts of a possible path
file_parts_unix = re.compile(
r"((\w+|\.\.?)/*|\W)", re.IGNORECASE)
file_parts_win = re.compile(
r"([A-Z]:[/\\]+|(\w+|\.\.?)[/\\]*|\W)", re.IGNORECASE)
file_parts = (file_parts_unix if platform != "windows" else file_parts_win)
def run(self, edit, event=None):
"""Run the command."""
paths = self.find_paths(event)
for path, info in paths:
self.open_path(path, info)
def is_enabled(self, event=None):
"""Whether the command is enabled."""
paths = self.find_paths(event)
return len(paths) > 0
def is_visible(self, event=None):
"""Whether the context menu entry is visible."""
paths = self.find_paths(event)
return len(paths) > 0
def description(self, event=None):
"""Describe the context menu entry."""
paths = self.find_paths(event)
if paths:
# only show the name of the first found path
path, info = paths[0]
desc = "Open " + os.path.basename(os.path.normpath(path))
if info.get("line"):
desc += " at line {}".format(info["line"])
return desc
return ""
def want_event(self):
"""Whether we need the event data."""
return True
def open_path(self, path, info):
"""Open a file in Sublime Text or a directory with the file manager."""
window = self.view.window()
# normalize the path to adjust it to the system
path = os.path.normpath(path)
if os.path.isdir(path):
log.debug("Opening directory: %s", path)
window.run_command("open_dir", {
"dir": path
})
else:
if platform == "windows":
# Sublime Text has trouble opening Windows paths without a
# drive letter. We use abspath to fix that.
drive, tail = os.path.splitdrive(path)
if not drive:
path = os.path.abspath(path)
# encode line and column numbers into the file path
if info.get("line"):
path += ":{}".format(info["line"])
if info.get("col"):
path += ":{}".format(info["col"])
log.debug("Opening file: %s", path)
window.open_file(path, sublime.ENCODED_POSITION)
def get_view_settings(self):
"""Find the settings for the current view."""
settings = self.view.settings().get("open_context_path", {})
if not settings:
# if this is not the window's active view (which is true for
# panels) we can try to find some settings there
active_view = self.view.window().active_view()
if self.view != active_view:
settings = active_view.settings().get("open_context_path", {})
return settings
def get_context(self):
"""Return the current context setting."""
settings = sublime.load_settings("OpenContextPath.sublime-settings")
view_settings = self.get_view_settings()
# give the view settings precedence over the global settings
context = view_settings.get("context", None)
if not context:
context = settings.get("context", 100)
return context
def get_directories(self):
"""Collect the current list of directories from the settings."""
settings = sublime.load_settings("OpenContextPath.sublime-settings")
view_settings = self.get_view_settings()
# give the view settings precedence over the global settings
dirs = view_settings.get("directories", [])
dirs += settings.get("directories", [])
# expand ~ to the home directory
dirs = [os.path.expanduser(dir) for dir in dirs]
# expand Sublime Text and environment variables
variables = {}
variables.update(self.view.window().extract_variables())
variables.update(os.environ)
dirs = [sublime.expand_variables(dir, variables) for dir in dirs]
# make all relative paths absolute by basing them on the project folder
project = self.view.window().project_file_name()
if project:
project_path = os.path.dirname(project)
dirs = [os.path.join(project_path, dir) for dir in dirs]
# return a tuple because lists are not hashable and don't work with the
# cache
return tuple(dirs)
def get_patterns(self):
"""Collect the current list of patterns from the settings."""
settings = sublime.load_settings("OpenContextPath.sublime-settings")
view_settings = self.get_view_settings()
# give the view settings precedence over the global settings
patterns = view_settings.get("patterns", [])
patterns += settings.get("patterns", [])
return patterns
def find_paths(self, event=None):
"""Find file paths at the position where the command was called."""
view = self.view
if event:
# search the text around the event's position
points = [view.window_to_text((event["x"], event["y"]))]
else:
# search the texts around all selections
points = [sel.a for sel in view.sel()]
return self.find_paths_at(points)
def find_paths_at(self, points):
"""Find file paths at the given text positions."""
view = self.view
context = self.get_context()
# get the current list of directories to search
dirs = self.get_directories()
# search for a path around each of the points
paths = []
for pt in points:
# clip the text to the specified context
line = view.line(pt)
begin = max(line.a, pt - context)
end = min(line.b, pt + context)
text = view.substr(sublime.Region(begin, end))
col = pt - begin
# try to extract a path and match the text after for additional
# information
path, scope = self.extract_path(text, col, dirs)
if path:
info = self.match_patterns(text[scope[1]:])
paths.append((path, info))
return paths
@functools.lru_cache()
def extract_path(self, text, cur, dirs):
"""Extract a file path around a cursor position within a text."""
log.debug("Extracting from: %s^%s", text[:cur], text[cur:])
log.debug("Directories: %s", dirs)
# split the text into possible parts of a file path before and after
# the cursor position
before = []
after = []
for match in re.finditer(self.file_parts, text):
part = text[match.start():match.end()]
if match.start() <= cur:
before.append(part)
else:
after.append(part)
log.debug("Before cursor: %s", before)
log.debug("After cursor: %s", after)
# go through the parts before the cursor to find the ones that mark the
# beginning of a file path
path = ""
begin, end = 0, 0
for i, part in reversed(list(enumerate(before))):
# in case we haven't found the beginning of a path yet, it could be
# that there is a file consisting of multiple parts in which case
# we just need to blindly start testing for this possibility
if path == "" or self.search_path(part, dirs):
log.debug("Path: %s", part)
existing_path = part
# now find the longest path that can be constructed from all
# the parts after this one
new_path = existing_path
for part in chain(before[i + 1:], after):
new_path += part
if self.search_path(new_path, dirs):
log.debug("Path: %s", new_path)
existing_path = new_path
# we need to test this path again if we skipped that above
if path != "" or self.search_path(existing_path, dirs):
log.debug("Found path: %s", existing_path)
# check if the cursor is actually inside the found path by
# summing up the elements before and within the path
len_before_path = len("".join(before[:i]))
len_existing_path = len(existing_path)
if len_before_path + len_existing_path >= cur:
# keep the longest path
if len_existing_path > len(path):
log.debug("Best path: %s", existing_path)
path = existing_path
begin = len_before_path
end = begin + len_existing_path
if path:
# search again to return the full path for relative paths
return self.search_path(path, dirs), (begin, end)
return None, None
def match_patterns(self, text):
"""Match some text for additional information about a path."""
log.debug("Matching patterns to: %s", text)
# find the first matching pattern and return all named groups
for pattern in self.get_patterns():
match = re.match(pattern, text)
if match:
log.debug("Found groups: %s", match.groupdict())
return match.groupdict()
return {}
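    # Illustrative sketch (hypothetical pattern -- the real ones come from the
    # settings): a pattern with named groups yields the extra info dict, e.g.
    #     re.match(r":(?P<line>\d+)(:(?P<col>\d+))?", ":12:5").groupdict()
    #     -> {'line': '12', 'col': '5'}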
def search_path(self, path, dirs):
"""Search for an existing path (possibly relative to dirs)."""
# ignore special directories with no separator
if path in [".", ".."]:
return None
# expand ~ to the user's home directory
if path.startswith("~"):
path = os.path.expanduser(path)
# expand the environment variables
path = os.path.expandvars(path)
if platform == "windows":
# disable UNC paths on Windows
if path.startswith("\\\\") or path.startswith("//"):
return None
# ignore spaces at the end of a path
if path.endswith(" "):
return None
if os.path.isabs(path): # absolute paths
if os.path.exists(path):
return path
else: # relative paths
for dir in dirs:
full_path = os.path.join(dir, path)
if os.path.exists(full_path):
return full_path
return None
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'troc.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/Honeybee_Honeybee.py
|
# This is the heart of the Honeybee
# By Mostapha Sadeghipour Roudsari
# [email protected]
# Honeybee started by Mostapha Sadeghipour Roudsari is licensed
# under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
"""
This component carries all of Honeybee's main classes. Other components refer to these
classes to run the studies. Therefore, you need to let her fly before running the studies so the
classes will be copied to Rhino's shared space. So let her fly!
-
Honeybee started by Mostapha Sadeghipour Roudsari is licensed
under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
Based on a work at https://github.com/mostaphaRoudsari/Honeybee.
-
Check this link for more information about the license:
http://creativecommons.org/licenses/by-sa/3.0/deed.en_US
-
Source code is available at:
https://github.com/mostaphaRoudsari/Honeybee
-
Provided by Honeybee 0.0.55
Args:
letItFly: Set Boolean to True to let the Honeybee fly!
Returns:
report: Current Honeybee mood!!!
"""
ghenv.Component.Name = "Honeybee_Honeybee"
ghenv.Component.NickName = 'Honeybee'
ghenv.Component.Message = 'VER 0.0.55\nDEC_13_2014'
ghenv.Component.Category = "Honeybee"
ghenv.Component.SubCategory = "00 | Honeybee"
try: ghenv.Component.AdditionalHelpFromDocStrings = "1"
except: pass
import rhinoscriptsyntax as rs
import Rhino as rc
import scriptcontext as sc
from clr import AddReference
AddReference('Grasshopper')
import Grasshopper.Kernel as gh
import math
import shutil
import sys
import os
import System.Threading.Tasks as tasks
import System
import time
from itertools import chain
import datetime
import json
import copy
import urllib
import cPickle as pickle
import subprocess
PI = math.pi
rc.Runtime.HostUtils.DisplayOleAlerts(False)
class CheckIn():
def __init__(self):
#set up default pass
if os.path.exists("c:\\ladybug\\") and os.access(os.path.dirname("c:\\ladybug\\"), os.F_OK):
# folder already exists so it is all fine
sc.sticky["Honeybee_DefaultFolder"] = "c:\\ladybug\\"
elif os.access(os.path.dirname("c:\\"), os.F_OK):
#the folder does not exists but write privileges are given so it is fine
sc.sticky["Honeybee_DefaultFolder"] = "c:\\ladybug\\"
else:
# let's use the user folder
sc.sticky["Honeybee_DefaultFolder"] = os.path.join("C:\\Users\\", os.getenv("USERNAME"), "AppData\\Roaming\\Ladybug\\")
def getComponentVersion(self):
monthDict = {'JAN':'01', 'FEB':'02', 'MAR':'03', 'APR':'04', 'MAY':'05', 'JUN':'06',
'JUL':'07', 'AUG':'08', 'SEP':'09', 'OCT':'10', 'NOV':'11', 'DEC':'12'}
# convert component version to standard versioning
ver, verDate = ghenv.Component.Message.split("\n")
ver = ver.split(" ")[1].strip()
month, day, year = verDate.split("_")
month = monthDict[month.upper()]
version = ".".join([year, month, day, ver])
return version
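    # Worked example of the conversion above, using this component's own message:
    #     'VER 0.0.55\nDEC_13_2014' -> ver='0.0.55', month='12', day='13', year='2014'
    #     -> version = '2014.12.13.0.0.55'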
def isNewerVersionAvailable(self, currentVersion, availableVersion):
# print int(availableVersion.replace(".", "")), int(currentVersion.replace(".", ""))
return int(availableVersion.replace(".", "")) > int(currentVersion.replace(".", ""))
def checkForUpdates(self, LB= True, HB= True, OpenStudio = True, template = True):
url = "https://dl.dropboxusercontent.com/u/16228160/honeybee/versions.txt"
webFile = urllib.urlopen(url)
versions= eval(webFile.read())
webFile.close()
if LB:
ladybugVersion = versions['Ladybug']
currentLadybugVersion = self.getComponentVersion() # I assume that this function will be called inside Ladybug_ladybug Component
if self.isNewerVersionAvailable(currentLadybugVersion, ladybugVersion):
msg = "There is a newer version of Ladybug available to download! " + \
"We strongly recommend you to download the newer version from Food4Rhino: " + \
"http://www.food4rhino.com/project/ladybug-honeybee"
print msg
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
if HB:
honeybeeVersion = versions['Honeybee']
currentHoneybeeVersion = self.getComponentVersion() # I assume that this function will be called inside Honeybee_Honeybee Component
if self.isNewerVersionAvailable(currentHoneybeeVersion, honeybeeVersion):
msg = "There is a newer version of Honeybee available to download! " + \
"We strongly recommend you to download the newer version from Food4Rhino: " + \
"http://www.food4rhino.com/project/ladybug-honeybee"
print msg
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
if OpenStudio:
# This should be called inside OpenStudio component which means Honeybee is already flying
# check if the version file exist
openStudioLibFolder = os.path.join(sc.sticky["Honeybee_DefaultFolder"], "OpenStudio")
versionFile = os.path.join(openStudioLibFolder, "osversion.txt")
isNewerOSAvailable= False
if not os.path.isfile(versionFile):
isNewerOSAvailable= True
else:
# read the file
with open(versionFile) as verFile:
currentOSVersion= eval(verFile.read())['version']
OSVersion = versions['OpenStudio']
if isNewerOSAvailable or self.isNewerVersionAvailable(currentOSVersion, OSVersion):
sc.sticky["isNewerOSAvailable"] = True
else:
sc.sticky["isNewerOSAvailable"] = False
if template:
honeybeeDefaultFolder = sc.sticky["Honeybee_DefaultFolder"]
templateFile = os.path.join(honeybeeDefaultFolder, 'OpenStudioMasterTemplate.idf')
# check file doesn't exist then it should be downloaded
if not os.path.isfile(templateFile):
return True
# find the version
try:
with open(templateFile) as tempFile:
currentTemplateVersion = eval(tempFile.readline().split("!")[-1].strip())["version"]
except Exception, e:
return True
# finally if the file exist and already has a version, compare the versions
templateVersion = versions['Template']
return self.isNewerVersionAvailable(currentTemplateVersion, templateVersion)
checkIn = CheckIn()
class versionCheck(object):
def __init__(self):
self.version = self.getVersion(ghenv.Component.Message)
def getVersion(self, LBComponentMessage):
monthDict = {'JAN':'01', 'FEB':'02', 'MAR':'03', 'APR':'04', 'MAY':'05', 'JUN':'06',
'JUL':'07', 'AUG':'08', 'SEP':'09', 'OCT':'10', 'NOV':'11', 'DEC':'12'}
# convert component version to standard versioning
try: ver, verDate = LBComponentMessage.split("\n")
except: ver, verDate = LBComponentMessage.split("\\n")
ver = ver.split(" ")[1].strip()
month, day, year = verDate.split("_")
month = monthDict[month.upper()]
version = ".".join([year, month, day, ver])
return version
def isCurrentVersionNewer(self, desiredVersion):
return int(self.version.replace(".", "")) >= int(desiredVersion.replace(".", ""))
def isCompatible(self, LBComponent):
code = LBComponent.Code
# find the version that is supposed to be flying
try:
version = code.split("compatibleHBVersion")[1].split("=")[1].split("\n")[0].strip()
except Exception, e:
print e
self.giveWarning(LBComponent)
return False
desiredVersion = self.getVersion(version)
if not self.isCurrentVersionNewer(desiredVersion):
self.giveWarning(LBComponent)
return False
return True
def giveWarning(self, GHComponent):
warningMsg = "You need a newer version of Honeybee to use this compoent." + \
"Use updateHoneybee component to update userObjects.\n" + \
"If you have already updated userObjects drag Honeybee_Honeybee component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
GHComponent.AddRuntimeMessage(w, warningMsg)
class hb_findFolders():
def __init__(self):
self.RADPath, self.RADFile = self.which('rad.exe')
self.EPPath, self.EPFile = self.which('EnergyPlus.exe')
self.DSPath, self.DSFile = self.which('gen_dc.exe')
def which(self, program):
"""
Check for path. Modified from this link:
http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
"""
def is_exe(fpath):
#print fpath
#if fpath.upper().find("EnergyPlus") > 0:
# print fpath
            # Avoid the Radiance and Daysim installations that come with DIVA, as they
            # have a different folder structure which doesn't match the standard Daysim
if fpath.upper().find("DIVA")<0:
                # if the user has DIVA installed, the component may find the DIVA version
                # of RADIANCE and DAYSIM, which can cause issues because of the different
                # folder structure in DIVA
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
else:
return False
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return path, exe_file
return None, None
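    # Illustrative sketch (hypothetical install location): for a bare name such as
    # 'rad.exe', which() scans PATH and returns a (directory, full_path) tuple like
    #     ('C:\\Radiance\\bin', 'C:\\Radiance\\bin\\rad.exe')
    # or (None, None) when the executable is not found.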
class PrepareTemplateEPLibFiles(object):
"""
Download Template files and check for available libraries for EnergyPlus
"""
def __init__(self, downloadTemplate = False, workingDir = sc.sticky["Honeybee_DefaultFolder"]):
if not sc.sticky.has_key("honeybee_constructionLib"): sc.sticky ["honeybee_constructionLib"] = {}
if not sc.sticky.has_key("honeybee_materialLib"): sc.sticky ["honeybee_materialLib"] = {}
if not sc.sticky.has_key("honeybee_windowMaterialLib"): sc.sticky ["honeybee_windowMaterialLib"] = {}
if not sc.sticky.has_key("honeybee_ScheduleLib"): sc.sticky["honeybee_ScheduleLib"] = {}
if not sc.sticky.has_key("honeybee_ScheduleTypeLimitsLib"): sc.sticky["honeybee_ScheduleTypeLimitsLib"] = {}
self.downloadTemplate = downloadTemplate
self.workingDir = workingDir
def downloadFile(self, url, workingDir):
import urllib
webFile = urllib.urlopen(url)
localFile = open(workingDir + '/' + url.split('/')[-1], 'wb')
localFile.write(webFile.read())
webFile.close()
localFile.close()
def cleanHBLib(self):
sc.sticky ["honeybee_constructionLib"] = {}
sc.sticky ["honeybee_materialLib"] = {}
sc.sticky ["honeybee_windowMaterialLib"] = {}
sc.sticky["honeybee_ScheduleLib"] = {}
sc.sticky["honeybee_ScheduleTypeLimitsLib"] = {}
def downloadTemplates(self):
workingDir = self.workingDir
# create the folder if it is not there
if not os.path.isdir(workingDir): os.mkdir(workingDir)
# create a backup from users library
templateFile = os.path.join(workingDir, 'OpenStudioMasterTemplate.idf')
bckupfile = os.path.join(workingDir, 'OpenStudioMasterTemplate_' + str(int(time.time())) +'.idf')
# download template file
if self.downloadTemplate or not os.path.isfile(templateFile):
# create a backup from users library
try: shutil.copyfile(templateFile, bckupfile)
except: pass
try:
## download File
print 'Downloading OpenStudioMasterTemplate.idf to ', workingDir
updatedLink = "https://dl.dropboxusercontent.com/u/16228160/honeybee/template/OpenStudioMasterTemplate.idf"
                # This is the link to the currently available version of Honeybee. Once we release the new version it can be removed.
#downloadFile(r'https://dl.dropboxusercontent.com/u/16228160/honeybee/OpenStudioMasterTemplate.idf', workingDir)
self.downloadFile(updatedLink, workingDir)
# clean current library
self.cleanHBLib()
except:
print 'Download failed!!! You need OpenStudioMasterTemplate.idf to use honeybee.' + \
'\nPlease check your internet connection, and try again!'
else:
pass
        if not os.path.isfile(os.path.join(workingDir, 'OpenStudioMasterTemplate.idf')):
print 'Download failed!!! You need OpenStudioMasterTemplate.idf to use honeybee.' + \
'\nPlease check your internet connection, and try again!'
return -1
else:
libFilePaths = [os.path.join(workingDir, 'OpenStudioMasterTemplate.idf')]
# download openstudio standards
        if not os.path.isfile(os.path.join(workingDir, 'OpenStudio_Standards.json')):
try:
## download File
print 'Downloading OpenStudio_Standards.json to ', workingDir
self.downloadFile(r'https://dl.dropboxusercontent.com/u/16228160/honeybee/OpenStudio_Standards.json', workingDir)
except:
print 'Download failed!!! You need OpenStudio_Standards.json to use honeybee.' + \
'\nPlease check your internet connection, and try again!'
else:
pass
        if not os.path.isfile(os.path.join(workingDir, 'OpenStudio_Standards.json')):
print 'Download failed!!! You need OpenStudio_Standards.json to use honeybee.' + \
'\nPlease check your internet connection, and try again!'
return -1
else:
# load the json file
filepath = os.path.join(workingDir, 'OpenStudio_Standards.json')
with open(filepath) as jsondata:
openStudioStandardLib = json.load(jsondata)
sc.sticky ["honeybee_OpenStudioStandardsFile"] = openStudioStandardLib
print "Standard template file is loaded!\n"
        # add custom library
customEPLib = os.path.join(workingDir,"userCustomEPLibrary.idf")
if not os.path.isfile(customEPLib):
# create an empty file
with open(customEPLib, "w") as outf:
outf.write("!Honeybee custom EnergyPlus library\n")
if os.path.isfile(customEPLib):
libFilePaths.append(customEPLib)
return libFilePaths
class HB_GetEPLibraries(object):
def __init__(self):
pass
def cleanHBLib(self, construction = True, schedule = True):
if construction:
sc.sticky ["honeybee_constructionLib"] = {}
sc.sticky ["honeybee_materialLib"] = {}
sc.sticky ["honeybee_windowMaterialLib"] = {}
if schedule:
sc.sticky["honeybee_ScheduleLib"] = {}
sc.sticky["honeybee_ScheduleTypeLimitsLib"] = {}
def createEPObject(self, openFile, resultDict, key, scheduleType = None):
# store the data into the dictionary
recounter = 0
for lineCount, line in enumerate(openFile):
if line.strip().startswith("!") or line.strip()=="":
recounter -= 1
continue
if lineCount == 0:
nameKey = line.split("!")[0].strip()[:-1].strip().upper()
if nameKey in resultDict[key].keys():
                    # this means the material is already in the lib
                    # I could rename it but for now I'd rather give a warning
                    # and break the loop
                    warning = key + ": " + nameKey + " already exists in the library. " + \
                              "Rename one of the " + nameKey + " and try again."
print warning
break
else:
# add the material to the library
resultDict[key][nameKey] = {}
if scheduleType!=None: resultDict[key][nameKey][0] = scheduleType
else:
objValue = line.split("!")[0].strip()
try: objDescription = line.split("!")[1].strip()
except: objDescription = ""
objKey = lineCount + recounter #+ '_' + line.split("!-")[1].strip()
if objValue.endswith(","):
resultDict[key][nameKey][objKey] = objValue[:-1], objDescription
elif objValue.endswith(";"):
resultDict[key][nameKey][objKey] = objValue[:-1], objDescription
break
return resultDict
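    # The resulting dictionary is keyed by object type and name, e.g. (illustrative
    # field values):
    #   resultDict["Material"]["MATERIAL NAME"] = {1: ("Rough", "Roughness"), ...}
    # where each numbered entry stores one IDF field value and its inline description,
    # and, for schedules, entry 0 stores the schedule type.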
def loadEPConstructionsAndMaterials(self, idfFilePaths, cleanCurrentLib = True):
if cleanCurrentLib:
self.cleanHBLib(True, False)
# add current library here
resultDict = {"Material": sc.sticky["honeybee_materialLib"],
"WindowMaterial": sc.sticky["honeybee_windowMaterialLib"],
"Construction": sc.sticky ["honeybee_constructionLib"]}
print "Loading EP construction library..."
EPKeys = ["Material", "WindowMaterial", "Construction"]
for libFilePath in idfFilePaths:
with open(libFilePath, 'r') as inf:
for line in inf:
for key in EPKeys:
if line.lower().strip().startswith(key.lower() + ",") \
or line.lower().strip().startswith(key.lower() + ":"):
resultDict = self.createEPObject(inf, resultDict, key, line.strip()[:-1])
# add them to libraries
sc.sticky ["honeybee_constructionLib"] = resultDict["Construction"]
sc.sticky ["honeybee_materialLib"] = resultDict["Material"]
sc.sticky ["honeybee_windowMaterialLib"] = resultDict["WindowMaterial"]
        print str(len(sc.sticky["honeybee_constructionLib"].keys())) + " EPConstructions are now loaded in the Honeybee library"
        print str(len(sc.sticky["honeybee_materialLib"].keys())) + " EPMaterials are now loaded in the Honeybee library"
        print str(len(sc.sticky["honeybee_windowMaterialLib"].keys())) + " EPWindowMaterials are now loaded in the Honeybee library"
def loadEPSchedules(self, idfFilePaths, cleanCurrentLib = True):
if cleanCurrentLib:
self.cleanHBLib(False, True)
schedulesDict = {"ScheduleTypeLimits": sc.sticky["honeybee_ScheduleTypeLimitsLib"],
"Schedule": sc.sticky["honeybee_ScheduleLib"]
}
print "\nLoading EP schedules..."
EPKeys = ["ScheduleTypeLimits", "Schedule"]
        for libFilePath in idfFilePaths:
with open(libFilePath, 'r') as inf:
for line in inf:
for key in EPKeys:
if line.lower().strip().startswith(key.lower() + ",") \
or line.lower().strip().startswith(key.lower() + ":"):
schedulesDict = self.createEPObject(inf, schedulesDict, key, line.strip()[:-1])
break
sc.sticky["honeybee_ScheduleLib"] = schedulesDict["Schedule"]
sc.sticky["honeybee_ScheduleTypeLimitsLib"] = schedulesDict["ScheduleTypeLimits"]
        print str(len(sc.sticky["honeybee_ScheduleLib"].keys())) + " schedules are now loaded in the Honeybee library"
        print str(len(sc.sticky["honeybee_ScheduleTypeLimitsLib"].keys())) + " schedule type limits are now loaded in the Honeybee library"
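# Illustrative workflow (not executed here): the template files downloaded by
# PrepareTemplateEPLibFiles feed the loaders above, e.g.
#   libFilePaths = PrepareTemplateEPLibFiles(True).downloadTemplates()
#   epLibs = HB_GetEPLibraries()
#   epLibs.loadEPConstructionsAndMaterials(libFilePaths)
#   epLibs.loadEPSchedules(libFilePaths)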
class RADMaterialAux(object):
def __init__(self, reloadRADMaterial = False):
        self.radMatTypes = ["plastic", "glass", "trans", "metal", "mirror", "mixfunc", "dielectric", "transdata", "light", "glow"]
if reloadRADMaterial:
# initiate the library
if not sc.sticky.has_key("honeybee_RADMaterialLib"): sc.sticky ["honeybee_RADMaterialLib"] = {}
# add default materials to the library
self.analyseRadMaterials(self.createRadMaterialFromParameters('plastic', 'Context_Material', .35, .35, .35, 0, 0.1), True, True)
self.analyseRadMaterials(self.createRadMaterialFromParameters('plastic', 'Interior_Ceiling', .80, .80, .80, 0, 0.1), True, True)
self.analyseRadMaterials(self.createRadMaterialFromParameters('plastic', 'Interior_Floor', .2, .2, .2, 0, 0.1), True, True)
self.analyseRadMaterials(self.createRadMaterialFromParameters('plastic', 'Exterior_Floor', .2, .2, .2, 0, 0.1), True, True)
self.analyseRadMaterials(self.createRadMaterialFromParameters('glass', 'Exterior_Window', .60, .60, .60), True, True)
self.analyseRadMaterials(self.createRadMaterialFromParameters('glass', 'Interior_Window', .60, .60, .60), True, True)
self.analyseRadMaterials(self.createRadMaterialFromParameters('plastic', 'Exterior_Roof', .35, .35, .35, 0, 0.1), True, True)
self.analyseRadMaterials(self.createRadMaterialFromParameters('plastic', 'Exterior_Wall', .50, .50, .50, 0, 0.1), True, True)
self.analyseRadMaterials(self.createRadMaterialFromParameters('plastic', 'Interior_Wall', .50, .50, .50, 0, 0.1), True, True)
# import user defined RAD library
RADLibraryFile = os.path.join(sc.sticky["Honeybee_DefaultFolder"], "HoneybeeRadMaterials.mat")
if os.path.isfile(RADLibraryFile):
self.importRADMaterialsFromFile(RADLibraryFile)
else:
if not os.path.isdir(sc.sticky["Honeybee_DefaultFolder"]):
os.mkdir(sc.sticky["Honeybee_DefaultFolder"])
with open(RADLibraryFile, "w") as outf:
outf.write("#Honeybee Radiance Material Library\n")
# let the user do it for now
# update the list of the materials in the call from library components
#for component in ghenv.Component.OnPingDocument().Objects:
# if type(component)== type(ghenv.Component) and component.Name == "Honeybee_Call from Radiance Library":
# pass
# #component.ExpireSolution(True)
print "Loading RAD default materials..." + \
`len(sc.sticky ["honeybee_RADMaterialLib"].keys())` + " RAD materials are loaded\n"
def duplicateMaterialWarning(self, materialName, newMaterialString):
returnYN = {'YES': True, 'NO': False}
buttons = System.Windows.Forms.MessageBoxButtons.YesNo
icon = System.Windows.Forms.MessageBoxIcon.Warning
try:
currentMaterialString = self.getRADMaterialString(materialName)
except:
currentMaterialString = materialName
isAdded, materialName = self.analyseRadMaterials(materialName, False)
msg = materialName + " already exists in the library:\n\n" + \
currentMaterialString + "\n" + \
"Do you want to overwrite the current material with this new definition?\n\n" + \
newMaterialString + "\n\n" + \
"Tip: If you are not sure what to do select No and change the material name."
up = rc.UI.Dialogs.ShowMessageBox(msg, "Duplicate Material Name", buttons, icon)
return returnYN[up.ToString().ToUpper()]
def addRADMatToDocumentDict(self, HBSrf, currentMatDict, currentMixedFunctionsDict):
"""
        this function collects the materials for a single run and adds them to the current material dictionaries
"""
# check if the material is already added
materialName = HBSrf.RadMaterial
if not materialName in currentMatDict.keys():
# find material type
materialType = sc.sticky ["honeybee_RADMaterialLib"][materialName].keys()[0]
# check if this is a mixed function
if materialType == "mixfunc":
# add mixedFunction
currentMixedFunctionsDict[materialName] = materialName
# find the base materials for the mixed function
material1 = sc.sticky ["honeybee_RADMaterialLib"][materialName][materialType][0][0]
material2 = sc.sticky ["honeybee_RADMaterialLib"][materialName][materialType][0][1]
for matName in [material1, material2]:
if not matName in currentMatDict.keys():
currentMatDict[matName] = matName
else:
# add to the dictionary
currentMatDict[materialName] = materialName
return currentMatDict, currentMixedFunctionsDict
def createRadMaterialFromParameters(self, modifier, name, *args):
def getTransmissivity(transmittance):
return (math.sqrt(0.8402528435 + 0.0072522239 * (transmittance ** 2)) - 0.9166530661 ) / 0.0036261119 / transmittance
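            # note: this is the standard Radiance relation between measured transmittance (Tn)
            # and the transmissivity (tn) expected by the glass primitive:
            #   tn = (sqrt(0.8402528435 + 0.0072522239 * Tn**2) - 0.9166530661) / (0.0036261119 * Tn)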
# I should check the inputs here
radMaterial = "void " + modifier + " " + name + "\n" + \
"0\n" + \
"0\n" + \
`int(len(args))`
for arg in args:
if modifier == "glass":
radMaterial = radMaterial + (" " + "%.3f"%getTransmissivity(arg))
else:
radMaterial = radMaterial + (" " + "%.3f"%arg)
return radMaterial + "\n"
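    # Illustrative output (hypothetical values):
    #   createRadMaterialFromParameters('plastic', 'Exterior_Wall', .5, .5, .5, 0, 0.1)
    # produces a definition of the form:
    #   void plastic Exterior_Wall
    #   0
    #   0
    #   5 0.500 0.500 0.500 0.000 0.100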
def analyseRadMaterials(self, radMaterialString, addToDocLib = False, overwrite = True):
"""
        import a RAD material string, convert it into the Honeybee RAD library format and return the material name
"""
cleanedRadMaterialString = self.cleanRadMaterials(radMaterialString)
lineSegments = cleanedRadMaterialString.split(" ")
if len(lineSegments) == 1:
# this is just the name
# to be used for applying material to surfaces
return 0, lineSegments[0]
else:
#print lineSegments
materialType = lineSegments[1]
materialName = lineSegments[2]
if addToDocLib:
if not overwrite and materialName in sc.sticky ["honeybee_RADMaterialLib"]:
upload = self.duplicateMaterialWarning(materialName, radMaterialString)
if not upload:
return 0, materialName
sc.sticky ["honeybee_RADMaterialLib"][materialName] = {materialType: {}}
counters = []
materialProp = lineSegments[3:]
#first counter is the first member of the list
counter = 0
counters.append(0)
while counter < len(materialProp):
counter += int(materialProp[counter]) + 1
try:
counters.append(counter)
except:
pass
# print cleanedRadMaterialString
# print counter
# print counters
for counter, count in enumerate(counters[1:]):
matStr = materialProp[counters[counter] + 1: count]
sc.sticky ["honeybee_RADMaterialLib"][materialName][materialType][counter] = matStr
else:
return 0, materialName
return 1, materialName
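    # e.g. (illustrative) a plastic definition "void plastic Exterior_Wall 0 0 5 .5 .5 .5 0 0.1"
    # ends up in the library as:
    #   sc.sticky["honeybee_RADMaterialLib"]["Exterior_Wall"] = \
    #       {"plastic": {0: [], 1: [], 2: ['.5', '.5', '.5', '0', '0.1']}}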
def cleanRadMaterials(self, radMaterialString):
"""
        take a rad material string, remove comments and extra whitespace, and return
        a single-line string with all values separated by single spaces
"""
matStr = ""
lines = radMaterialString.split("\n")
for line in lines:
if not line.strip().startswith("#"):
line = line.replace("\t", " ")
lineSeg = line.split(" ")
for seg in lineSeg:
if seg.strip()!="":
matStr += seg + " "
return matStr[:-1] # remove the last space
def getRADMaterialString(self, materialName):
"""
        create a rad material string from the HB material dictionary based on the material name
"""
materialType = sc.sticky ["honeybee_RADMaterialLib"][materialName].keys()[0]
matStr = "void " + materialType + " " + materialName + "\n"
for lineCount in sc.sticky ["honeybee_RADMaterialLib"][materialName][materialType].keys():
properties = sc.sticky ["honeybee_RADMaterialLib"][materialName][materialType][lineCount]
matStr += str(len(properties)) + " " + " ".join(properties) + "\n"
return matStr
def getRADMaterialType(self, materialName):
materialType = sc.sticky ["honeybee_RADMaterialLib"][materialName].keys()[0]
return materialType
def getRADMaterialParameters(self, materialName):
materialType = self.getRADMaterialType(materialName)
lastLine = len(sc.sticky ["honeybee_RADMaterialLib"][materialName][materialType].keys()) - 1
properties = sc.sticky ["honeybee_RADMaterialLib"][materialName][materialType][lastLine]
return properties
def getSTForTransMaterials(self, materialName):
properties = self.getRADMaterialParameters(materialName)
properties = map(float, properties)
        # check for translucent materials
PHAverage = 0.265 * properties[0] + 0.670 * properties[1] + 0.065 * properties[2]
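        # the 0.265/0.670/0.065 weights above are the photometric (luminance) weights
        # that Radiance uses for the red, green and blue channels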
st = properties[5] * properties[6] * (1 - PHAverage * properties[3])
return st
def importRadMatStr(self, firstline, inRadf):
matStr = firstline
for line in inRadf:
if not line.strip().startswith("void"):
if not line.strip().startswith("#") and line.strip()!= "":
matStr += line
else:
isAdded, materialName = self.analyseRadMaterials(matStr, True, True)
# import the rest of the file to the honeybee library
self.importRadMatStr(line, inRadf)
        # import the last material in the file
isAdded, materialName = self.analyseRadMaterials(matStr, True, True)
def importRADMaterialsFromFile(self, radFilePath):
with open(radFilePath, "r") as inRadf:
for line in inRadf:
if line.strip().startswith("void"):
if line.split(" ")[1].strip() in self.radMatTypes:
matStr = self.importRadMatStr(line, inRadf)
def searchRadMaterials(self, keywords, materialTypes):
keywords = [kw.strip().upper() for kw in keywords]
materialTypes = [mt.strip().upper() for mt in materialTypes]
materials = []
for radMaterial in sc.sticky["honeybee_RADMaterialLib"].keys():
materialName = radMaterial.ToUpper()
materialType = sc.sticky["honeybee_RADMaterialLib"][radMaterial].keys()[0].ToUpper()
if len(materialTypes)==0 or materialType.ToUpper()in materialTypes:
if len(keywords)!= 0 and not "*" in keywords:
for keyword in keywords:
if materialName.find(keyword)!= -1 or keyword.find(materialName)!= -1:
materials.append(radMaterial)
else:
materials.append(radMaterial)
return materials
def addToGlobalLibrary(self, RADMaterial, RADLibraryFile = os.path.join(sc.sticky["Honeybee_DefaultFolder"], "HoneybeeRadMaterials.mat")):
added, materialName = self.analyseRadMaterials(RADMaterial, False)
# read the global library file
if not os.path.isfile(RADLibraryFile):
# create a single line for the library
with open(RADLibraryFile, "w") as inf:
inf.write("#Honeybee Radiance Global Material Library\n\n")
def addToExistingMaterials(firstline, inRadf, targetMaterialName):
matStr = firstline
thisLine = ""
# collect material string
for thisLine in inRadf:
if not thisLine.strip().startswith("void"):
# avoid comment lines and empty lines
if not thisLine.strip().startswith("#") and thisLine.strip()!= "":
matStr += thisLine
else:
break
# get the material name
isAdded, materialName = self.analyseRadMaterials(matStr, False)
# print materialName
if materialName == targetMaterialName:
self.found = True
# ask the user if he wants to overwrite it with the new one
writeTheNewMaterial= self.duplicateMaterialWarning(matStr, RADMaterial)
if writeTheNewMaterial:
# update the file
self.outFileStr += RADMaterial + "\n"
else:
# keep the current material
self.outFileStr += matStr + "\n"
else:
# keep this material
self.outFileStr += matStr + "\n"
# import the rest of the file to the honeybee library
if thisLine.strip().startswith("void"):
addToExistingMaterials(thisLine, inRadf, targetMaterialName)
# open the file and read the materials
self.outFileStr = ""
self.found = False
with open(RADLibraryFile, "r") as inRadf:
for line in inRadf:
if line.strip().startswith("void"):
if line.split(" ")[1].strip() in self.radMatTypes:
# check if the name is already existed and add it to the
# file if the user wants to overwrite the file.
addToExistingMaterials(line, inRadf, materialName)
else:
self.outFileStr += line
if self.found == False:
# the material is just new so let's just add it to the end of the file
print materialName + " is added to global library"
self.outFileStr += RADMaterial + "\n"
# write the new file
        # this is not the most efficient way of reading and writing a file in Python
        # but as long as the file is not huge it is fine! Someone may want to fix this
# print self.outFileStr
with open(RADLibraryFile, "w") as inRadf:
inRadf.write(self.outFileStr)
class DLAnalysisRecipe(object):
def __init__(self, type, *arg):
"""
types:
0: image based analysis > Illuminance(lux) = 0, Radiation(kwh) = 1, Luminance (cd) = 2
1: node based analysis
2: annual simulation (Daysim for now)
3: daylight factor
4: vertical sky component
"""
self.type = type
self.component = arg[-1]
# based on the type it should return different outputs
if type == 0:
self.skyFile = arg[0]
self.viewNames = arg[1]
try: self.radParameters = arg[2].d
except: self.radParameters = arg[2]
self.cameraType = arg[3]
self.simulationType = arg[4]
self.imageSize = arg[5], arg[6]
self.sectionPlane = arg[7]
self.backupImages = arg[8]
self.studyFolder = "\\imageBasedSimulation\\"
elif type == 1:
self.skyFile = arg[0]
self.testPts = self.convertTreeToLists(arg[1])
self.vectors = self.convertTreeToLists(arg[2])
try: self.radParameters = arg[3].d
except: self.radParameters = arg[3]
self.simulationType = arg[4]
self.testMesh = self.convertTreeToLists(arg[5])
self.studyFolder = "\\gridBasedSimulation\\"
elif type == 2:
self.weatherFile = arg[0]
self.testPts = self.convertTreeToLists(arg[1])
self.vectors = self.convertTreeToLists(arg[2])
try: self.radParameters = arg[3].d
except: self.radParameters = arg[3]
self.DSParameters = arg[4]
self.testMesh = self.convertTreeToLists(arg[5])
self.northDegrees = arg[6]
self.studyFolder = "\\annualSimulation\\"
elif type == 3:
self.skyFile = arg[0]
self.testPts = self.convertTreeToLists(arg[1])
self.vectors = self.convertTreeToLists(arg[2])
try: self.radParameters = arg[3].d
except: self.radParameters = arg[3]
self.simulationType = 0 #illuminance
self.testMesh = self.convertTreeToLists(arg[4])
self.studyFolder = "\\DF\\"
elif type == 4:
self.skyFile = arg[0]
self.testPts = self.convertTreeToLists(arg[1])
self.vectors = self.convertTreeToLists(arg[2])
try: self.radParameters = arg[3].d
except: self.radParameters = arg[3]
self.testMesh = self.convertTreeToLists(arg[4])
self.simulationType = 0 #illuminance
self.studyFolder = "\\VSC\\"
# double check the sky in case of grid based and image based simulations
if type ==0 or type == 1:
self.checkSky()
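    # convertTreeToLists flattens a Grasshopper DataTree into a list of Python lists,
    # one list per branch of the tree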
def convertTreeToLists(self, l):
listOfLists = []
for path in l.Paths:
listOfLists.append(l.Branch(path))
return listOfLists
def checkSky(self):
if self.simulationType == 1:
# make sure the sky is either gencum or gendaylit
# edit in case of gendaylit
self.radSkyFile = self.skyFile.split(".")[0] + "_radAnalysis.sky"
skyOut = open(self.radSkyFile, "w")
genDaylit = False
with open(self.skyFile, "r") as skyIn:
for line in skyIn:
if line.startswith("!gensky"):
self.skyFile = None
msg = "You need to use one of the climate-based skies for radiation analysis.\n" + \
"Change the skyFile and try again"
self.component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
return
elif line.startswith("!gendaylit"):
line = line.replace("-O 0", "-O 1")
genDaylit = True
# write a new file
skyOut.write(line)
skyOut.close()
self.skyFile = self.radSkyFile
if not genDaylit:
self.simulationType = 1.1 # annual radiation analysis
else:
# make sure the sky is not from gencum
with open(self.skyFile, "r") as skyIn:
for line in skyIn:
if line.strip().startswith("2 skybright") and line.strip().endswith(".cal"):
self.skyFile = None
msg = "Cumulative sky can only be used for radiation analysis.\n" + \
"Change the skyFile and try again"
self.component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
return
class hb_MSHToRAD(object):
def __init__(self, mesh, fileName = None, workingDir = None, bitmap = None, radMaterial = None):
if fileName == None:
fileName = "unnamed"
self.name = fileName
if workingDir == None:
workingDir = sc.sticky["Honeybee_DefaultFolder"]
workingDir = os.path.join(workingDir, fileName, "MSH2RADFiles")
if not os.path.isdir(workingDir): os.mkdir(workingDir)
self.workingDir = workingDir
self.mesh = mesh
self.pattern = bitmap
if self.pattern != None:
# create material name based on bitmap
bitmapFileName = os.path.basename(self.pattern)
self.matName = ".".join(bitmapFileName.split(".")[:-1])
#copy the image into same folder
try:
shutil.copyfile(self.pattern, os.path.join(self.workingDir, bitmapFileName))
except:
pass
else:
self.matName = "radMaterial"
if radMaterial != None:
try:
self.matName = radMaterial.split("\n")[0].split(" ")[2]
except: # Exception, e:
# print `e`
# not a standard radiance material
pass
self.RADMaterial = radMaterial
def meshToObj(self):
objFilePath = os.path.join(self.workingDir, self.name + ".obj")
with open(objFilePath, "w") as outfile:
# objTxt = "# OBJ file written by TurtlePyMesh\n\n"
outfile.write("# OBJ file written by TurtlePyMesh\n\n")
# add material file name
mtlFile = self.name + ".mtl"
#objTxt += "mtllib " + mtlFile + "\n"
outfile.write("mtllib " + mtlFile + "\n")
for count, Tmesh in enumerate(self.mesh):
# add object name - for this version I keep it all as a single object
#objTxt += "o object_" + str(count + 1) + "\n"
outfile.write("o object_" + str(count + 1) + "\n")
# add material name - for now brick as test
#objTxt += "usemtl " + matName + "\n"
outfile.write("usemtl " + self.matName + "\n")
if Tmesh.Normals.Count == 0:
Tmesh.Normals.ComputeNormals()
# add vertices
for v in Tmesh.Vertices:
XYZ = v.X, v.Y, v.Z
XYZ = map(str, XYZ)
vString = " ".join(XYZ)
#objTxt += "v " + vString + "\n"
outfile.write("v " + vString + "\n")
# add texture vertices
for vt in Tmesh.TextureCoordinates:
XY = vt.X, vt.Y
XY = map(str, XY)
vtString = " ".join(XY)
#objTxt += "vt " + vtString + "\n"
outfile.write("vt " + vtString + "\n")
# add normals
for vn in Tmesh.Normals:
XYZ = vn.X, vn.Y, vn.Z
XYZ = map(str, XYZ)
vnString = " ".join(XYZ)
# objTxt += "vn " + vnString + "\n"
outfile.write("vn " + vnString + "\n")
# add faces
# vertices number is global so the number should be added together
fCounter = 0
if count > 0:
for meshCount in range(count):
fCounter += self.mesh[meshCount].Vertices.Count
# print fCounter
if self.pattern != None:
for face in Tmesh.Faces:
# objTxt += "f " + "/".join(3*[`face.A + fCounter + 1`]) + " " + "/".join(3*[`face.B + fCounter + 1`]) + " " + "/".join(3*[`face.C + fCounter + 1`])
outfile.write("f " + "/".join(3*[`face.A + fCounter + 1`]) + " " + "/".join(3*[`face.B + fCounter + 1`]) + " " + "/".join(3*[`face.C + fCounter + 1`]))
if face.IsQuad:
#objTxt += " " + "/".join(3*[`face.D + fCounter + 1`])
outfile.write(" " + "/".join(3*[`face.D + fCounter + 1`]))
#objTxt += "\n"
outfile.write("\n")
else:
for face in Tmesh.Faces:
outfile.write("f " + "//".join(2 * [`face.A + fCounter + 1`]) + \
" " + "//".join(2 * [`face.B + fCounter + 1`]) + \
" " + "//".join(2 * [`face.C + fCounter + 1`]))
if face.IsQuad:
outfile.write(" " + "//".join( 2 * [`face.D + fCounter + 1`]))
#objTxt += "\n"
outfile.write("\n")
            # Building the whole objTxt string and writing it at once (below) happened to be very slow!
# with open(objFile, "w") as outfile:
# outfile.writelines(objTxt)
return objFilePath
def getPICImageSize(self):
with open(self.pattern, "rb") as inf:
for count, line in enumerate(inf):
#print line
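                # the resolution line of a Radiance picture typically reads "-Y height +X width",
                # which is the format the split below relies on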
if line.strip().startswith("-Y") and line.find("-X"):
Y, YSize, X, XSize = line.split(" ")
return XSize, YSize
def objToRAD(self, objFile):
# prepare file names
radFile = objFile.replace(".obj", ".rad")
mshFile = objFile.replace(".obj", ".msh")
batFile = objFile.replace(".obj", ".bat")
path, fileName = os.path.split(radFile)
matFile = os.path.join(path, "material_" + fileName)
try:
materialType = self.RADMaterial.split("\n")[0].split(" ")[1]
materialTale = "\n".join(self.RADMaterial.split("\n")[1:])
except Exception, e:
# to be added here: if material is not full string then get it from the library
print "material error..." + `e`
return
# create material file
if self.pattern != None:
# find aspect ratio
try:
X, Y= self.getPICImageSize()
ar = str(int(X)/int(Y))
except Exception, e:
ar = str(1)
# mesh has a pattern
patternName = ".".join(os.path.basename(self.pattern).split(".")[:-1])
materialStr = "void colorpict " + patternName + "_pattern\n" + \
"7 red green blue " + self.pattern + " . (" + ar + "*(Lu-floor(Lu))) (Lv-floor(Lv)) \n" + \
"0\n" + \
"1 1\n" + \
patternName + "_pattern " + materialType + " " + patternName + "\n" + \
materialTale
else:
materialStr = "void " + materialType + " " + self.matName + "\n" + \
materialTale
# write material to file
with open(matFile, "w") as outfile:
outfile.write(materialStr)
# create rad file
if self.pattern != None:
cmd = "c:\\radiance\\bin\\obj2mesh -a " + matFile + " " + objFile + " > " + mshFile
with open(batFile, "w") as outfile:
outfile.write(cmd)
#outfile.write("\npause")
os.system(batFile)
radStr = "void mesh painting\n" + \
"1 " + mshFile + "\n" + \
"0\n" + \
"0\n"
with open(radFile, "w") as outfile:
outfile.write(radStr)
else:
# use object to rad
#create a fake mtl file - material will be overwritten by radiance material
mtlFile = objFile.replace(".obj", ".mtl")
mtlStr = "# Honeybee\n" + \
"newmtl " + self.matName + "\n" + \
"Ka 0.0000 0.0000 0.0000\n" + \
"Kd 1.0000 1.0000 1.0000\n" + \
"Ks 1.0000 1.0000 1.0000\n" + \
"Tf 0.0000 0.0000 0.0000\n" + \
"d 1.0000\n" + \
"Ns 0\n"
with open(mtlFile, "w") as mtlf:
mtlf.write(mtlStr)
# create a map file
#mapFile = objFile.replace(".obj", ".map")
#with open(mapFile, "w") as mapf:
# mapf.write(self.matName + " (Object \"" + self.matName + "\");")
#cmd = "c:\\radiance\\bin\\obj2rad -m " + mapFile + " " + objFile + " > " + radFile
cmd = "c:\\radiance\\bin\\obj2rad " + objFile + " > " + radFile
with open(batFile, "w") as outfile:
outfile.write(cmd)
#outfile.write("\npause")
os.system(batFile)
time.sleep(.2)
return matFile, radFile
class WriteRAD(object):
def __init__(self, component = ghenv.Component):
self.component = component
self.hb_writeRADAUX = sc.sticky["honeybee_WriteRADAUX"]()
self.hb_RADMaterialAUX = sc.sticky["honeybee_RADMaterialAUX"]()
self.lb_preparation = sc.sticky["ladybug_Preparation"]()
self.hb_writeDS = sc.sticky["honeybee_WriteDS"]()
self.hb_radParDict = sc.sticky["honeybee_RADParameters"]().radParDict
hb_folders = sc.sticky["honeybee_folders"]
self.hb_RADPath = hb_folders["RADPath"]
self.hb_RADLibPath = hb_folders["RADLibPath"]
self.hb_DSPath = hb_folders["DSPath"]
self.hb_DSCore = hb_folders["DSCorePath"]
self.hb_DSLibPath = hb_folders["DSLibPath"]
def writeRADAndMaterialFiles(self, originalHBObjects, subWorkingDir, radFileName, \
analysisRecipe, meshParameters, exportInteriorWalls):
# collect information from analysis recipe
radParameters = analysisRecipe.radParameters
simulationType = analysisRecipe.type
radFileFullName = os.path.join(subWorkingDir, radFileName + '.rad')
IESObjects = {}
IESCount = 0
# call the objects from the lib
hb_hive = sc.sticky["honeybee_Hive"]()
HBObjects = hb_hive.callFromHoneybeeHive(originalHBObjects)
geoRadFile = open(radFileFullName, 'w')
geoRadFile.write("#GENERATED BY HONEYBEE\n")
customRADMat = {} # dictionary to collect the custom material names
customMixFunRadMat = {} # dictionary to collect the custom mixfunc material names
surfaceList = []
if len(HBObjects)!=0:
for objCount, HBObj in enumerate(HBObjects):
# check if the object is zone or a surface (?)
if HBObj.objectType == "HBZone":
if HBObj.hasNonPlanarSrf or HBObj.hasInternalEdge:
HBObj.prepareNonPlanarZone(meshParameters)
for srf in HBObj.surfaces:
# check if an interior wall
if not exportInteriorWalls and self.hb_writeRADAUX.isSrfInterior(srf):
continue
# if it is an interior wall and the other wall is already written
# then don't write this wall
if self.hb_writeRADAUX.isSrfInterior(srf) and srf.BCObject.name in surfaceList:
continue
surfaceList.append(srf.name)
                        # collect the custom material information
if srf.RadMaterial!=None:
customRADMat, customMixFunRadMat = self.hb_RADMaterialAUX.addRADMatToDocumentDict(srf, customRADMat, customMixFunRadMat)
# write the surfaces
if srf.isPlanar and len(srf.childSrfs)<2:
geoRadFile.write(self.RADSurface(srf))
else:
geoRadFile.write(self.RADNonPlanarSurface(srf))
if srf.hasChild:
                        # collect the custom material information
for childSrf in srf.childSrfs:
if childSrf.RadMaterial!=None:
customRADMat, customMixFunRadMat = self.hb_RADMaterialAUX.addRADMatToDocumentDict(childSrf, customRADMat, customMixFunRadMat)
if not srf.isPlanar or len(srf.childSrfs) > 1:
geoRadFile.write(self.RADNonPlanarChildSurface(srf))
elif HBObj.objectType == "HBSurface":
# I should wrap this in a function as I'm using it multiple times with minor changes
                    # collect the custom material information
if HBObj.RadMaterial!=None:
try:
customRADMat, customMixFunRadMat = self.hb_RADMaterialAUX.addRADMatToDocumentDict(HBObj, customRADMat, customMixFunRadMat)
except:
msg = HBObj.RadMaterial + " is not defined in the material library! Add the material to library and try again."
print msg
self.component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
return -1
# check for material in child surfaces
if not HBObj.isChild and HBObj.hasChild:
                        # collect the custom material information
for childSrf in HBObj.childSrfs:
if childSrf.RadMaterial!=None:
try:
customRADMat, customMixFunRadMat = self.hb_RADMaterialAUX.addRADMatToDocumentDict(childSrf, customRADMat, customMixFunRadMat)
except:
msg = childSrf.RadMaterial + " is not defined in the material library! Add the material to library and try again."
print msg
self.component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
return -1
if HBObj.isPlanar and (not HBObj.isChild and len(HBObj.childSrfs)<2):
# check for rad material
geoRadFile.write(self.RADSurface(HBObj))
else:
geoRadFile.write(self.RADNonPlanarSurface(HBObj))
if not HBObj.isChild and HBObj.hasChild:
geoRadFile.write(self.RADNonPlanarChildSurface(HBObj))
elif HBObj.objectType == "HBIES":
IESCount += 1
IESObjcIsFine = True
                    # check if the object has been moved or scaled
if HBObj.checkIfScaledOrRotated(originalHBObjects[objCount]):
IESObjcIsFine = False
msg = "IES luminaire " + HBObj.name + " is scaled or rotated" + \
" and cannot be added to the scene."
print msg
self.component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
                    # check if the material name already exists
if HBObj.name in customRADMat.keys():
IESObjcIsFine = False
msg = "IES luminaire " + HBObj.name + " cannot be added to the scene.\n" + \
"A material with the same name already exist."
print msg
self.component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
# if it is all fine then write the geometry
if IESObjcIsFine:
geoRadFile.write(self.getIESSurfaceStr(originalHBObjects[objCount], HBObj.name, IESCount, HBObj))
# downlight_light polygon downlight.d
# add to IES Objects list so I can add the materials to the list later
if HBObj.name not in IESObjects.keys():
IESObjects[HBObj.name] = HBObj
geoRadFile.close()
########################################################################
######################## GENERATE THE BASE RAD FILE ####################
materialFileName = subWorkingDir + "\\material_" + radFileName + '.rad'
# This part should be fully replaced with the new method where I generate the materials from the
# 0.1 material string
matStr = "# start of generic materials definition(s)\n" + \
self.hb_RADMaterialAUX.getRADMaterialString('Context_Material') + "\n" + \
self.hb_RADMaterialAUX.getRADMaterialString('Interior_Ceiling') + "\n" + \
self.hb_RADMaterialAUX.getRADMaterialString('Interior_Floor') + "\n" + \
self.hb_RADMaterialAUX.getRADMaterialString('Exterior_Floor') + "\n" + \
self.hb_RADMaterialAUX.getRADMaterialString('Exterior_Window') + "\n" + \
self.hb_RADMaterialAUX.getRADMaterialString('Interior_Window') + "\n" + \
self.hb_RADMaterialAUX.getRADMaterialString('Exterior_Roof') + "\n" + \
self.hb_RADMaterialAUX.getRADMaterialString('Exterior_Wall') + "\n" + \
self.hb_RADMaterialAUX.getRADMaterialString('Interior_Wall') + "\n" + \
"# end of generic materials definition(s)\n"
with open(materialFileName, 'w') as matFile:
matFile.write(matStr)
matFile.write("\n# start of material(s) specific to this study (if any)\n")
for radMatName in customRADMat.keys():
matFile.write(self.hb_RADMaterialAUX.getRADMaterialString(radMatName) + "\n")
                # check if the material is trans
if self.hb_RADMaterialAUX.getRADMaterialType(radMatName) == "trans":
# get the st value
st = self.hb_RADMaterialAUX.getSTForTransMaterials(radMatName)
if st < radParameters["_st_"]:
print "Found a trans material... " + \
"Resetting st parameter from " + str(radParameters["_st_"]) + " to " + str(st)
radParameters["_st_"] = st
# write mixedfun if any
for radMatName in customMixFunRadMat.keys():
matFile.write(self.hb_RADMaterialAUX.getRADMaterialString(radMatName) + "\n")
# write IES material if any
if len(IESObjects.keys())!= 0:
for IESName in IESObjects.keys():
IESObj = IESObjects[IESName]
# write material file
matFile.write(IESObj.materialStr)
# add dat file to folder
datFileName = subWorkingDir + "\\" + IESName + '.dat'
with open(datFileName, "w") as outDat:
outDat.write(IESObj.datFile)
matFile.write("# end of material(s) specific to this study (if any)\n")
        # export Daysim shading geometries as rad files
# this is only useful for dynamic shadings
dynamicCounter = 0
if simulationType == 2:
dynamicShadingRecipes = analysisRecipe.DSParameters.DShdR
if len(dynamicShadingRecipes) == 0: return radFileFullName, materialFileName
customRADMat = {} # dictionary to collect the custom material names
customMixFunRadMat = {} # dictionary to collect the custom mixfunc material names
for shadingRecipe in dynamicShadingRecipes:
if shadingRecipe.type == 2:
groupName = shadingRecipe.name
dynamicCounter+=1
for stateCount, shadingState in enumerate(shadingRecipe.shadingStates):
fileName = groupName + "_state_" + str(stateCount + 1) + ".rad"
try:
radStr = ""
shdHBObjects = hb_hive.callFromHoneybeeHive(shadingState.shdHBObjects)
for HBObj in shdHBObjects:
                            # collect the custom material information
if HBObj.RadMaterial!=None:
customRADMat, customMixFunRadMat = self.hb_RADMaterialAUX.addRADMatToDocumentDict(HBObj, customRADMat, customMixFunRadMat)
if HBObj.isPlanar and (not HBObj.isChild and len(HBObj.childSrfs)<2):
radStr += self.RADSurface(HBObj)
else:
radStr += self.RADNonPlanarSurface(HBObj)
if not HBObj.isChild and HBObj.hasChild:
                                # collect the custom material information
for childSrf in HBObj.childSrfs:
if childSrf.RadMaterial!=None:
customRADMat, customMixFunRadMat = self.hb_RADMaterialAUX.addRADMatToDocumentDict(childSrf, customRADMat, customMixFunRadMat)
radStr += self.RADNonPlanarChildSurface(HBObj)
# write the shading file
with open(subWorkingDir + "\\" + fileName, "w") as radInf:
radInf.write(matStr)
radInf.write("# material(s) specific to this study\n")
for radMatName in customRADMat.keys():
radInf.write(self.hb_RADMaterialAUX.getRADMaterialString(radMatName) + "\n")
# write mixedfun if any
for radMatName in customMixFunRadMat.keys():
radInf.write(self.hb_RADMaterialAUX.getRADMaterialString(radMatName) + "\n")
radInf.write(radStr)
except Exception, e:
# print `e`
# None object so just create an empty file
with open(subWorkingDir + "\\" + fileName , "w") as radInf:
radInf.write("#empty shading file")
pass
return radFileFullName, materialFileName
def writeTestPtFile(self, subWorkingDir, radFileName, numOfCPUs, analysisRecipe):
if analysisRecipe.type == 0: return [], [] #image-based simulation
testPoints = analysisRecipe.testPts
ptsNormals = analysisRecipe.vectors
# write a pattern file which I can use later to re-branch the points
ptnFileName = os.path.join(subWorkingDir, radFileName + '.ptn')
with open(ptnFileName, "w") as ptnFile:
for ptList in testPoints:
ptnFile.write(str(len(ptList)) + ", ")
        # flatten the test points
flattenTestPoints = self.lb_preparation.flattenList(testPoints)
flattenPtsNormals = self.lb_preparation.flattenList(ptsNormals)
numOfPoints = len(flattenTestPoints)
        if numOfCPUs > numOfPoints: numOfCPUs = numOfPoints
if numOfCPUs > 1: ptsEachCpu = int(numOfPoints/(numOfCPUs))
else: ptsEachCpu = numOfPoints
lenOfPts = []
testPtsEachCPU = []
for cpuCount in range(numOfCPUs):
# write pts file
ptsForThisCPU = []
ptsFileName = os.path.join(subWorkingDir, radFileName + '_' + `cpuCount` + '.pts')
ptsFile = open(ptsFileName, "w")
if cpuCount + 1 != numOfCPUs:
for ptCount in range(cpuCount * ptsEachCpu, (cpuCount + 1) * ptsEachCpu):
ptsFile.write(self.hb_writeRADAUX.testPtsStr(flattenTestPoints[ptCount], flattenPtsNormals[ptCount]))
ptsForThisCPU.append(flattenTestPoints[ptCount])
lenOfPts.append(ptsEachCpu)
else:
for ptCount in range(cpuCount * ptsEachCpu, numOfPoints):
ptsFile.write(self.hb_writeRADAUX.testPtsStr(flattenTestPoints[ptCount], flattenPtsNormals[ptCount]))
ptsForThisCPU.append(flattenTestPoints[ptCount])
lenOfPts.append(numOfPoints - (cpuCount * ptsEachCpu))
ptsFile.close()
testPtsEachCPU.append(ptsForThisCPU)
return testPtsEachCPU, lenOfPts
def writeBatchFiles(self, subWorkingDir, radFileName, radSkyFileName, \
radFileFullName, materialFileName, \
numOfCPUs, testPtsEachCPU, \
lenOfPts, analysisRecipe, additionalRadFiles, \
readyOCTFile = None, runOverture = True):
batchFiles = []
fileNames = [] # list of only names of the files
pcompFileName = ""
# initiate RAD Parameters
if analysisRecipe.radParameters==None:
quality = 0
analysisRecipe.radParameters = {}
print "Default values are set for RAD parameters"
for key in self.hb_radParDict.keys():
#print key + " is set to " + str(hb_radParDict[key][quality])
analysisRecipe.radParameters[key] = self.hb_radParDict[key][quality]
if analysisRecipe.type == 2:
# read parameters
runAnnualGlare = analysisRecipe.DSParameters.runAnnualGlare
onlyAnnualGlare = analysisRecipe.DSParameters.onlyAnnualGlare
annualGlareViews = analysisRecipe.DSParameters.RhinoViewsName
outputUnits = analysisRecipe.DSParameters.outputUnits
adaptiveZone = analysisRecipe.DSParameters.adaptiveZone
dgp_imageSize = analysisRecipe.DSParameters.dgp_imageSize
dynamicShadingRecipes = analysisRecipe.DSParameters.DShdR
numOfIllFiles = analysisRecipe.DSParameters.numOfIll
northAngleRotation = analysisRecipe.northDegrees
# empty list for result file names
DSResultFilesAddress = []
# location string
epwFileAddress = analysisRecipe.weatherFile
locationStr, locName = self.hb_writeDS.DSLocationStr(self.hb_writeRADAUX, self.lb_preparation, epwFileAddress)
newLocName = self.lb_preparation.removeBlankLight(locName)
newLocName = newLocName.replace("/", "_")
# copy .epw file to sub-directory
self.lb_preparation.copyFile(epwFileAddress, subWorkingDir + "\\" + newLocName + '.epw')
pathStr = "SET RAYPATH=.;" + self.hb_RADLibPath + ";" + self.hb_DSPath + ";" + \
self.hb_DSLibPath + ";\nPATH=" + self.hb_RADPath + ";" + \
self.hb_DSPath + ";" + self.hb_DSLibPath + ";$PATH\n"
heaFileName = os.path.join(subWorkingDir, radFileName + '_0.hea')
initBatchFileName = os.path.join(subWorkingDir, radFileName + '_InitDS.bat')
initBatchFile = open(initBatchFileName, "w")
initBatchFile.write(pathStr)
initBatchStr = 'C:\n' + \
'CD ' + self.hb_DSPath + '\n' + \
'epw2wea ' + subWorkingDir + "\\" + self.lb_preparation.removeBlankLight(locName) + '.epw ' + subWorkingDir + "\\" + self.lb_preparation.removeBlankLight(locName) + '.wea\n' + \
':: 1. Generate Daysim version of Radiance Files\n' + \
'radfiles2daysim ' + heaFileName + ' -m -g\n'
# rotate scene if angle is not 0!
if northAngleRotation!=0:
initBatchStr += \
                ':: 1.5. Rotate geometry and test points\n' + \
'rotate_scene ' + heaFileName + '\n'
if runAnnualGlare:
initBatchStr += \
':: 2. Generate Values for annual glare\n' + \
'gen_dgp_profile ' + heaFileName
initBatchFile.write(initBatchStr)
initBatchFile.close()
            # annual glare only needs one header file and will run on a single cpu
if runAnnualGlare and onlyAnnualGlare:
numOfCPUs = 1
# write the rest of the files
for cpuCount in range(numOfCPUs):
heaFileName = os.path.join(subWorkingDir, radFileName + '_' + `cpuCount` + '.hea')
heaFile = open(heaFileName, "w")
projectName = radFileName
tempDirName = subWorkingDir + '\\tmp_' + `cpuCount`
heaFile.write(self.hb_writeDS.DSHeadingStr(projectName, subWorkingDir, tempDirName, self.hb_DSCore , cpuCount))
# delete the files in the old temp folder
tempWorkingDir = self.lb_preparation.makeWorkingDir(tempDirName)
heaFile.write(locationStr)
heaFile.write(self.hb_writeDS.DSAnalysisUnits(outputUnits, lenOfPts[cpuCount]))
# write view for annual glare if any
glareViewFileName = subWorkingDir + '\\' + projectName + '_' + 'annualGlareView.vf'
vfFile = open(glareViewFileName, "w")
vfFile.write('')
for view in annualGlareViews:
viewLine = self.hb_writeRADAUX.exportView(view, analysisRecipe.radParameters, 1, [dgp_imageSize, dgp_imageSize])
# I'm not sure why Daysim view file needs rview Perspective at the start line
vfFile.write("rview Perspective " + viewLine + "\n")
vfFile.close()
# building string
heaFile.write(self.hb_writeDS.DSBldgStr(projectName, materialFileName, radFileFullName, \
adaptiveZone, dgp_imageSize, dgp_imageSize, cpuCount, northAngleRotation))
# radiance parameters string
heaFile.write(self.hb_writeDS.DSRADStr(analysisRecipe.radParameters))
                # dynamic simulation options
heaFile.write(self.hb_writeDS.DSDynamicSimStr(dynamicShadingRecipes, projectName, subWorkingDir, testPtsEachCPU[cpuCount], cpuCount))
# heaFile.write(hb_writeDS.resultStr(projectName, cpuCount))
heaFile.close()
if not(runAnnualGlare and onlyAnnualGlare):
# ill files
DSResultFilesAddress.append(os.path.join(subWorkingDir, radFileName + '_' + `cpuCount` + '.ill'))
# 3. write the batch file
DSBatchFileName = os.path.join(subWorkingDir, radFileName + '_' + `cpuCount` + '_DS.bat')
DSBatchFile = open(DSBatchFileName, "w")
fileNames.append(DSBatchFileName.split("\\")[-1])
heaFileName = os.path.join(subWorkingDir, radFileName + '_' + `cpuCount` + '.hea')
#SET PATH = " + subWorkingDir + "\n" + workingDrive +"\n"
DSBatchFile.write(pathStr)
DSBatchStr = ':: Calculate Daylight Coefficient File (*.dc)\n' + \
'gen_dc ' + heaFileName + ' -dif\n' + \
'gen_dc ' + heaFileName + ' -dir\n' + \
'gen_dc ' + heaFileName + ' -paste\n' + \
'\n' + \
':: Generate Illuminance Files (*.ill)\n' + \
'ds_illum ' + heaFileName + '\n'
DSBatchFile.write(DSBatchStr)
DSBatchFile.close()
batchFiles.append(DSBatchFileName)
return initBatchFileName, batchFiles, fileNames, pcompFileName, DSResultFilesAddress
######################## NOT ANNUAL SIMULATION #######################
# 3. write the batch file
HDRFileAddress = []
if analysisRecipe.type == 0:
self.rhinoViewNames = analysisRecipe.viewNames
# image based
initBatchFileName = os.path.join(subWorkingDir, radFileName + '_IMGInit.bat')
if readyOCTFile ==None:
OCTFileName = radFileName + '_IMG'
else:
OCTFileName = (".").join(os.path.basename(readyOCTFile).split(".")[:-1])
else:
# not annual and not image based
initBatchFileName = os.path.join(subWorkingDir, radFileName + '_RADInit.bat')
if readyOCTFile ==None:
OCTFileName = radFileName + '_RAD'
else:
OCTFileName = (".").join(os.path.basename(readyOCTFile).split(".")[:-1])
# create the batch file that initiate the simulation
with open(initBatchFileName, "w") as batchFile:
            # write the path string (I should check that Radiance is installed on the system)
pathStr = "SET RAYPATH=.;" + self.hb_RADLibPath + "\nPATH=" + self.hb_RADPath + ";$PATH\n"
batchFile.write(pathStr)
batchFile.write("c:\n")
batchFile.write("cd " + subWorkingDir + "\n")
# write OCT file
# 3.2. oconv line
sceneRadFiles = [materialFileName, radSkyFileName, radFileFullName]
if additionalRadFiles:
for additionalFile in additionalRadFiles:
if additionalFile!=None:
sceneRadFiles.append(additionalFile)
OCTLine = self.hb_writeRADAUX.oconvLine(OCTFileName, sceneRadFiles)
if readyOCTFile ==None: batchFile.write(OCTLine)
if analysisRecipe.type == 0:
# add overture line in case it is an image-based analysis
view = sc.doc.Views.ActiveView.ActiveViewport.Name
viewLine = self.hb_writeRADAUX.exportView(view, analysisRecipe.radParameters, analysisRecipe.cameraType, imageSize = [64, 64])
# write rpict lines
overtureLine = self.hb_writeRADAUX.overtureLine(viewLine, OCTFileName, view, analysisRecipe.radParameters, int(analysisRecipe.type))
if runOverture: batchFile.write(overtureLine)
if analysisRecipe.type == 0:
# write view files
if len(self.rhinoViewNames)==0:
self.rhinoViewNames = [sc.doc.Views.ActiveView.ActiveViewport.Name]
#recalculate vh and vv
nXDiv = int(math.sqrt(numOfCPUs))
while numOfCPUs%nXDiv !=0 and nXDiv < numOfCPUs:
nXDiv += 1
nYDiv = numOfCPUs/nXDiv
fileNames = []
HDRPieces = {}
for cpuCount in range(numOfCPUs):
# create a batch file
batchFileName = os.path.join(subWorkingDir, radFileName + '_' + `cpuCount` + '_IMG.bat')
batchFiles.append(batchFileName)
fileNames.append(batchFileName.split("\\")[-1])
batchFile = open(batchFileName, "w")
# write path files
batchFile.write(pathStr)
batchFile.write("c:\n")
batchFile.write("cd " + subWorkingDir + "\n")
                # calculate vs and vl for this cpu
try: vs = (((cpuCount%nXDiv)/(nXDiv-1)) - 0.5) * (nXDiv - 1)
except: vs = 0
try: vl = ((int(cpuCount/nXDiv)/(nYDiv-1)) - 0.5) * (nYDiv - 1)
except: vl = 0
# print vs, vl
for view in self.rhinoViewNames:
view = self.lb_preparation.removeBlank(view)
if cpuCount == 0:
HDRFileAddress.append(subWorkingDir + "\\" + OCTFileName + "_" + view + ".HDR")
HDRPieces[OCTFileName + "_" + view + ".HDR"] = []
# collect name of the pieces of the picture
HDRPieces[OCTFileName + "_" + view + ".HDR"].append(OCTFileName + "_" + view + "_" + `cpuCount` + ".HDR")
viewLine = self.hb_writeRADAUX.exportView(view, analysisRecipe.radParameters, analysisRecipe.cameraType, \
analysisRecipe.imageSize, analysisRecipe.sectionPlane, \
nXDiv, nYDiv, vs, vl)
# write rpict lines
RPICTLines = self.hb_writeRADAUX.rpictLine(viewLine, OCTFileName, view, analysisRecipe.radParameters, int(analysisRecipe.simulationType), cpuCount)
batchFile.write(RPICTLines)
# close the file
batchFile.close()
# PCOMP to merge images into a single HDR
pcompFileName = os.path.join(subWorkingDir, radFileName + '_PCOMP.bat')
with open(pcompFileName, "w") as pcompFile:
# write path files
pcompFile.write(pathStr)
pcompFile.write("c:\n")
pcompFile.write("cd " + subWorkingDir + "\n")
for mergedName, pieces in HDRPieces.items():
pcomposLine = "pcompos -a " + `nXDiv` + " "
# pieces.reverse()
for piece in pieces:
pcomposLine += piece + " "
pcomposLine += " > " + mergedName + "\n"
pcompFile.write(pcomposLine)
return initBatchFileName, batchFiles, fileNames, pcompFileName, HDRFileAddress
else:
fileNames = []
RADResultFilesAddress = []
for cpuCount in range(numOfCPUs):
# create a batch file
batchFileName = os.path.join(subWorkingDir, radFileName + '_' + `cpuCount` + '_RAD.bat')
batchFiles.append(batchFileName)
RADResultFilesAddress.append(os.path.join(subWorkingDir, radFileName + '_' + `cpuCount` + '.res'))
fileNames.append(batchFileName.split("\\")[-1])
batchFile = open(batchFileName, "w")
# write path files
batchFile.write(pathStr)
batchFile.write("c:\n")
batchFile.write("cd " + subWorkingDir + "\n")
                # 3.4. add rtrace line
RTRACELine = self.hb_writeRADAUX.rtraceLine(radFileName, OCTFileName, analysisRecipe.radParameters, int(analysisRecipe.simulationType), cpuCount)
batchFile.write(RTRACELine)
# close the file
batchFile.close()
return initBatchFileName, batchFiles, fileNames, pcompFileName, RADResultFilesAddress
def runBatchFiles(self, initBatchFileName, batchFileNames, fileNames, \
pcompBatchFile, waitingTime):
def isTheStudyOver(fileNames):
while True:
cmd = 'WMIC PROCESS get Commandline' #,Processid'
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
cmdCount = 0
for line in proc.stdout:
if line.strip().startswith("cmd") and line.strip().endswith(".bat"):
fileName = line.strip().split(" ")[-1].split("\\")[-1]
# I should check the file names and make sure they are the right files
if fileName in fileNames:
cmdCount += 1
time.sleep(0.2)
if cmdCount == 0:
return
def executeBatchFiles(batchFileNames, waitingTime):
for batchFileName in batchFileNames:
p = subprocess.Popen(r'start cmd /c ' + batchFileName , shell=True)
time.sleep(waitingTime)
os.system(initBatchFileName)
time.sleep(waitingTime)
executeBatchFiles(batchFileNames, waitingTime)
isTheStudyOver(fileNames)
if pcompBatchFile!="":
os.system(pcompBatchFile) # put all the files together
def collectResults(self, subWorkingDir, radFileName, numOfCPUs, analysisRecipe, expectedResultFiles):
if analysisRecipe.type == 2:
#annual simulation
runAnnualGlare = analysisRecipe.DSParameters.runAnnualGlare
onlyAnnualGlare = analysisRecipe.DSParameters.onlyAnnualGlare
numOfIllFiles = analysisRecipe.DSParameters.numOfIll
annualGlareViews = analysisRecipe.DSParameters.RhinoViewsName
DSResultFilesAddress = []
if not(runAnnualGlare and onlyAnnualGlare):
# read the number of .ill files
# and the number of .dc files
if subWorkingDir[-1] == os.sep: subWorkingDir = subWorkingDir[:-1]
startTime = time.time()
# check if the results are available
files = os.listdir(subWorkingDir)
numIll = 0
numDc = 0
for file in files:
if file.EndsWith('ill'):
DSResultFilesAddress.append(os.path.join(subWorkingDir, file))
numIll+=1
elif file.EndsWith('dc'):
numDc+=1
if numIll!= numOfCPUs * numOfIllFiles or numDc!= numOfCPUs * numOfIllFiles:
print "Can't find the results for the study"
DSResultFilesAddress = []
# check for results of annual glare analysis if any
annualGlareResults = {}
for view in annualGlareViews:
if view not in annualGlareResults.keys():
annualGlareResults[view] = []
dgpFile = os.path.join(subWorkingDir, radFileName + '_0.dgp')
if runAnnualGlare and os.path.isfile(dgpFile):
with open(dgpFile, "r") as dgpRes:
for line in dgpRes:
try:
hourlyRes = line.split(" ")[4:]
# for each view there should be a number
for view, res in zip(annualGlareViews, hourlyRes):
annualGlareResults[view].append(res.strip())
except:
pass
return DSResultFilesAddress, annualGlareResults
elif analysisRecipe.type == 0:
# image-based analysis
return expectedResultFiles
else:
RADResultFilesAddress = expectedResultFiles
# grid-based analysis
numRes = 0
files = os.listdir(subWorkingDir)
for file in files:
if file.EndsWith('res'): numRes+=1
if numRes != numOfCPUs:
print "Cannot find the results of the study"
RADResultFilesAddress = []
time.sleep(1)
return RADResultFilesAddress
    def shiftList(self, items, number = 1):
        newList = []
        newList.extend(items[-number:])
        newList.extend(items[:-number])
        return newList
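    # e.g. (illustrative) shiftList([1, 2, 3, 4]) returns [4, 1, 2, 3]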
def getsurfaceStr(self, surface, count, coordinates):
if surface.RadMaterial != None:
surface.construction = surface.RadMaterial
elif not hasattr(surface, 'construction'):
if not hasattr(surface, 'type'):
                # find the type based on the surface normal angle
surface.type = surface.getTypeByNormalAngle()
#assign the construction based on type
surface.construction = surface.cnstrSet[surface.type]
srfStr = surface.construction.replace(" ", "_") + " polygon " + surface.name + '_' + `count` + "\n" + \
"0\n" + \
"0\n" + \
`(len(coordinates)*3)` + "\n"
ptStr = ''
for pt in coordinates:
ptStr = ptStr + '%.4f'%pt.X + ' ' + '%.4f'%pt.Y + ' ' + '%.4f'%pt.Z + '\n'
ptStr = ptStr + '\n'
# check for polygons with only two points.
# Yes! it is possible. Import a model from REVIT/SketchUp and create some breps out of it
# and you will get some!
if len(coordinates) < 3:
comment = " Polygon " + surface.name + " has less than 3 vertices and is removed by Honeybee.\n"
return "#" + comment
return srfStr + ptStr
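    # Illustrative output (hypothetical surface): a wall with construction "Exterior_Wall"
    # and four vertices is written as a Radiance polygon of the form:
    #   Exterior_Wall polygon myWall_0
    #   0
    #   0
    #   12
    #   x0 y0 z0
    #   ... (one line per vertex)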
def RADSurface(self, surface):
fullStr = ''
# base surface coordinates
coordinatesList = surface.extractPoints(1, True)
if coordinatesList:
if type(coordinatesList[0])is not list and type(coordinatesList[0]) is not tuple:
coordinatesList = [coordinatesList]
for count, coordinates in enumerate(coordinatesList):
endCoordinate = rc.Geometry.Point3d.Add(coordinates[-1], rc.Geometry.Vector3d(0,0,0))
if surface.hasChild:
glzCoordinateLists = surface.extractGlzPoints(True)
for glzCount, glzCoorList in enumerate(glzCoordinateLists):
# glazingStr
fullStr = fullStr + self.getsurfaceStr(surface.childSrfs[0], glzCount, glzCoorList)
# shift glazing list
glzCoorList = self.shiftList(glzCoorList)
coordinates.extend(glzCoorList)
coordinates.append(glzCoorList[0])
coordinates.extend([endCoordinate, coordinates[0]])
fullStr = fullStr + self.getsurfaceStr(surface, count, coordinates)
return fullStr
else:
print "one of the surfaces is not exported correctly"
return ""
def RADNonPlanarSurface(self, surface):
fullStr = ''
# replace the geometry with the punched geometry
# for planar surfaces with multiple openings
try:
if surface.punchedGeometry!=None:
surface.geometry = surface.punchedGeometry
surface.hasInternalEdge = True
except:
#print e
# nonplanar surfaces with no openings
pass
# base surface coordinates
coordinatesList = surface.extractPoints(1, True)
if type(coordinatesList[0])is not list and type(coordinatesList[0]) is not tuple:
coordinatesList = [coordinatesList]
for count, coordinates in enumerate(coordinatesList):
#print count
fullStr = fullStr + self.getsurfaceStr(surface, count, coordinates)
return fullStr
def RADNonPlanarChildSurface(self, surface):
fullStr = ''
# I should test this function before the first release!
# Not sure if it will work for cases generated only by surface
# should probably check for meshed surface and mesh the geometry
# in case it is not meshed
# base surface coordinates
coordinatesList = surface.extractGlzPoints(True)
if type(coordinatesList[0])is not list and type(coordinatesList[0]) is not tuple:
coordinatesList = [coordinatesList]
for glzCount, glzCoorList in enumerate(coordinatesList):
# glazingStr
fullStr = fullStr + self.getsurfaceStr(surface.childSrfs[0], glzCount, glzCoorList)
return fullStr
def getIESSurfaceStr(self, surface, constructionName, count, IESObject):
if IESObject.type == "polygon":
coordinates = surface.DuplicateVertices()
srfStr = constructionName + "_light polygon " + constructionName + '_' + `count` + ".d\n" + \
"0\n" + \
"0\n" + \
`(len(coordinates)*3)` + "\n"
ptStr = ''
for pt in coordinates:
ptStr = ptStr + '%.4f'%pt.X + ' ' + '%.4f'%pt.Y + ' ' + '%.4f'%pt.Z + '\n'
ptStr = ptStr + '\n'
return srfStr + ptStr
elif IESObject.type == "sphere":
center = surface.GetBoundingBox(True).Center
radius = IESObject.radius
srfStr = constructionName + "_light sphere " + constructionName + '_' + `count` + ".d\n" + \
"0\n" + \
"0\n" + \
"4 " + `center.X` + " " + `center.Y` + " " + `center.Z` + " " + `radius` + "\n"
return srfStr
class WriteRADAUX(object):
def __init__(self):
self.hb_radParDict = sc.sticky["honeybee_RADParameters"]().radParDict
self.lb_preparation = sc.sticky["ladybug_Preparation"]()
self.hb_serializeObjects = sc.sticky["honeybee_SerializeObjects"]
self.hb_dsParameters = sc.sticky["honeybee_DSParameters"]()
self.radSkyCondition = {0: '-u',
1: '-c',
2: '-i',
3: '+i',
4: '-s',
5: '+s'}
def readAnalysisRecipe(self, analysisRecipe):
self.analysisType = analysisRecipe.type
self.radParameters = analysisRecipe.radParameters
self.backupImages = 0 # will change to 1 or 2 if the user sets it to another number for image-based analysis
self.numOfIllFiles = 1
if self.radParameters==None:
quality = 0
self.radParameters = {}
print "Default values are set for RAD parameters"
for key in self.hb_radParDict.keys():
#print key + " is set to " + str(hb_radParDict[key][quality])
self.radParameters[key] = self.hb_radParDict[key][quality]
if self.analysisType == 0:
print "Image-based simulation"
self.radSkyFileName = analysisRecipe.skyFile
self.rhinoViewNames = analysisRecipe.viewNames
self.cameraType = analysisRecipe.cameraType
self.imageSize = analysisRecipe.imageSize
self.simulationType = analysisRecipe.simulationType
self.studyFolder = analysisRecipe.studyFolder
self.sectionPlane = analysisRecipe.sectionPlane
self.backupImages = analysisRecipe.backupImages
elif self.analysisType == 1:
print "Grid-based Radiance simulation"
self.radSkyFileName = analysisRecipe.skyFile
self.testPoints = analysisRecipe.testPts
self.ptsNormals = analysisRecipe.vectors
self.simulationType = analysisRecipe.simulationType
self.studyFolder = analysisRecipe.studyFolder
self.testMesh = analysisRecipe.testMesh
elif self.analysisType == 2:
print "Annual climate-based analysis"
self.epwFileAddress = analysisRecipe.weatherFile
self.testPoints = analysisRecipe.testPts
self.ptsNormals = analysisRecipe.vectors
self.testMesh = analysisRecipe.testMesh
if analysisRecipe.DSParameters == None:
analysisRecipe.DSParameters = self.hb_dsParameters
self.runAnnualGlare = analysisRecipe.DSParameters.runAnnualGlare
self.onlyAnnualGlare = analysisRecipe.DSParameters.onlyAnnualGlare
self.annualGlareViews = analysisRecipe.DSParameters.RhinoViewsName
self.outputUnits = analysisRecipe.DSParameters.outputUnits
self.adaptiveZone = analysisRecipe.DSParameters.adaptiveZone
self.dgp_imageSize = analysisRecipe.DSParameters.dgp_imageSize
self.dynamicShadingRecipes = analysisRecipe.DSParameters.DShdR
self.numOfIllFiles = analysisRecipe.DSParameters.numOfIll
self.studyFolder = analysisRecipe.studyFolder
elif self.analysisType == 3:
print "Daylight factor"
self.radSkyFileName = analysisRecipe.skyFile
self.testPoints = analysisRecipe.testPts
self.ptsNormals = analysisRecipe.vectors
self.simulationType = analysisRecipe.simulationType
self.studyFolder = analysisRecipe.studyFolder
self.testMesh = analysisRecipe.testMesh
elif self.analysisType == 4:
print "Vertical Sky Component"
self.radSkyFileName = analysisRecipe.skyFile
self.testPoints = analysisRecipe.testPts
self.ptsNormals = analysisRecipe.vectors
self.simulationType = analysisRecipe.simulationType
self.studyFolder = analysisRecipe.studyFolder
self.testMesh = analysisRecipe.testMesh
def checkInputParametersForGridBasedAnalysis(self):
if self.analysisType == 0:
# this is an image-based analysis
return
print "The component is checking ad, as, ar and aa values. " + \
"This is just to make sure that the results are accurate enough."
if self.radParameters["_ad_"] < 1000:
self.radParameters["_ad_"] = 1000
print "-ad is set to 1000."
if self.radParameters["_as_"] < 20:
self.radParameters["_as_"] = 20
print "-as is set to 20."
if self.radParameters["_ar_"] < 300:
# setting -ar to 300 is tricky but I'm pretty sure in many
# cases there will be shadings involved.
self.radParameters["_ar_"] = 300
print "-ar is set to 300."
if self.radParameters["_aa_"] > 0.1:
# the same here. I think it is good to let the user wait a little bit more
# but have a result that makes sense. If you are an experienced user and don't
# like this, feel free to remove the if condition. Keep in mind that I only
# apply this for grid-based analysis, so the images can be rendered with any quality
self.radParameters["_aa_"] = 0.1
print "-aa is set to 0.1"
print "Good to go!"
def prepareWorkingDir(self, workingDir, radFileName = None, overwriteResults = True, analysisRecipe = None):
if analysisRecipe == None:
studyFolder = self.studyFolder
analysisType = self.analysisType
if analysisType == 0:
backupImages = self.backupImages
else:
studyFolder = analysisRecipe.studyFolder
analysisType = analysisRecipe.type
if analysisType == 0:
backupImages = analysisRecipe.backupImages
if workingDir:
workingDir = self.lb_preparation.removeBlankLight(workingDir)
else:
workingDir = sc.sticky["Honeybee_DefaultFolder"]
workingDir = self.lb_preparation.makeWorkingDir(workingDir)
# make sure the directory has been created
if workingDir == -1: return -1
workingDrive = workingDir[0:1]
## check for the name of the file
if radFileName == None: radFileName = 'unnamed'
# make sure radfile name is a valid address
keepcharacters = ('.','_')
radFileName = "".join([c for c in radFileName if c.isalnum() or c in keepcharacters]).rstrip()
# make new folder for each study
subWorkingDir = self.lb_preparation.makeWorkingDir(workingDir + "\\" + radFileName + studyFolder).replace("\\\\", "\\")
print 'Current working directory is set to: ', subWorkingDir
if os.path.exists(subWorkingDir):
if analysisType == 0:
# for image-based analysis there is an option to backup the images
if backupImages != 0:
# create the backup folder and copy the images to the folder
imageFolder = workingDir + "\\" + radFileName + "\\imagesBackup"
if not os.path.exists(imageFolder): os.mkdir(imageFolder)
# copy the files into the folder
imageExtensions = ["JPEG", "JPG", "GIF", "TIFF", "TIF", "HDR", "PIC"]
timeID = self.getTime()
fileNames = os.listdir(subWorkingDir)
if backupImages == 1:
# keep all the files in the same folder
for fileName in fileNames:
if fileName.split(".")[-1].upper() in imageExtensions:
newFileName = (".").join(fileName.split(".")[:-1])
extension = fileName.split(".")[-1]
newFullName = newFileName + "_" + timeID + "." + extension
self.copyFile(os.path.join(subWorkingDir, fileName), os.path.join(imageFolder, newFullName) , True)
elif backupImages == 2:
for fileName in fileNames:
if fileName.split(".")[-1].upper() in imageExtensions:
if not os.path.exists(imageFolder + "\\" + timeID):
os.mkdir(imageFolder + "\\" + timeID)
# copy the files to the image backup folder with date and time added
self.copyFile(os.path.join(subWorkingDir, fileName), os.path.join(imageFolder + "\\" + timeID, fileName) , True)
try:
if not overwriteResults:
fileNames = os.listdir(subWorkingDir)
mainBackupFolder = subWorkingDir[:-1] + "_backup"
counter = 0
backupFolder = os.path.join(mainBackupFolder, str(counter))
while os.path.isdir(backupFolder):
counter += 1
backupFolder = os.path.join(mainBackupFolder, str(counter))
os.mkdir(backupFolder)
for fileName in fileNames:
try:
# copy the files to the image backup folder with date and time added
self.copyFile(os.path.join(subWorkingDir, fileName), os.path.join(backupFolder, fileName) , True)
except:
pass
print "Results of the previous study are copied to " + backupFolder
self.lb_preparation.nukedir(subWorkingDir, rmdir = False)
except Exception, e:
print 'Failed to remove the old directory.'
print `e`
return subWorkingDir, radFileName
def exportTestMesh(self, subWorkingDir, radFileName, analysisRecipe = None):
if analysisRecipe != None:
analysisType = analysisRecipe.type
if analysisType ==0: return
testMesh = analysisRecipe.testMesh
else:
analysisType = self.analysisType
if analysisType ==0: return
testMesh = self.testMesh
# try to write mesh file if any
if analysisType != 0 and testMesh !=[]:
meshFilePath = os.path.join(subWorkingDir, radFileName + ".msh")
serializer = self.hb_serializeObjects(meshFilePath, testMesh)
serializer.saveToFile()
def exportTypeFile(self, subWorkingDir, radFileName, analysisRecipe = None):
if analysisRecipe != None:
analysisType = analysisRecipe.type
else:
analysisType = self.analysisType
# try to write mesh file if any
typeFile = os.path.join(subWorkingDir, radFileName + ".typ")
with open(typeFile, "w") as typf:
typf.write(str(analysisType))
def copySkyFile(self, subWorkingDir, radFileName, analysisRecipe = None):
if analysisRecipe != None:
analysisType = analysisRecipe.type
if analysisType == 2: return
radSkyFileName = analysisRecipe.radSkyFileName
else:
analysisType = self.analysisType
if analysisType == 2: return
radSkyFileName = self.radSkyFileName
skyTempName = radSkyFileName.split("\\")[-1]
skyName = skyTempName.split("/")[-1]
self.copyFile(radSkyFileName, subWorkingDir + "\\" + skyName, True)
radSkyFileName = os.path.join(subWorkingDir, skyName)
return radSkyFileName
def getTime(self):
def addZero(number):
if len(str(number)) == 1:
return "0" + str(number)
else:
return str(number)
year, month, day, hour, minute, second = time.localtime()[0:6]
now = addZero(hour) + "_" + addZero(minute) + "_" + addZero(second)
date = addZero(year) + "_" + addZero(month) + "_" + addZero(day)
return date + "at" + now
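# For reference, the returned string is date + "at" + time with zero-padded fields;
# a call made at 14:05:09 on 3 January 2015 (hypothetical) would return
# "2015_01_03at14_05_09".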
def copyFile(self, inputFile, destinationFullpath, overwrite = False):
if overwrite: shutil.copyfile(inputFile, destinationFullpath)
elif not os.path.isfile(destinationFullpath): shutil.copyfile(inputFile, destinationFullpath)
def RADLocation(self, epw_file):
epwfile = open(epw_file,"r")
headline = epwfile.readline()
csheadline = headline.split(',')
while True: # remove empty cells from the end of the list, if any
try: float(csheadline[-1]); break
except: csheadline.pop()
locName = ''
for hLine in range(1,4):
if csheadline[hLine] != '-':
locName = locName + csheadline[hLine].strip() + '_'
locName = locName[:-1].strip()
lat = csheadline[-4]
lngt = csheadline[-3]
timeZone = csheadline[-2]
elev = csheadline[-1].strip()
epwfile.close()
return locName, lat, lngt, timeZone, elev
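# Illustrative sketch, assuming a hypothetical EPW header line such as:
#   LOCATION,Chicago,IL,USA,TMY3,725300,41.98,-87.92,-6.0,201.0
# this would return locName = "Chicago_IL_USA", lat = "41.98", lngt = "-87.92",
# timeZone = "-6.0" and elev = "201.0".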
def RADRadiationSky(self, projectName):
return "# start of sky definition for radiation studies\n" + \
"void brightfunc skyfunc\n" + \
"2 skybright " + projectName + ".cal\n" + \
"0\n" + \
"0\n" + \
"skyfunc glow sky_glow\n" + \
"0\n" + \
"0\n" + \
"4 1 1 1 0\n" + \
"sky_glow source sky\n" + \
"0\n" + \
"0\n" + \
"4 0 0 1 180\n" + \
"# end of sky definition for radiation studies\n\n"
def RADDaylightingSky(self, epwFileAddress, skyCondition, time, month, day):
locName, lat, long, timeZone, elev = self.RADLocation(epwFileAddress)
return "# start of sky definition for daylighting studies\n" + \
"# location name: " + locName + " LAT: " + lat + "\n" + \
"!gensky " + `month` + ' ' + `day` + ' ' + `time` + ' ' + self.radSkyCondition[skyCondition] + \
" -a " + lat + " -o " + `-float(long)` + " -m " + `-float(timeZone) * 15` + "\n" + \
"skyfunc glow sky_mat\n" + \
"0\n" + \
"0\n" + \
"4 1 1 1 0\n" + \
"sky_mat source sky\n" + \
"0\n" + \
"0\n" + \
"4 0 0 1 180\n" + \
"skyfunc glow ground_glow\n" + \
"0\n" + \
"0\n" + \
"4 1 .8 .5 0\n" + \
"ground_glow source ground\n" + \
"0\n" + \
"0\n" + \
"4 0 0 -1 180\n" + \
"# end of sky definition for daylighting studies\n\n"
def exportView(self, viewName, radParameters, cameraType, imageSize, sectionPlane = None, nXDiv = 1, nYDiv = 1, vs = 0, vl = 0):
if viewName in rs.ViewNames():
viewName = rs.CurrentView(viewName, True)
else:
# change to RhinoDoc to get access to NamedViews
sc.doc = rc.RhinoDoc.ActiveDoc
namedViews = rs.NamedViews()
if viewName in namedViews:
viewName = rs.RestoreNamedView(viewName)
else:
viewName = None
# change back to Grasshopper
sc.doc = ghdoc
viewName = rs.CurrentView(viewName, True)
if viewName == None:
print "Illegal view name!"
viewName = "Perspective"
# Read camera type 0: Perspective, 1: fisheye, 2: parallel
try: cameraType = int(cameraType)
except:
if sc.doc.Views.ActiveView.ActiveViewport.IsPerspectiveProjection: cameraType = 0
elif sc.doc.Views.ActiveView.ActiveViewport.IsParallelProjection: cameraType = 2
# parallel view sizes
viewHSizeP = int(sc.doc.Views.ActiveView.ActiveViewport.Size.Width)
viewVSizeP = int(sc.doc.Views.ActiveView.ActiveViewport.Size.Height)
# read image size
viewHSize = int(sc.doc.Views.ActiveView.ActiveViewport.Size.Width)
viewVSize = int(sc.doc.Views.ActiveView.ActiveViewport.Size.Height)
# print viewHSize, viewVSize
userInputH = imageSize[0]
userInputV = imageSize[1]
if userInputH != None and userInputV != None:
try:
viewHSize = float(userInputH)
viewVSize = float(userInputV)
except:
print "Illegal input for view size."
pass
elif userInputH == None and userInputV != None:
try:
viewHSize = viewHSize * (userInputV/viewVSize)
viewVSize = float(userInputV)
except:
print "Illegal input for view size."
pass
elif userInputH != None and userInputV == None:
try:
viewVSize = viewVSize * (userInputH/viewHSize)
viewHSize = float(userInputH)
except:
print "Illegal input for view size."
pass
# print viewHSize, viewVSize
viewPoint = sc.doc.Views.ActiveView.ActiveViewport.CameraLocation
viewDirection = sc.doc.Views.ActiveView.ActiveViewport.CameraDirection
viewDirection.Unitize()
viewUp = sc.doc.Views.ActiveView.ActiveViewport.CameraUp
viewHA = 180 - rs.VectorAngle(sc.doc.Views.ActiveView.ActiveViewport.GetFrustumRightPlane()[1][1], sc.doc.Views.ActiveView.ActiveViewport.GetFrustumLeftPlane()[1][1])
if viewHA == 0: viewHA = 180
viewVA = 180 - rs.VectorAngle(sc.doc.Views.ActiveView.ActiveViewport.GetFrustumBottomPlane()[1][1], sc.doc.Views.ActiveView.ActiveViewport.GetFrustumTopPlane()[1][1])
if viewVA == 0: viewVA = 180
PI = math.pi
if cameraType == 2:
# Thank you to Brent Watanabe for the great discussion, and his help in figuring this out
# I should find the bounding box of the geometry and set X and Y based on that!
if nXDiv != 1:
viewHSizeP = viewHSizeP/nXDiv
viewHSize = viewHSize/nXDiv
if nYDiv != 1:
viewVSizeP = viewVSizeP/nYDiv
viewVSize = viewVSize/nYDiv
view = "-vtl -vp " + \
`viewPoint[0]` + " " + `viewPoint[1]` + " " + `viewPoint[2]` + " " + \
" -vd " + `viewDirection[0]` + " " + `viewDirection[1]` + " " + `viewDirection[2]` + " " + \
" -vu " + `viewUp[0]` + " " + `viewUp[1]` + " " + `viewUp[2]` + \
" -vh " + `int(viewHSizeP)` + " -vv " + `int(viewVSizeP)` + \
" -vs " + "%.3f"%vs + " -vl " + "%.3f"%vl + \
" -x " + `int(viewHSize)` + " -y " + `int(viewVSize)`
elif cameraType == 0:
# perspective
# recalculate vh and vv
if nXDiv != 1:
viewHA = (2.*180./PI)*math.atan(math.tan((PI/180./2.)*viewHA)/nXDiv)
viewHSize = viewHSize/nXDiv
if nYDiv != 1:
viewVA = (2.*180./PI)*math.atan(math.tan((PI/180./2.)*viewVA)/nYDiv)
viewVSize = viewVSize/nYDiv
view = "-vtv -vp " + \
"%.3f"%viewPoint[0] + " " + "%.3f"%viewPoint[1] + " " + "%.3f"%viewPoint[2] + " " + \
" -vd " + "%.3f"%viewDirection[0] + " " + "%.3f"%viewDirection[1] + " " + "%.3f"%viewDirection[2] + " " + \
" -vu " + "%.3f"%viewUp[0] + " " + "%.3f"%viewUp[1] + " " + "%.3f"%viewUp[2] + " " + \
" -vh " + "%.3f"%viewHA + " -vv " + "%.3f"%viewVA + \
" -vs " + "%.3f"%vs + " -vl " + "%.3f"%vl + " -x " + `int(viewHSize)` + " -y " + `int(viewVSize)`
elif cameraType == 1:
# fish eye
# recalculate vh and vv
viewHA = 180
viewVA = 180
if nXDiv != 1:
viewHA = (2.*180./PI)*math.asin(math.sin((PI/180./2.)*viewHA)/nXDiv)
viewHSize = viewHSize/nXDiv
if nYDiv != 1:
viewVA = (2.*180./PI)*math.asin(math.sin((PI/180./2.)*viewVA)/nYDiv)
viewVSize = viewVSize/nYDiv
view = "-vth -vp " + \
`viewPoint[0]` + " " + `viewPoint[1]` + " " + `viewPoint[2]` + " " + \
" -vd " + `viewDirection[0]` + " " + `viewDirection[1]` + " " + `viewDirection[2]` + " " + \
" -vu " + `viewUp[0]` + " " + `viewUp[1]` + " " + `viewUp[2]` + " " + \
" -vh " + "%.3f"%viewHA + " -vv " + "%.3f"%viewVA + \
" -vs " + "%.3f"%vs + " -vl " + "%.3f"%vl + " -x " + `int(viewHSize)` + " -y " + `int(viewVSize)`
if sectionPlane!=None:
# map the point on the plane
pointOnPlane = sectionPlane.ClosestPoint(viewPoint)
distance = pointOnPlane.DistanceTo(viewPoint)
view += " -vo " + str(distance)
return view + " "
def oconvLine(self, octFileName, radFilesList):
# scene files
r = 1024 * 2
sceneFiles = ""
for address in radFilesList: sceneFiles = sceneFiles + address.replace("\\" , "/") + " "
line = "oconv -r " + str(r) + " -f " + sceneFiles + " > " + octFileName + ".oct\n"
return line
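# A minimal sketch with hypothetical paths: oconvLine("daylight", ["c:\\proj\\mat.rad",
# "c:\\proj\\geo.rad"]) returns the batch line
#   oconv -r 2048 -f c:/proj/mat.rad c:/proj/geo.rad  > daylight.oct
# where -r sets the maximum octree resolution and -f writes a frozen octree so the
# .rad files are not needed again at render time.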
def overtureLine(self, view, projectName, viewName, radParameters, analysisType = 0):
octFile = projectName + ".oct"
ambFile = projectName + ".amb" #amb file is view independent and can be used globally
unfFile = projectName + ".unf"
if analysisType==0:
# illuminance (lux)
line0 = "rpict -i "
elif analysisType==2:
# luminance (cd)
line0 = "rpict "
else:
# radiation analysis
line0 = "rpict -i "
line1 = "-t 10 "+ \
view + " -af " + ambFile + " " + \
" -ps " + str(radParameters["_ps_"]) + " -pt " + str(radParameters["_pt_"]) + \
" -pj " + str(radParameters["_pj_"]) + " -dj " + str(radParameters["_dj_"]) + \
" -ds " + str(radParameters["_ds_"]) + " -dt " + str(radParameters["_dt_"]) + \
" -dc " + str(radParameters["_dc_"]) + " -dr " + str(radParameters["_dr_"]) + \
" -dp " + str(radParameters["_dp_"]) + " -st " + str(radParameters["_st_"]) + \
" -ab " + `radParameters["_ab_"]` + \
" -ad " + `radParameters["_ad_"]` + " -as " + `radParameters["_as_"]` + \
" -ar " + `radParameters["_ar_"]` + " -aa " + '%.3f'%radParameters["_aa_"] + \
" -lr " + `radParameters["_lr_"]` + " -lw " + '%.3f'%radParameters["_lw_"] + " -av 0 0 0 " + \
" " + octFile + " > " + unfFile + "\n"
line2 = "del " + unfFile + "\n"
return line0 + line1 + line2
def rpictLine(self, view, projectName, viewName, radParameters, analysisType = 0, cpuCount = 0):
octFile = projectName + ".oct"
ambFile = projectName + ".amb" #amb file is view independent and can be used globally
unfFile = projectName + "_" + viewName + "_" + `cpuCount` + ".unf"
outputFile = projectName + "_" + viewName + "_" + `cpuCount` + ".HDR"
if analysisType==0:
# illuminance (lux)
line0 = "rpict -i "
elif analysisType==2:
# luminance (cd)
line0 = "rpict "
else:
# radiation analysis
line0 = "rpict -i "
line1 = "-t 10 "+ \
view + " -af " + ambFile + " " + \
" -ps " + str(radParameters["_ps_"]) + " -pt " + str(radParameters["_pt_"]) + \
" -pj " + str(radParameters["_pj_"]) + " -dj " + str(radParameters["_dj_"]) + \
" -ds " + str(radParameters["_ds_"]) + " -dt " + str(radParameters["_dt_"]) + \
" -dc " + str(radParameters["_dc_"]) + " -dr " + str(radParameters["_dr_"]) + \
" -dp " + str(radParameters["_dp_"]) + " -st " + str(radParameters["_st_"]) + \
" -ab " + `radParameters["_ab_"]` + \
" -ad " + `radParameters["_ad_"]` + " -as " + `radParameters["_as_"]` + \
" -ar " + `radParameters["_ar_"]` + " -aa " + '%.3f'%radParameters["_aa_"] + \
" -lr " + `radParameters["_lr_"]` + " -lw " + '%.3f'%radParameters["_lw_"] + " -av 0 0 0 " + \
" " + octFile + " > " + unfFile + "\n"
line2 = "pfilt -1 -r .6 -x/2 -y/2 " + unfFile + " > " + outputFile + "\n"
return line0 + line1 + line2
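# Note on the two methods above: overtureLine() runs the same rpict command against a
# throw-away .unf file purely to populate the shared ambient cache (.amb) and then
# deletes the throw-away picture; rpictLine() renders the actual view and pipes the
# unfiltered picture through pfilt (-1 single pass, -r .6 Gaussian filter,
# -x/2 -y/2 halves the resolution) to produce the final anti-aliased HDR image.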
def falsecolorLine(self, projectName, viewName):
line = "c:\python27\python c:\honeybee\\falsecolor2.py -i " + projectName + "_RAD_" + viewName + "_RadStudy.pic -s auto -n 10 -mask 0.1 -l kWhm-2 -z > " + projectName + "_" + viewName + "_FalseColored.pic\n" + \
"ra_tiff " + projectName + "_" + viewName + "_FalseColored.pic " + projectName + "_" + viewName + "_FalseColored.tif\n" + \
"ra_gif " + projectName + "_" + viewName + "_FalseColored.pic " + projectName + "_" + viewName + "_FalseColored.gif\n"
return line
def rtraceLine(self, projectName, octFileName, radParameters, simulationType = 0, cpuCount = 0):
ptsFile = projectName + "_" + str(cpuCount) + ".pts"
outputFile = projectName + "_" + str(cpuCount) + ".res"
if simulationType == 0:
line0 = "rtrace -I "
elif simulationType == 2:
line0 = "rtrace "
else:
# print "Fix this for radiation analysis"
line0 = "rtrace -I "
line1 = " -h -ms 0.063 -dp " + str(radParameters["_dp_"]) + \
" -ds " + str(radParameters["_ds_"]) + " -dt " + str(radParameters["_dt_"]) + \
" -dc " + str(radParameters["_dc_"]) + " -dr " + str(radParameters["_dr_"]) + \
" -st " + str(radParameters["_st_"]) + " -lr " + str(radParameters["_lr_"]) + \
" -lw " + str(radParameters["_lw_"]) + " -ab " + str(radParameters["_ab_"]) + \
" -ad " + str(radParameters["_ad_"]) + " -as " + str(radParameters["_as_"]) + \
" -ar " + str(radParameters["_ar_"]) + " -aa " + str(radParameters["_aa_"]) + \
" " + octFileName + ".oct < " + ptsFile + \
" > " + outputFile + "\n"
return line0 + line1
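# Illustrative output with hypothetical names and cpuCount = 0:
#   rtrace -I  -h -ms 0.063 ... -aa 0.1 scene.oct < project_0.pts > project_0.res
# The -I flag makes rtrace report irradiance at each sensor point rather than radiance
# along the ray, which is why simulationType 2 (luminance/radiance) drops it.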
def testPtsStr(self, testPoint, ptsNormal):
return '%.4f'%testPoint.X + '\t' + \
'%.4f'%testPoint.Y + '\t' + \
'%.4f'%testPoint.Z + '\t' + \
'%.4f'%ptsNormal.X + '\t' + \
'%.4f'%ptsNormal.Y + '\t' + \
'%.4f'%ptsNormal.Z + '\n'
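# Each test point becomes one tab-separated line of "x y z nx ny nz"; a sensor at the
# origin facing straight up (hypothetical) would be written as:
#   0.0000  0.0000  0.0000  0.0000  0.0000  1.0000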
def readRadiationResult(self, resultFile):
result = []
resultFile = open(resultFile,"r")
for line in resultFile: result.append(float(line.split(' ')[0])*179)
return result
def readDLResult(self, resultFile):
result = []
resultFile = open(resultFile,"r")
for line in resultFile:
R, G, B = line.split(' ')[0:3]
result.append( 179 * (.265 * float(R) + .67 * float(G) + .065 * float(B)))
return result
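# The RGB triplet from Radiance is collapsed to photometric units with the standard
# luminous efficacy weighting, result = 179 * (0.265*R + 0.670*G + 0.065*B); for
# example R = G = B = 0.5 maps to 179 * 0.5 = 89.5 lux (or cd/m2 for luminance runs).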
def isSrfInterior(self, HBSrf):
# This can be tricky since some of interior walls may or may not be air walls
if HBSrf.type == 0 and HBSrf.BC.lower() == "surface":
return True
else:
return False
class WriteDS(object):
def isSensor(self, testPt, sensors):
for pt in sensors:
if pt.DistanceTo(testPt) < sc.doc.ModelAbsoluteTolerance:
# this is a sensor point
return True
# not a sensor
return False
def DSHeadingStr(self, projectName, subWorkingDir, tempFolder, hb_DSPath, cpuCount = 0):
return '#######################################\n' + \
'#DAYSIM HEADING - GENERATED BY HONEYBEE\n' + \
'#######################################\n' + \
'project_name ' + projectName + '_' + `cpuCount` + '\n' + \
'project_directory ' + subWorkingDir + '\\\n' + \
'bin_directory ' + hb_DSPath + '\\bin\\\n' + \
'tmp_directory ' + tempFolder + '\\\n' + \
'Template_File ' + hb_DSPath + '\\template\\DefaultTemplate.htm\n'
def DSLocationStr(self, hb_writeRADAUX, lb_preparation, epwFileAddress):
# location information
locName, lat, long, timeZone, elev = hb_writeRADAUX.RADLocation(epwFileAddress)
locName = locName.replace("/", "_")
return'\n\n#################################\n' + \
'# LOCATION INFORMATION \n' + \
'#################################\n' + \
'place ' + lb_preparation.removeBlankLight(locName) + '\n' + \
'latitude ' + lat + '\n' + \
'longitude ' + `-float(long)` + '\n' + \
'time_zone ' + `-15 * float(timeZone)` + '\n' + \
'site_elevation ' + elev + '\n' + \
'time_step ' + '60\n' + \
'wea_data_short_file ' + lb_preparation.removeBlankLight(locName) + '.wea\n' + \
'wea_data_short_file_units ' + '1\n' + \
'lower_direct_threshold ' + '2\n' + \
'lower_diffuse_threshold ' + '2\n', locName
def DSAnalysisUnits(self, outputUnits, pointsCount):
# I noticed that setting output_units to 1 returns all 0 results and not the radiation values;
# however, assigning type 2 for each point using sensor_file_unit works! I think this is a bug
# in Daysim that I should report to the mailing list next week when I come back from Chicago.
outputUnits = outputUnits[0]
if outputUnits == 2:
return 'output_units ' + `outputUnits` + '\n'
elif outputUnits == 1:
outputStr = "sensor_file_unit"
for pt in range(pointsCount): outputStr += " 2"
return outputStr +"\n"
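# Minimal illustration (hypothetical inputs): DSAnalysisUnits([1], 3) applies the
# workaround described above and returns "sensor_file_unit 2 2 2\n", while
# DSAnalysisUnits([2], 3) simply returns "output_units 2\n".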
# building information
def DSBldgStr(self, projectName, materialFileName, radFileFullName, adaptiveZone, dgp_image_x = 500, dgp_image_y = 500, cpuCount = 0, northAngle = 0):
return'\n\n#################################\n' + \
'# BUILDING INFORMATION \n' + \
'#################################\n' + \
'material_file Daysim_material_' + projectName + '.rad\n' + \
'geometry_file Daysim_'+ projectName + '.rad\n' + \
'radiance_source_files 2, ' + materialFileName + ', ' + radFileFullName + '\n' + \
'sensor_file ' + projectName + '_' + `cpuCount` + '.pts\n' + \
'viewpoint_file ' + projectName + '_' + 'annualGlareView.vf\n' + \
'AdaptiveZoneApplies ' + `adaptiveZone` + '\n' + \
'dgp_image_x_size ' + `dgp_image_x` + '\n' + \
'dgp_image_y_size ' + `dgp_image_y` + '\n' + \
'scene_rotation_angle ' + `northAngle` + '\n'
# radiance parameters
def DSRADStr(self, radParameters):
return '\n\n#################################\n' + \
'# RADIANCE PARAMETERS \n' + \
'#################################\n' + \
'ab ' + `radParameters["_ab_"]` + '\n' + \
'ad ' + `radParameters["_ad_"]` + '\n' + \
'as ' + `radParameters["_as_"]` + '\n' + \
'ar ' + `radParameters["_ar_"]` + '\n' + \
'aa ' + `radParameters["_aa_"]` + '\n' + \
'lr 6\n' + \
'st 0.1500\n' + \
'sj 1.0000\n' + \
'lw 0.0040000\n' + \
'dj 0.0000\n' + \
'ds 0.200\n' + \
'dr 2\n' + \
'dp 512\n'
def DSDynamicSimStr(self, shadingRecipes, projectName, subWorkingDir, testPts, cpuCount = 0):
dynOptStr = '\n==========================\n' + \
'= shading control system\n' + \
'==========================\n'
numOfDynamicShadings = 0
# find number of dynamic shadings
for shadingRecipe in shadingRecipes:
if shadingRecipe.type == 2:
numOfDynamicShadings += 1
dynamicShdHeading ='shading -' + str(numOfDynamicShadings) + '\n' + \
projectName + '_' + `cpuCount` + '.dc ' + projectName + '_' + `cpuCount` + '.ill\n'
dynamicCounter = 0
for recipeCount, shadingRecipe in enumerate(shadingRecipes):
name = shadingRecipe.name
type = shadingRecipe.type
if type == 1:
# no dynamic blind
sensorPts = []
dynamicShd ='shading ' + str(type) + ' ' + name + ' ' + projectName + '_' + `cpuCount` + '.dc ' + projectName + '_' + `cpuCount` + '.ill\n' + \
'\n'
elif type == 0:
# conceptual dynamic shading
sensors = shadingRecipe.sensorPts
dynamicShd ='shading ' + str(type) + '\n' + \
name + '_' + str(recipeCount+1) + ' ' + projectName + '_' + `cpuCount` + '.dc ' + projectName + '_' + `cpuCount` + '_up.ill\n' + \
projectName + '_' + `cpuCount` + '_down.ill\n\n'
elif type == 2:
dynamicCounter += 1
dynamicShd = ""
# advanced dynamic shading
glareControlRecipe = shadingRecipe.glareControlR
shadingStates = shadingRecipe.shadingStates
controlSystem = shadingRecipe.controlSystem
# sensors = shadingRecipe.sensorPts #sensors are removed from this part and will be added later for the analysis
coolingPeriod = shadingRecipe.coolingPeriod
# add the heading for the first dynamic shading group
if dynamicCounter == 1: dynamicShd = dynamicShdHeading
groupName = name
if controlSystem == "ManualControl":
dynamicShd += groupName + '\n' + \
str(len(shadingStates)-1) + '\n' + \
"ManualControl " + subWorkingDir + "\\" + groupName + "_state_1.rad\n"
for stateCount in range(1, len(shadingStates)):
dynamicShd += subWorkingDir + "\\" + groupName + "_state_" + str(stateCount + 1) + ".rad " + \
groupName + "_state_" + str(stateCount + 1) + '_' + `cpuCount` + ".dc " + \
groupName + "_state_" + str(stateCount + 1) + '_' + `cpuCount` + ".ill\n"
elif controlSystem == "AutomatedThermalControl":
if glareControlRecipe!=None:
controlSystem = "AutomatedGlareControl"
exteriorSensor = glareControlRecipe.exteriorSensor
threshold = glareControlRecipe.threshold
minAz = glareControlRecipe.minAz
maxAz = glareControlRecipe.maxAz
minAlt = glareControlRecipe.minAltitude
maxAlt = glareControlRecipe.maxAltitude
if len(coolingPeriod)!=0:
stMonth, stDay, hour = coolingPeriod[0]
endMonth, endDay, hour = coolingPeriod[1]
controlSystem += "WithOccupancy"
if controlSystem == "AutomatedThermalControl":
dynamicShd += groupName + '\n' + \
str(len(shadingStates)-1) + '\n' + \
"AutomatedThermalControl " + subWorkingDir + "\\" + groupName + "_state_1.rad\n"
for stateCount, shadingState in enumerate(shadingStates):
try:
dynamicShd += `int(shadingState.minIlluminance)` + " " + `int(shadingState.maxIlluminance)` + " " + \
subWorkingDir + "\\" + groupName + "_state_" + str(stateCount + 1) + ".rad " + \
groupName + "_state_" + str(stateCount + 1) + '_' + `cpuCount` + ".dc " + \
groupName + "_state_" + str(stateCount + 1) + '_' + `cpuCount` + ".ill\n"
except:
# empty shading states
pass
elif controlSystem == "AutomatedThermalControlWithOccupancy":
dynamicShd += groupName + '\n' + \
str(len(shadingStates)-1) + '\n' + \
"AutomatedThermalControlWithOccupancy " + \
`stMonth` + " " + `stDay` + " " + `endMonth` + " " + `endDay` + " " + \
subWorkingDir + "\\" + groupName + "_state_1.rad\n"
for stateCount, shadingState in enumerate(shadingStates):
try:
dynamicShd += `int(shadingState.minIlluminance)` + " " + `int(shadingState.maxIlluminance)` + " " + \
subWorkingDir + "\\" + groupName + "_state_" + str(stateCount + 1) + ".rad " + \
groupName + "_state_" + str(stateCount + 1) + '_' + `cpuCount` + ".dc " + \
groupName + "_state_" + str(stateCount + 1) + '_' + `cpuCount` + ".ill\n"
except:
pass
elif controlSystem == "AutomatedGlareControl":
dynamicShd += groupName + '\n' + \
str(len(shadingStates)-1) + '\n' + \
"AutomatedGlareControl \n" + \
`int(threshold)` + " " + `int(minAz)` + " " + `int(maxAz)` + " " + \
`int(minAlt)` + " " + `int(maxAlt)` + " " + subWorkingDir + "\\" + groupName + "_state_1.rad\n"
for stateCount, shadingState in enumerate(shadingStates):
try:
dynamicShd += `int(shadingState.minIlluminance)` + " " + `int(shadingState.maxIlluminance)` + " " + \
subWorkingDir + "\\" + groupName + "_state_" + str(stateCount + 1) + ".rad " + \
groupName + "_state_" + str(stateCount + 1) + '_' + `cpuCount` + ".dc " + \
groupName + "_state_" + str(stateCount + 1) + '_' + `cpuCount` + ".ill\n"
except:
pass
elif controlSystem == "AutomatedGlareControlWithOccupancy":
dynamicShd += groupName + '\n' + \
str(len(shadingStates)-1) + '\n' + \
"AutomatedGlareControlWithOccupancy \n" + \
`int(threshold)` + " " + `int(minAz)` + " " + `int(maxAz)` + " " + \
`int(minAlt)` + " " + `int(maxAlt)` + "\n" + \
`stMonth` + " " + `stDay` + " " + `endMonth` + " " + `endDay` + " " + \
subWorkingDir + "\\" + groupName + "_state_1.rad\n"
for stateCount, shadingState in enumerate(shadingStates):
try:
dynamicShd += `int(shadingState.minIlluminance)` + " " + `int(shadingState.maxIlluminance)` + " " + \
subWorkingDir + "\\" + groupName + "_state_" + str(stateCount + 1) + ".rad " + \
groupName + "_state_" + str(stateCount + 1) + '_' + `cpuCount` + ".dc " + \
groupName + "_state_" + str(stateCount + 1) + '_' + `cpuCount` + ".ill\n"
except:
pass
dynOptStr += dynamicShd
# I removed the sensor point from here as it wasn't really necessary to
# apply it here.
#sensorInfoStr = 'sensor_file_info'
#if type == 0 or type == 2:
# for pt in testPts:
# if self.isSensor(pt, sensors):
# sensorInfoStr += ' BG' + str(recipeCount+1)
# # if BG1_Ext
# # add external sensor_ This should happen inside the loop for each group
# # as the point maybe part of multiple shading groups
# else:
# sensorInfoStr += ' 0'
#
#else:
# for pt in testPts: sensorInfoStr += ' 0'
#
#dynOptStr += sensorInfoStr
#'==========================\n' + \
#'= electric lighting system\n' + \
#'==========================\n' + \
#'electric_lighting_system 2\n' + \
#' 4 manual_dimming 100 1 0.0 20 300\n' + \
#' 1 manual_control 200 1\n' + \
#'\n' + \
#'sensor_file_info '
#for pt in range(lenOfPts[cpuCount]): dynOptStr = dynOptStr + '0 '
return dynOptStr + '\n'
def resultStr(self, projectName, cpuCount):
return '\n\n######################\n' + \
'# daylighting results \n' + \
'######################\n' + \
'daylight_autonomy_active_RGB ' + projectName + '_' + `cpuCount` + '_autonomy.DA\n' + \
'electric_lighting ' + projectName + '_' + `cpuCount` + '_electriclighting.htm\n' + \
'direct_sunlight_file ' + projectName + '_' + `cpuCount` + '.dir\n' + \
'thermal_simulation ' + projectName + '_' + `cpuCount` + '_intgain.csv\n'
class hb_EnergySimulatioParameters(object):
def readEPParams(self, EPParameters):
if EPParameters == [] or len(EPParameters)!=11:
timestep = 6
shadowPar = ["AverageOverDaysInFrequency", 30, 3000]
solarDistribution = "FullInteriorAndExteriorWithReflections"
simulationControl = [True, True, True, False, True]
ddyFile = None
else:
timestep = int(EPParameters[0])
shadowPar = EPParameters[1:4]
solarDistribution = EPParameters[4]
simulationControl = EPParameters[5:10]
ddyFile = EPParameters[10]
return timestep, shadowPar, solarDistribution, simulationControl, ddyFile
class EPMaterialAux(object):
def __init__(self):
self.energyModelingStandards = {"0" : "ASHRAE 90.1",
"1" : "ASHRAE 189.1",
"2" : "CBECS 1980-2004",
"3" : "CBECS Before-1980",
"ASHRAE901" : "ASHRAE 90.1",
"ASHRAE1891" : "ASHRAE 189.1",
"CBECS19802004" : "CBECS 1980-2004",
"CBECSBEFORE1980" : "CBECS Before-1980"}
def calcEPMaterialUValue(self, materialObj, GHComponent = None):
materialType = materialObj[0]
if materialType.lower() == "windowmaterial:simpleglazingsystem":
UValueSI = float(materialObj[1][0])
elif materialType.lower() == "windowmaterial:glazing":
thickness = float(materialObj[3][0])
conductivity = float(materialObj[13][0])
UValueSI = conductivity/thickness
elif materialType.lower() == "material:nomass":
# Material:NoMass is defined by R-Value and not U-Value
UValueSI = 1 / float(materialObj[2][0])
elif materialType.lower() == "material":
thickness = float(materialObj[2][0])
conductivity = float(materialObj[3][0])
UValueSI = conductivity/thickness
elif materialType.lower() == "material:airgap":
UValueSI = 1 / float(materialObj[1][0])
#print materialObj
#print UValueSI
elif materialType.lower() == "material:airgap":
UValueSI = 1 / float(materialObj[1][0])
elif materialType.lower() == "windowmaterial:gas":
thickness = float(materialObj[2][0])
if materialObj[1][0].lower() == "air":
# conductivity = 0.1603675
# considering ('0.18' for 'Thermal Resistance {m2-K/W}')
UValueSI = 5.55555555556
else:
warningMsg = "Honeybee can't calculate the UValue for " + materialObj[1][0] + ".\n" + \
"Let us know if you think it is really neccesary and we will add it to the list. :)"
if GHComponent!=None:
w = gh.GH_RuntimeMessageLevel.Warning
GHComponent.AddRuntimeMessage(w, warningMsg)
print materialObj
else:
warningMsg = "Honeybee currently doesn't support U-Value calculation for " + materialType + ".\n" +\
"Let us know if you think it is really neccesary and we will add it to the list. :)"
if GHComponent!=None:
w = gh.GH_RuntimeMessageLevel.Warning
GHComponent.AddRuntimeMessage(w, warningMsg)
# http://bigladdersoftware.com/epx/docs/8-0/input-output-reference/page-010.html
UValueSI = -1
return UValueSI
def calcEPConstructionUValue(self, constructionObj, GHComponent=None):
# find material layers
uValues = []
for layer in constructionObj.keys()[1:]:
materialName, comment = constructionObj[layer]
try: values, comments, UValueSI, UValueIP = self.decomposeMaterial(materialName, GHComponent)
except: UValueSI = -1
uValues.append(UValueSI)
# calculate cumulative UValue
totalRValue = 0
for uValue in uValues:
totalRValue += 1/uValue
return 1/totalRValue
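# The layers act as thermal resistances in series, so the construction U-value is
# 1 / sum(1/U_i). For a hypothetical two-layer construction with layer U-values of
# 2.0 and 0.5 W/m2-K this gives R = 0.5 + 2.0 = 2.5 m2-K/W and U = 1/2.5 = 0.4 W/m2-K.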
def convertUValueToIP(self, UValueSI):
return 0.176110 * UValueSI
def convertUValueToSI(self, UValueIP):
return 5.678263 * UValueIP
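# These factors are the standard unit conversions: 1 W/m2-K = 0.176110 Btu/h-ft2-F and
# 1 Btu/h-ft2-F = 5.678263 W/m2-K; e.g. convertUValueToIP(2.0) returns roughly 0.352.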
def decomposeMaterial(self, matName, GHComponent = None):
try:
try:
materialObj = sc.sticky["honeybee_materialLib"][matName.upper()]
except:
materialObj = sc.sticky["honeybee_windowMaterialLib"][matName.upper()]
comments = []
values = []
#print matName
for layer in materialObj.keys():
try:
value, comment = materialObj[layer]
# print value + ',\t!-' + comment + "\n"
values.append(value)
comments.append(comment)
except:
value = materialObj[layer]
values.append(value)
comments.append('Material Type')
UValueSI = self.calcEPMaterialUValue(materialObj, GHComponent)
UValueIP = self.convertUValueToIP(UValueSI)
return values, comments, UValueSI, UValueIP
except Exception, e:
print `e`
print "Failed to find " + matName + " in the Honeybee material library."
return -1
def decomposeEPCnstr(self, cnstrName, GHComponent = None):
try:
constructionObj = sc.sticky ["honeybee_constructionLib"][cnstrName.upper()]
comments = []
materials = []
# print cnstrName
for layer in constructionObj.keys():
try:
material, comment = constructionObj[layer]
materials.append(material)
comments.append(comment)
except:
material = constructionObj[layer]
materials.append(material)
comments.append("!- Material Type")
# place holder
UValue_SI = self.calcEPConstructionUValue(constructionObj, GHComponent)
UValue_IP = self.convertUValueToIP(UValue_SI)
return materials[1:], comments[1:], UValue_SI, UValue_IP
except Exception, e:
print `e`
print "Failed to find " + cnstrName + " in the Honeybee construction library."
return -1
def searchListByKeyword(self, inputList, keywords):
""" search inside a list of strings for keywords """
def checkMultipleKeywords(name, keywordlist):
for kw in keywordlist:
if name.find(kw)== -1:
return False
return True
kWords = []
for kw in keywords:
kWords.append(kw.strip().upper().split(" "))
selectedItems = []
alternateOptions = []
for item in inputList:
if len(kWords)!= 0 and not "*" in keywords:
for keyword in kWords:
if len(keyword) > 1 and checkMultipleKeywords(item.ToUpper(), keyword):
selectedItems.append(item)
elif len(keyword) == 1 and item.ToUpper().find(keyword[0])!= -1:
selectedItems.append(item)
else:
selectedItems.append(item)
return selectedItems
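# Illustrative behaviour (hypothetical inputs): searching with ["exterior wall"] keeps
# only items whose upper-cased name contains both "EXTERIOR" and "WALL"; a single-word
# keyword such as ["roof"] keeps any item containing "ROOF"; passing "*" (or an empty
# keyword list) returns every item.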
def filterMaterials(self, constrList, standard, climateZone, surfaceType, bldgProgram, constructionType, sourceComponent):
hb_EPTypes = EPTypes()
w = gh.GH_RuntimeMessageLevel.Warning
try:
standard = str(standard).upper().Replace(" ", "").Replace("-", "").Replace(".", "")
standard = self.energyModelingStandards[standard]
except:
msg = "The input for standard is not valid. Standard is set to ASHRAE 90.1"
sourceComponent.AddRuntimeMessage(w, msg)
standard = "ASHRAE 90.1"
selConstr =[]
for cnstrName in constrList:
if cnstrName.upper().find(standard.upper())!=-1 and cnstrName.upper().find(surfaceType.upper())!=-1:
# check for climate zone
if climateZone!="":
clmZones = []
# split by space " "
possibleAlt, zoneCode = cnstrName.split(" ")[-2:]
clmZoneList = zoneCode.split("-")
if len(clmZoneList) != 1:
try:
clmZoneRange = range(int(clmZoneList[0]), int(clmZoneList[1]) + 1)
for clmZone in clmZoneRange: clmZones.append(str(clmZone))
except:
clmZones = [clmZoneList[0], clmZoneList[1]]
else:
clmZones = clmZoneList
if climateZone in clmZones:
selConstr.append(cnstrName)
elif climateZone[0] in clmZones:
# cases like 3a that is included in 3
selConstr.append(cnstrName)
else:
selConstr.append(cnstrName)
# check if any alternate
alternateFit = []
if bldgProgram!=None and bldgProgram!="":
bldgProgram = hb_EPTypes.bldgTypes[int(bldgProgram)]
# print bldgProgram
for cnstrName in selConstr:
possibleAlt = cnstrName.split(" ")[-2].split("-")
if possibleAlt[0].upper().find("ALT")!= -1:
if bldgProgram.upper().find(possibleAlt[1].upper())!= -1:
# if there is an alternate fit the rest should be removed
gistName = " ".join(cnstrName.split(" ")[:-2])
alternateFit.append(gistName)
else:
selConstr.remove(cnstrName)
# check if there is a best fit and if not just return the list
if alternateFit!=[]:
for cnstrName in selConstr:
for gistName in alternateFit:
if cnstrName.upper().find(gistName.upper())!= -1 and cnstrName.split(" ")[-2].split("-")[0].upper() != "ALT":
try: selConstr.remove(cnstrName)
except: pass
# if there are multiple options they should be for different construction types
# so let check that
if len(selConstr) > 1 and constructionType != "":
tempSelConstr = []
for cnstrName in selConstr:
if cnstrName.upper().find(constructionType.upper())!= -1:
tempSelConstr.append(cnstrName)
if len(tempSelConstr)!=0:
selConstr = tempSelConstr
return selConstr
def isEPMaterialObjectAlreadyExists(self, name):
"""
Check if material or construction exist
"""
if name in sc.sticky ["honeybee_constructionLib"].keys(): return True
if name in sc.sticky ["honeybee_materialLib"].keys(): return True
if name in sc.sticky ["honeybee_windowMaterialLib"].keys(): return True
return False
def getEPObjectsStr(self, objectName):
"""
This function should work for materials and constructions
"""
objectData = None
if objectName in sc.sticky ["honeybee_windowMaterialLib"].keys():
objectData = sc.sticky ["honeybee_windowMaterialLib"][objectName]
elif objectName in sc.sticky ["honeybee_materialLib"].keys():
objectData = sc.sticky ["honeybee_materialLib"][objectName]
elif objectName in sc.sticky ["honeybee_constructionLib"].keys():
objectData = sc.sticky ["honeybee_constructionLib"][objectName]
if objectData!=None:
numberOfLayers = len(objectData.keys())
# add material/construction type
# print objectData
objectStr = objectData[0] + ",\n"
# add the name
objectStr = objectStr + " " + objectName + ", !- name\n"
for layer in range(1, numberOfLayers):
if layer < numberOfLayers-1:
objectStr = objectStr + " " + str(objectData[layer][0]) + ", !- " + objectData[layer][1] + "\n"
else:
objectStr = objectStr + " " + str(objectData[layer][0]) + "; !- " + objectData[layer][1] + "\n\n"
return objectStr
def getObjectKey(self, EPObject):
EPKeys = ["Material", "WindowMaterial", "Construction"]
# check if it is a full string
for key in EPKeys:
if EPObject.strip().startswith(key):
return key
def addEPConstructionToLib(self, EPMaterial, overwrite = False):
key = self.getObjectKey(EPMaterial)
if key == None:
return None, None
HBLibrarieNames = {
"Construction" : "honeybee_constructionLib",
"Material" : "honeybee_materialLib",
"WindowMaterial" : "honeybee_windowMaterialLib"
}
# find construction/material name
name = EPMaterial.split("\n")[1].split("!")[0].strip()[:-1].upper()
if name in sc.sticky[HBLibrarieNames[key]].keys():
#overwrite = True
if not overwrite:
# ask user if they want to overwrite it
add = self.duplicateEPMaterialWarning(name, EPMaterial)
if not add: return False, name
# add material/construction to the lib
# create an empty dictionary for the material
sc.sticky[HBLibrarieNames[key]][name] = {}
lines = EPMaterial.split("\n")
# store the data into the dictionary
for lineCount, line in enumerate(lines):
objValue = line.split("!")[0].strip()
try: objDescription = line.split("!")[1].strip()
except: objDescription = ""
if lineCount == 0:
sc.sticky[HBLibrarieNames[key]][name][lineCount] = objValue[:-1]
elif lineCount == 1:
pass # name is already there as the key
elif objValue.endswith(","):
sc.sticky[HBLibrarieNames[key]][name][lineCount-1] = objValue[:-1], objDescription
elif objValue.endswith(";"):
sc.sticky[HBLibrarieNames[key]][name][lineCount-1] = objValue[:-1], objDescription
break
# add name to list
# sc.sticky [HBLibrarieNames[key]]["List"].append(name)
return True, name
def duplicateEPMaterialWarning(self, objectName, newMaterialString):
# this function duplicates duplicateEPObjectWarning and should be removed at some point
returnYN = {'YES': True, 'NO': False}
buttons = System.Windows.Forms.MessageBoxButtons.YesNo
icon = System.Windows.Forms.MessageBoxIcon.Warning
currentMaterialString = self.getEPObjectsStr(objectName)
msg = objectName + " already exists in the library:\n\n" + \
currentMaterialString + "\n" + \
"Do you want to overwrite the current with this new definition?\n\n" + \
newMaterialString + "\n\n" + \
"Tip: If you are not sure what to do select No and change the name."
up = rc.UI.Dialogs.ShowMessageBox(msg, "Duplicate Material Name", buttons, icon)
return returnYN[up.ToString().ToUpper()]
class EPScheduleAux(object):
def getScheduleDataByName(self, schName, component = None):
if schName.lower().endswith(".csv"):
# Check for the file
if not os.path.isfile(schName):
msg = "Failed to find the schedule file: " + schName
print msg
if component is not None:
component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
return None, None
return schName, "csv"
try:
scheduleObj = sc.sticky["honeybee_ScheduleLib"][schName.upper()]
except Exception, e:
#print e
msg = "Failed to find " + schName + " in the Honeybee schedule library."
print msg
if component is not None:
component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
return None, None
comments = []
values = []
for layer in scheduleObj.keys():
try:
material, comment = scheduleObj[layer]
values.append(material)
comments.append(comment)
except:
scheduleType = scheduleObj[layer]
values.append(scheduleType)
comments.append("Schedule Type")
return values, comments
def getScheduleTypeLimitsDataByName(self, schName, component = None):
try:
scheduleObj = sc.sticky["honeybee_ScheduleTypeLimitsLib"][schName.upper()]
except Exception, e:
#print e
msg = "Failed to find " + schName + " in the Honeybee schedule type limits library."
print msg
if component is not None:
component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
return None, None
comments = []
values = []
for layer in scheduleObj.keys():
try:
material, comment = scheduleObj[layer]
values.append(material)
comments.append(comment)
except:
scheduleType = scheduleObj[layer]
values.append(scheduleType)
comments.append("Schedule Type")
return values, comments
class EPObjectsAux(object):
def isEPMaterial(self, matName):
return matName.upper() in sc.sticky["honeybee_materialLib"].keys() or \
matName.upper() in sc.sticky["honeybee_windowMaterialLib"].keys()
def isEPConstruction(self, matName):
return matName.upper() in sc.sticky["honeybee_constructionLib"].keys()
def isSchedule(self, scheduleName):
return scheduleName.upper() in sc.sticky["honeybee_ScheduleLib"].keys()
def isScheduleTypeLimits(self, scheduleName):
return scheduleName.upper() in sc.sticky["honeybee_ScheduleTypeLimitsLib"].keys()
def customizeEPObject(self, EPObjectName, indexes, inValues):
hb_EPScheduleAUX = EPScheduleAux()
hb_EPMaterialAUX = EPMaterialAux()
if self.isSchedule(EPObjectName):
values, comments = hb_EPScheduleAUX.getScheduleDataByName(EPObjectName.upper())
elif self.isScheduleTypeLimits(EPObjectName):
values, comments = hb_EPScheduleAUX.getScheduleTypeLimitsDataByName(EPObjectName.upper())
elif self.isEPConstruction(EPObjectName):
values, comments, uSI, uIP = hb_EPMaterialAUX.decomposeEPCnstr(EPObjectName.upper())
elif self.isEPMaterial(EPObjectName):
values, comments, uSI, uIP = hb_EPMaterialAUX.decomposeMaterial(EPObjectName.upper())
else:
return
# create a dictionary of index and values
if len(indexes)==0 or (len(indexes) != len(inValues)):
return
valuesDict = {}
for i, v in zip(indexes, inValues):
valuesDict[i] = v
count = 0
originalObj = ""
modifiedObj = ""
for value, comment in zip(values, comments):
if count == len(values):
separator = ";"
else:
separator = ","
if count == 1:
# add name
originalObj += "[" + `count` + "]\t" + EPObjectName.upper() + " ! Name\n"
if count in valuesDict.keys():
# update the value
modifiedObj += valuesDict[count].upper() + separator + " ! Name\n"
else:
# keep original
modifiedObj += EPObjectName.upper() + separator + " ! Name\n"
count = 2
originalObj += "[" + `count` + "]\t " + value + " !" + comment + "\n"
if count in valuesDict.keys():
modifiedObj += valuesDict[count] + separator + " !" + comment + "\n"
else:
modifiedObj += value + separator + " !" + comment + "\n"
count += 1
return originalObj, modifiedObj
def getObjectKey(self, EPObject):
EPKeys = ["Material", "WindowMaterial", "Construction", "ScheduleTypeLimits", "Schedule"]
# check if it is a full string
for key in EPKeys:
if EPObject.strip().startswith(key):
return key
def addEPObjectToLib(self, EPObject, overwrite = False):
key = self.getObjectKey(EPObject)
if key == None:
return None, None
HBLibrarieNames = {
"Construction" : "honeybee_constructionLib",
"Material" : "honeybee_materialLib",
"WindowMaterial" : "honeybee_windowMaterialLib",
"Schedule": "honeybee_ScheduleLib",
"ScheduleTypeLimits" : "honeybee_ScheduleTypeLimitsLib"
}
# find construction/material name
name = EPObject.split("\n")[1].split("!")[0].strip()[:-1].upper()
if name in sc.sticky[HBLibrarieNames[key]].keys():
#overwrite = True
if not overwrite:
# ask user if they want to overwrite it
add = self.duplicateEPObjectWarning(name, EPObject)
if not add: return False, name
# add material/construction to the lib
# create an empty dictionary for the material
sc.sticky[HBLibrarieNames[key]][name] = {}
lines = EPObject.split("\n")
# store the data into the dictionary
for lineCount, line in enumerate(lines):
objValue = line.split("!")[0].strip()
try: objDescription = line.split("!")[1].strip()
except: objDescription = ""
if lineCount == 0:
sc.sticky[HBLibrarieNames[key]][name][lineCount] = objValue[:-1]
elif lineCount == 1:
pass # name is already there as the key
elif objValue.endswith(","):
sc.sticky[HBLibrarieNames[key]][name][lineCount-1] = objValue[:-1], objDescription
elif objValue.endswith(";"):
sc.sticky[HBLibrarieNames[key]][name][lineCount-1] = objValue[:-1], objDescription
break
# add name to list
#sc.sticky [HBLibrarieNames[key]]["List"].append(name)
return True, name
def getEPObjectDataByName(self, objectName):
objectData = None
objectName = objectName.upper()
if objectName in sc.sticky ["honeybee_windowMaterialLib"].keys():
objectData = sc.sticky ["honeybee_windowMaterialLib"][objectName]
elif objectName in sc.sticky ["honeybee_materialLib"].keys():
objectData = sc.sticky ["honeybee_materialLib"][objectName]
elif objectName in sc.sticky ["honeybee_constructionLib"].keys():
objectData = sc.sticky ["honeybee_constructionLib"][objectName]
elif objectName in sc.sticky["honeybee_ScheduleLib"].keys():
objectData = sc.sticky ["honeybee_ScheduleLib"][objectName]
elif objectName in sc.sticky["honeybee_ScheduleTypeLimitsLib"].keys():
objectData = sc.sticky ["honeybee_ScheduleTypeLimitsLib"][objectName]
return objectData
def getEPObjectsStr(self, objectName):
"""
This function should work for materials and constructions
"""
objectData = self.getEPObjectDataByName(objectName)
if objectData!=None:
numberOfLayers = len(objectData.keys())
# add material/construction type
# print objectData
objectStr = objectData[0] + ",\n"
# add the name
objectStr = objectStr + " " + objectName + ", !- name\n"
for layer in range(1, numberOfLayers):
if layer < numberOfLayers-1:
objectStr = objectStr + " " + str(objectData[layer][0]) + ", !- " + objectData[layer][1] + "\n"
else:
objectStr = objectStr + " " + str(objectData[layer][0]) + "; !- " + objectData[layer][1] + "\n\n"
return objectStr
def duplicateEPObjectWarning(self, objectName, newMaterialString):
returnYN = {'YES': True, 'NO': False}
buttons = System.Windows.Forms.MessageBoxButtons.YesNo
icon = System.Windows.Forms.MessageBoxIcon.Warning
currentMaterialString = self.getEPObjectsStr(objectName)
msg = objectName + " already exists in the library:\n\n" + \
currentMaterialString + "\n" + \
"Do you want to overwrite the current with this new definition?\n\n" + \
newMaterialString + "\n\n" + \
"Tip: If you are not sure what to do select No and change the name."
up = rc.UI.Dialogs.ShowMessageBox(msg, "Duplicate Material Name", buttons, icon)
return returnYN[up.ToString().ToUpper()]
class EPTypes(object):
def __init__(self):
self.srfType = {0:'WALL',
0.5: 'UndergroundWall',
1:'ROOF',
1.5: 'UndergroundCeiling',
2:'FLOOR',
2.25: 'UndergroundSlab',
2.5: 'SlabOnGrade',
2.75: 'ExposedFloor',
3:'CEILING',
4:'WALL',
5:'WINDOW',
6:'SHADING',
'WALL': 'WALL',
'ROOF':'ROOF',
'FLOOR': 'FLOOR',
'CEILING': 'CEILING',
'WINDOW':'WINDOW',
'SHADING': 'SHADING'}
self.bldgTypes = {0:'OFFICE',
'OFFICE':'OFFC',
1:'RETAIL',
'RETAIL':'RETAIL',
2:'APT',
'MIDRISEAPARTMENT':'APT',
3:'PRIMSCH',
'PRIMARYSCHOOL':'PRIMSCH',
4:'SECSCH',
'SECONDARYSCHOOL':'SECSCH',
5:'SMLHOTL',
'SMALLHOTEL':'SMLHOTL',
6:'LRGHTL',
'LARGEHOTEL':'LRGHTL',
7:'HOSP',
'HOSPITAL':'HOSP',
8:'OUTPT',
'OUTPATIENT':'OUTPT',
9:'WARE',
'WAREHOUSE':'WARE',
10:'MARKET',
'SUPERMARKET':'MARKET',
11:'FULLREST',
'FULLSERVICERESTAURANT':'FULLREST',
12:'QUICKREST',
'QUICKSERVICERESTAURANT':'QUICKREST'
}
#Restaurant(Full Service) = "FullServiceRestaurant"
#Restaurant(Quick Service) = "QuickServiceRestaurant"
#Mid-rise Apartment = "Mid-riseApartment"
#Hospital = "Hospital"
#Small Office = "Small Office"
#Medium Office = "Medium Office"
#Large Office = "Large Office"
#Small Hotel = "SmallHotel"
#Large Hotel = "LargeHotel"
#Primary School = "PrimarySchool"
#Secondary School = "SecondarySchool"
#Strip Mall = "StripMall"
#Retail = "Retail"
#Warehouse = "Warehouse"
class materialLibrary(object):
def __init__(self):
self.zoneProgram = {0: 'RETAIL',
1: 'OFFICE',
2: 'RESIDENTIAL',
3: 'HOTEL'}
self.zoneConstructionSet = {0: 'RETAIL_CON',
1: 'OFFICE_CON',
2: 'RESIDENTIAL_CON',
3: 'HOTEL_CON'}
self.zoneInternalLoad = {0: 'RETAIL_INT_LOAD',
1: 'OFFICE_INT_LOAD',
2: 'RESIDENTIAL_INT_LOAD',
3: 'HOTEL_INT_LOAD'}
self.zoneSchedule = {0: 'RETAIL_SCH',
1: 'OFFICE_SCH',
2: 'RESIDENTIAL_SCH',
3: 'HOTEL_SCH'}
self.zoneThermostat = {0: 'RETAIL_SCH',
1: 'OFFICE_SCH',
2: 'RESIDENTIAL_SCH',
3: 'HOTEL_SCH'}
class scheduleLibrary(object):
# schedule library should be updated to functions
# so it can be used to generate schedules
def __init__(self):
self.ScheduleTypeLimits = '\n' + \
'ScheduleTypeLimits,\n' + \
'\tFraction, !- Name\n' + \
'\t0, !- Lower Limit Value\n' + \
'\t1, !- Upper Limit Value\n' + \
'\tCONTINUOUS; !- Numeric Type\n' + \
'\n' + \
'ScheduleTypeLimits,\n' + \
'\tOn/Off, !- Name\n' + \
'\t0, !- Lower Limit Value\n' + \
'\t1, !- Upper Limit Value\n' + \
'\tDISCRETE; !- Numeric Type\n' + \
'\n' + \
'ScheduleTypeLimits,\n' + \
'\tTemperature, !- Name\n' + \
'\t-60, !- Lower Limit Value\n' + \
'\t200, !- Upper Limit Value\n' + \
'\tCONTINUOUS; !- Numeric Type\n' + \
'\n' + \
'ScheduleTypeLimits,\n' + \
'\tControl Type, !- Name\n' + \
'\t0, !- Lower Limit Value\n' + \
'\t4, !- Upper Limit Value\n' + \
'\tDISCRETE; !- Numeric Type\n' + \
'\n' + \
'ScheduleTypeLimits,\n' + \
'\tAny Number; !- Name\n'
self.largeOfficeEquipmentSchedule = '\n' + \
'Schedule:Compact,\n' + \
'\tLarge Office_BLDG_EQUIP_SCH, !- Name\n' + \
'\tFraction, !- Schedule Type Limits Name\n' + \
'\tThrough: 12/31, !- Field 1\n' + \
'\tFor: Weekdays, !- Field 2\n' + \
'\tUntil: 08:00, !- Field 3\n' + \
'\t0.40, !- Field 4\n' + \
'\tUntil: 12:00, !- Field 5\n' + \
'\t0.90, !- Field 6\n' + \
'\tUntil: 13:00, !- Field 7\n' + \
'\t0.80, !- Field 8\n' + \
'\tUntil: 17:00, !- Field 9\n' + \
'\t0.90, !- Field 10\n' + \
'\tUntil: 18:00, !- Field 11\n' + \
'\t0.80, !- Field 12\n' + \
'\tUntil: 20:00, !- Field 13\n' + \
'\t0.60, !- Field 14\n' + \
'\tUntil: 22:00, !- Field 15\n' + \
'\t0.50, !- Field 16\n' + \
'\tUntil: 24:00, !- Field 17\n' + \
'\t0.40, !- Field 18\n' + \
'\tFor: Saturday, !- Field 19\n' + \
'\tUntil: 06:00, !- Field 20\n' + \
'\t0.30, !- Field 21\n' + \
'\tUntil: 08:00, !- Field 22\n' + \
'\t0.4, !- Field 23\n' + \
'\tUntil: 14:00, !- Field 24\n' + \
'\t0.5, !- Field 25\n' + \
'\tUntil: 17:00, !- Field 26\n' + \
'\t0.35, !- Field 27\n' + \
'\tUntil: 24:00, !- Field 28\n' + \
'\t0.30, !- Field 29\n' + \
'\tFor: SummerDesignDay, !- Field 30\n' + \
'\tUntil: 24:00, !- Field 31\n' + \
'\t1.0, !- Field 32\n' + \
'\tFor: WinterDesignDay, !- Field 33\n' + \
'\tUntil: 24:00, !- Field 34\n' + \
'\t0.0, !- Field 35\n' + \
'\tFor: AllOtherDays, !- Field 36\n' + \
'\tUntil: 24:00, !- Field 37\n' + \
'\t0.30; !- Field 38\n'
self.largeOfficeElevatorsSchedule = '\n' + \
'Schedule:Compact,\n' + \
'\tLarge Office_BLDG_ELEVATORS, !- Name\n' + \
'\tFraction, !- Schedule Type Limits Name\n' + \
'\tThrough: 12/31, !- Field 1\n' + \
'\tFor: AllDays, !- Field 2\n' + \
'\tUntil: 04:00, !- Field 3\n' + \
'\t0.05, !- Field 4\n' + \
'\tUntil: 05:00, !- Field 5\n' + \
'\t0.10, !- Field 6\n' + \
'\tUntil: 06:00, !- Field 7\n' + \
'\t0.20, !- Field 8\n' + \
'\tUntil: 07:00, !- Field 9\n' + \
'\t0.40, !- Field 10\n' + \
'\tUntil: 09:00, !- Field 11\n' + \
'\t0.50, !- Field 12\n' + \
'\tUntil: 10:00, !- Field 13\n' + \
'\t0.35, !- Field 14\n' + \
'\tUntil: 16:00, !- Field 15\n' + \
'\t0.15, !- Field 16\n' + \
'\tUntil: 17:00, !- Field 17\n' + \
'\t0.35, !- Field 18\n' + \
'\tUntil: 19:00, !- Field 19\n' + \
'\t0.50, !- Field 20\n' + \
'\tUntil: 21:00, !- Field 21\n' + \
'\t0.40, !- Field 22\n' + \
'\tUntil: 22:00, !- Field 23\n' + \
'\t0.30, !- Field 24\n' + \
'\tUntil: 23:00, !- Field 25\n' + \
'\t0.20, !- Field 26\n' + \
'\tUntil: 24:00, !- Field 27\n' + \
'\t0.10; !- Field 28\n'
self.largeOfficeOccupancySchedule = '\n' + \
'Schedule:Compact,\n' + \
'\tLarge Office_BLDG_OCC_SCH, !- Name\n' + \
'\tFraction, !- Schedule Type Limits Name\n' + \
'\tThrough: 12/31, !- Field 1\n' + \
'\tFor: SummerDesignDay, !- Field 2\n' + \
'\tUntil: 06:00, !- Field 3\n' + \
'\t0.0, !- Field 4\n' + \
'\tUntil: 22:00, !- Field 5\n' + \
'\t1.0, !- Field 6\n' + \
'\tUntil: 24:00, !- Field 7\n' + \
'\t0.05, !- Field 8\n' + \
'\tFor: Weekdays, !- Field 9\n' + \
'\tUntil: 06:00, !- Field 10\n' + \
'\t0.0, !- Field 11\n' + \
'\tUntil: 07:00, !- Field 12\n' + \
'\t0.1, !- Field 13\n' + \
'\tUntil: 08:00, !- Field 14\n' + \
'\t0.2, !- Field 15\n' + \
'\tUntil: 12:00, !- Field 16\n' + \
'\t0.95, !- Field 17\n' + \
'\tUntil: 13:00, !- Field 18\n' + \
'\t0.5, !- Field 19\n' + \
'\tUntil: 17:00, !- Field 20\n' + \
'\t0.95, !- Field 21\n' + \
'\tUntil: 18:00, !- Field 22\n' + \
'\t0.7, !- Field 23\n' + \
'\tUntil: 20:00, !- Field 24\n' + \
'\t0.4, !- Field 25\n' + \
'\tUntil: 22:00, !- Field 26\n' + \
'\t0.1, !- Field 27\n' + \
'\tUntil: 24:00, !- Field 28\n' + \
'\t0.05, !- Field 29\n' + \
'\tFor: Saturday, !- Field 30\n' + \
'\tUntil: 06:00, !- Field 31\n' + \
'\t0.0, !- Field 32\n' + \
'\tUntil: 08:00, !- Field 33\n' + \
'\t0.1, !- Field 34\n' + \
'\tUntil: 14:00, !- Field 35\n' + \
'\t0.5, !- Field 36\n' + \
'\tUntil: 17:00, !- Field 37\n' + \
'\t0.1, !- Field 38\n' + \
'\tUntil: 24:00, !- Field 39\n' + \
'\t0.0, !- Field 40\n' + \
'\tFor: AllOtherDays, !- Field 41\n' + \
'\tUntil: 24:00, !- Field 42\n' + \
'\t0.0; !- Field 43\n'
self.largeOfficeWorkEffSchedule = '\n' + \
'Schedule:Compact,\n' + \
'\tLarge Office_WORK_EFF_SCH, !- Name\n' + \
'\tOn/Off, !- Schedule Type Limits Name\n' + \
'\tThrough: 12/31, !- Field 1\n' + \
'\tFor: AllDays, !- Field 2\n' + \
'\tUntil: 24:00, !- Field 3\n' + \
'\t0.0; !- Field 4\n'
self.largeOfficeInfiltrationSchedule = '\n' + \
'Schedule:Compact,\n' + \
'\tLarge Office_INFIL_QUARTER_ON_SCH, !- Name\n' + \
'\tFraction, !- Schedule Type Limits Name\n' + \
'\tThrough: 12/31, !- Field 1\n' + \
'\tFor: Weekdays SummerDesignDay, !- Field 2\n' + \
'\tUntil: 06:00, !- Field 3\n' + \
'\t1.0, !- Field 4\n' + \
'\tUntil: 22:00, !- Field 5\n' + \
'\t0.25, !- Field 6\n' + \
'\tUntil: 24:00, !- Field 7\n' + \
'\t1.0, !- Field 8\n' + \
'\tFor: Saturday WinterDesignDay, !- Field 9\n' + \
'\tUntil: 06:00, !- Field 10\n' + \
'\t1.0, !- Field 11\n' + \
'\tUntil: 18:00, !- Field 12\n' + \
'\t0.25, !- Field 13\n' + \
'\tUntil: 24:00, !- Field 14\n' + \
'\t1.0, !- Field 15\n' + \
'\tFor: Sunday Holidays AllOtherDays, !- Field 16\n' + \
'\tUntil: 24:00, !- Field 17\n' + \
'\t1.0; !- Field 18\n'
self.largeOfficeClothingSchedule = '\n' + \
'Schedule:Compact,\n' + \
'\tLarge Office_CLOTHING_SCH, !- Name\n' + \
'\tFraction, !- Schedule Type Limits Name\n' + \
'\tThrough: 04/30, !- Field 1\n' + \
'\tFor: AllDays, !- Field 2\n' + \
'\tUntil: 24:00, !- Field 3\n' + \
'\t1.0, !- Field 4\n' + \
'\tThrough: 09/30, !- Field 5\n' + \
'\tFor: AllDays, !- Field 6\n' + \
'\tUntil: 24:00, !- Field 7\n' + \
'\t0.5, !- Field 8\n' + \
'\tThrough: 12/31, !- Field 9\n' + \
'\tFor: AllDays, !- Field 10\n' + \
'\tUntil: 24:00, !- Field 11\n' + \
'\t1.0; !- Field 12\n'
self.alwaysOffSchedule = '\n' + \
'Schedule:Compact,\n' + \
'\tAlways_Off, !- Name\n' + \
'\tOn/Off, !- Schedule Type Limits Name\n' + \
'\tThrough: 12/31, !- Field 1\n' + \
'\tFor: AllDays, !- Field 2\n' + \
'\tUntil: 24:00, !- Field 3\n' + \
'\t0; !- Field 4\n'
self.largeOfficeHeatingSetPtSchedule = '\n' + \
'Schedule:Compact,\n' + \
'\tLarge Office_HTGSETP_SCH,!- Name\n' + \
'\tTemperature, !- Schedule Type Limits Name\n' + \
'\tThrough: 12/31, !- Field 1\n' + \
'\tFor: Weekdays, !- Field 2\n' + \
'\tUntil: 06:00, !- Field 3\n' + \
'\t15.6, !- Field 4\n' + \
'\tUntil: 22:00, !- Field 5\n' + \
'\t21.0, !- Field 6\n' + \
'\tUntil: 24:00, !- Field 7\n' + \
'\t15.6, !- Field 8\n' + \
'\tFor: SummerDesignDay, !- Field 9\n' + \
'\tUntil: 24:00, !- Field 10\n' + \
'\t15.6, !- Field 11\n' + \
'\tFor: Saturday, !- Field 12\n' + \
'\tUntil: 06:00, !- Field 13\n' + \
'\t15.6, !- Field 14\n' + \
'\tUntil: 18:00, !- Field 15\n' + \
'\t21.0, !- Field 16\n' + \
'\tUntil: 24:00, !- Field 17\n' + \
'\t15.6, !- Field 18\n' + \
'\tFor: WinterDesignDay, !- Field 19\n' + \
'\tUntil: 24:00, !- Field 20\n' + \
'\t21.0, !- Field 21\n' + \
'\tFor: AllOtherDays, !- Field 22\n' + \
'\tUntil: 24:00, !- Field 23\n' + \
'\t15.6; !- Field 24\n'
self.largeOfficeCoolingSetPtSchedule = '\n' + \
'Schedule:Compact,\n' + \
'\tLarge Office_CLGSETP_SCH,!- Name\n' + \
'\tTemperature, !- Schedule Type Limits Name\n' + \
'\tThrough: 12/31, !- Field 1\n' + \
'\tFor: Weekdays SummerDesignDay, !- Field 2\n' + \
'\tUntil: 06:00, !- Field 3\n' + \
'\t26.7, !- Field 4\n' + \
'\tUntil: 22:00, !- Field 5\n' + \
'\t24.0, !- Field 6\n' + \
'\tUntil: 24:00, !- Field 7\n' + \
'\t26.7, !- Field 8\n' + \
'\tFor: Saturday, !- Field 9\n' + \
'\tUntil: 06:00, !- Field 10\n' + \
'\t26.7, !- Field 11\n' + \
'\tUntil: 18:00, !- Field 12\n' + \
'\t24.0, !- Field 13\n' + \
'\tUntil: 24:00, !- Field 14\n' + \
'\t26.7, !- Field 15\n' + \
'\tFor: WinterDesignDay, !- Field 16\n' + \
'\tUntil: 24:00, !- Field 17\n' + \
'\t26.7, !- Field 18\n' + \
'\tFor: AllOtherDays, !- Field 19\n' + \
'\tUntil: 24:00, !- Field 20\n' + \
'\t26.7; !- Field 21\n'
self.largeOfficeActivitySchedule = '\n' + \
'Schedule:Compact,\n' + \
'\tLarge Office_ACTIVITY_SCH, !- Name\n' + \
'\tAny Number, !- Schedule Type Limits Name\n' + \
'\tThrough: 12/31, !- Field 1\n' + \
'\tFor: AllDays, !- Field 2\n' + \
'\tUntil: 24:00, !- Field 3\n' + \
'\t120; !- Field 4\n'
self.alwaysOnSchedule = '\n' + \
'Schedule:Compact,\n' + \
'\tAlways_On, !- Name\n' + \
'\tOn/Off, !- Schedule Type Limits Name\n' + \
'\tThrough: 12/31, !- Field 1\n' + \
'\tFor: AllDays, !- Field 2\n' + \
'\tUntil: 24:00, !- Field 3\n' + \
'\t1; !- Field 4\n'
self.largeOfficeLightingSchedule = '\n' + \
'Schedule:Compact,\n' + \
'\tLarge Office_BLDG_LIGHT_SCH, !- Name\n' + \
'\tFraction, !- Schedule Type Limits Name\n' + \
'\tThrough: 12/31, !- Field 1\n' + \
'\tFor: Weekdays, !- Field 2\n' + \
'\tUntil: 05:00, !- Field 3\n' + \
'\t0.05, !- Field 4\n' + \
'\tUntil: 07:00, !- Field 5\n' + \
'\t0.1, !- Field 6\n' + \
'\tUntil: 08:00, !- Field 7\n' + \
'\t0.3, !- Field 8\n' + \
'\tUntil: 17:00, !- Field 9\n' + \
'\t0.9, !- Field 10\n' + \
'\tUntil: 18:00, !- Field 11\n' + \
'\t0.7, !- Field 12\n' + \
'\tUntil: 20:00, !- Field 13\n' + \
'\t0.5, !- Field 14\n' + \
'\tUntil: 22:00, !- Field 15\n' + \
'\t0.3, !- Field 16\n' + \
'\tUntil: 23:00, !- Field 17\n' + \
'\t0.1, !- Field 18\n' + \
'\tUntil: 24:00, !- Field 19\n' + \
'\t0.05, !- Field 20\n' + \
'\tFor: Saturday, !- Field 21\n' + \
'\tUntil: 06:00, !- Field 22\n' + \
'\t0.05, !- Field 23\n' + \
'\tUntil: 08:00, !- Field 24\n' + \
'\t0.1, !- Field 25\n' + \
'\tUntil: 14:00, !- Field 26\n' + \
'\t0.5, !- Field 27\n' + \
'\tUntil: 17:00, !- Field 28\n' + \
'\t0.15, !- Field 29\n' + \
'\tUntil: 24:00, !- Field 30\n' + \
'\t0.05, !- Field 31\n' + \
'\tFor: SummerDesignDay, !- Field 32\n' + \
'\tUntil: 24:00, !- Field 33\n' + \
'\t1.0, !- Field 34\n' + \
'\tFor: WinterDesignDay, !- Field 35\n' + \
'\tUntil: 24:00, !- Field 36\n' + \
'\t0.0, !- Field 37\n' + \
'\tFor: AllOtherDays, !- Field 38\n' + \
'\tUntil: 24:00, !- Field 39\n' + \
'\t0.05; !- Field 40\n'
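# BuildingProgramsLib maps DOE reference building program names (and integer indices)
# to themselves, and lists the zone programs available inside each building program.
# Usage sketch: BuildingProgramsLib().bldgPrograms[0] -> 'Office';
#               BuildingProgramsLib().zonePrograms['Office'][3] -> 'OpenOffice'.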
class BuildingProgramsLib(object):
def __init__(self):
self.bldgPrograms = {
0 : 'Office',
1 : 'Retail',
2 : 'MidriseApartment',
3 : 'PrimarySchool',
4 : 'SecondarySchool',
5 : 'SmallHotel',
6 : 'LargeHotel',
7 : 'Hospital',
8 : 'Outpatient',
9 : 'Warehouse',
10 : 'SuperMarket',
11 : 'FullServiceRestaurant',
12 : 'QuickServiceRestaurant',
'Office' : 'Office',
'Retail' : 'Retail',
'MidriseApartment' : 'MidriseApartment',
'PrimarySchool' : 'PrimarySchool',
'SecondarySchool' : 'SecondarySchool',
'SmallHotel' : 'SmallHotel',
'LargeHotel' : 'LargeHotel',
'Hospital' : 'Hospital',
'Outpatient' : 'Outpatient',
'Warehouse' : 'Warehouse',
'SuperMarket' : 'SuperMarket',
'FullServiceRestaurant' : 'FullServiceRestaurant',
'QuickServiceRestaurant' : 'QuickServiceRestaurant'}
self.zonePrograms = { "MidriseApartment" : {
0: "Apartment",
1: "Office",
2: "Corridor",
},
'Outpatient' : {
0: "IT_Room",
1: "ProcedureRoom",
2: "Conference",
3: "MedGas",
4: "Janitor",
5: "Cafe",
6: "OR",
7: "PhysicalTherapy",
8: "Lobby",
9: "Xray",
10: "MRI_Control",
11: "Toilet",
12: "Elec/MechRoom",
13: "Stair",
14: "PACU",
15: "Anesthesia",
16: "MRI",
17: "CleanWork",
18: "NurseStation",
19: "PreOp",
20: "Lounge",
21: "BioHazard",
22: "Office",
23: "Hall",
24: "Soil Work",
25: "DressingRoom",
26: "Exam",
27: "LockerRoom",
},
'LargeHotel' : {
0: "Storage",
1: "Mechanical",
2: "Banquet",
3: "GuestRoom",
4: "Laundry",
5: "Retail",
6: "Kitchen",
7: "Cafe",
8: "Corridor",
9: "Lobby"
},
'FullServiceRestaurant' : {
0: "Kitchen",
1: "Dining"
},
'PrimarySchool' : {
0: "Mechanical",
1: "Library",
2: "Cafeteria",
3: "Gym",
4: "Restroom",
5: "Office",
6: "Classroom",
7: "Kitchen",
8: "Corridor",
9: "Lobby"
},
'SmallHotel' : {
0: "Storage",
1: "GuestLounge",
2: "Mechanical",
3: "StaffLounge",
4: "PublicRestroom",
5: "GuestRoom",
6: "Exercise",
7: "Laundry",
8: "Meeting",
9: "Office",
10: "Stair",
11: "Corridor"
},
'SuperMarket' : {
0: "Sales/Produce",
1: "DryStorage",
2: "Office",
3: "Deli/Bakery"
},
'SecondarySchool' : {
0: "Mechanical",
1: "Library",
2: "Auditorium",
3: "Cafeteria",
4: "Gym",
5: "Restroom",
6: "Office",
7: "Classroom",
8: "Kitchen",
9: "Corridor",
10: "Lobby"
},
'Retail' : {
0: "Back_Space",
1: "Point_of_Sale",
2: "Entry",
3: "Retail"
},
'Hospital' : {
0: "ER_Trauma",
1: "PatCorridor",
2: "ICU_PatRm",
3: "ER_NurseStn",
4: "ICU_Open",
5: "NurseStn",
6: "PhysTherapy",
7: "ICU_NurseStn",
8: "Radiology",
9: "Dining",
10: "PatRoom",
11: "OR",
12: "Office",
13: "Kitchen",
14: "Lab",
15: "ER_Exam",
16: "ER_Triage",
17: "Corridor",
18: "Lobby"
},
'Office' : {
0: "BreakRoom",
1: "Storage",
2: "Vending",
3: "OpenOffice",
4: "ClosedOffice",
5: "Conference",
6: "PrintRoom",
7: "Restroom",
8: "Elec/MechRoom",
9: "IT_Room",
10: "Stair",
11: "Corridor",
12: "Lobby"
},
'Warehouse' : {
0: "Office",
1: "Fine",
2: "Bulk"
},
'QuickServiceRestaurant' : {
0: "Kitchen",
1: "Dining"
}
}
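# EPSurfaceLib keeps legacy per-surface-type lookup tables keyed by the integer
# surface type (0 wall, 1 roof, 2 floor, 3 ceiling, 4 air wall, 5 window):
# default construction, boundary condition, and sun/wind exposure.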
class EPSurfaceLib(object):
# I think I can remove this now
def __init__(self):
# 4 represents an Air Wall
self.srfType = {0:'WALL',
1:'ROOF',
2:'FLOOR',
3:'CEILING',
4:'WALL',
5:'WINDOW'}
# surface construction should change later
# to be based on the zone program
self.srfCnstr = {0:'Exterior_Wall',
1:'Exterior_Roof',
2:'Exterior_Floor',
3:'Interior_Floor',
4:'Air_Wall',
5:'Exterior_Window'}
self.srfBC = {0:'Outdoors',
1:'Outdoors',
2: 'Outdoors',
3: 'Adiabatic',
4: 'surface',
5: 'Outdoors'}
self.srfSunExposure = {0:'SunExposed',
1:'SunExposed',
2:'SunExposed',
3:'NoSun',
4:'NoSun',
5:'SunExposed',}
self.srfWindExposure = {0:'WindExposed',
1:'WindExposed',
2:'WindExposed',
3:'NoWind',
4:'NoWind',
5:'WindExposed'}
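# A minimal usage sketch for EPZone (names here are illustrative; real inputs come
# from the Honeybee Grasshopper components):
#   zone = EPZone(zoneBrep, 0, "zone_0", program=["Office", "OpenOffice"], isConditioned=True)
#   zone.decomposeZone(maximumRoofAngle=30)   # split the closed brep into typed surfaces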
class EPZone(object):
"""This calss represents a honeybee zone that will be used for energy and daylighting
simulatios"""
def __init__(self, zoneBrep, zoneID, zoneName, program = [None, None], isConditioned = True):
self.north = 0
self.objectType = "HBZone"
self.origin = rc.Geometry.Point3d.Origin
self.geometry = zoneBrep
self.num = zoneID
self.name = zoneName
self.hasNonPlanarSrf = False
self.hasInternalEdge = False
self.mixAir = False
self.mixAirZoneList = []
self.mixAirFlowList = []
self.natVent = False
self.natVentMinIndoorTemp = 24.0
self.natVentMaxIndoorTemp = 100.0
self.natVentMinOutdoorTemp = -100.0
self.natVentMaxOutdoorTemp = 100.0
self.windowOpeningArea = 0.0
self.windowHeightDiff = 0.0
self.natVentSchedule = None
self.surfaces = []
self.daylightThreshold = ""
self.coolingSetPt= ""
self.heatingSetPt= ""
self.coolingSetback= ""
self.heatingSetback= ""
self.coolSupplyAirTemp= ""
self.heatSupplyAirTemp= ""
if zoneBrep != None:
self.isClosed = self.geometry.IsSolid
else:
self.isClosed = False
if self.isClosed:
try:
self.checkZoneNormalsDir()
except Exception, e:
print 'Checking normal directions failed:\n' + `e`
self.bldgProgram = program[0]
self.zoneProgram = program[1]
# assign schedules
self.assignScheduleBasedOnProgram()
# assign loads
self.assignLoadsBasedOnProgram()
if isConditioned: self.HVACSystem = ["GroupI", 0, None] # assign ideal loads as default
else: self.HVACSystem = ["NoHVAC", -1, None] # no system
self.isConditioned = isConditioned
self.isThisTheTopZone = False
self.isThisTheFirstZone = False
def assignScheduleBasedOnProgram(self, component = None):
# default to an open office if the program is not assigned
if self.bldgProgram == None: self.bldgProgram = "Office"
if self.zoneProgram == None: self.zoneProgram = "OpenOffice"
openStudioStandardLib = sc.sticky ["honeybee_OpenStudioStandardsFile"]
try:
schedulesAndLoads = openStudioStandardLib['space_types']['90.1-2007']['ClimateZone 1-8'][self.bldgProgram][self.zoneProgram]
except:
msg = "Either your input for bldgProgram > [" + self.bldgProgram + "] or " + \
"the input for zoneProgram > [" + self.zoneProgram + "] is not valid.\n" + \
"Use ListSpacePrograms component to find the available programs."
print msg
if component != None:
component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
return
self.occupancySchedule = schedulesAndLoads['occupancy_sch']
self.occupancyActivitySch = schedulesAndLoads['occupancy_activity_sch']
self.heatingSetPtSchedule = schedulesAndLoads['heating_setpoint_sch']
self.coolingSetPtSchedule = schedulesAndLoads['cooling_setpoint_sch']
self.lightingSchedule = schedulesAndLoads['lighting_sch']
self.equipmentSchedule = schedulesAndLoads['elec_equip_sch']
self.infiltrationSchedule = schedulesAndLoads['infiltration_sch']
# find all the parameters and assign them to the zone
self.isSchedulesAssigned = True
def assignLoadsBasedOnProgram(self, component=None):
# default to an open office if the program is not assigned
if self.bldgProgram == None: self.bldgProgram = "Office"
if self.zoneProgram == None: self.zoneProgram = "OpenOffice"
openStudioStandardLib = sc.sticky ["honeybee_OpenStudioStandardsFile"]
try:
schedulesAndLoads = openStudioStandardLib['space_types']['90.1-2007']['ClimateZone 1-8'][self.bldgProgram][self.zoneProgram]
except:
msg = "Either your input for bldgProgram > [" + self.bldgProgram + "] or " + \
"the input for zoneProgram > [" + self.zoneProgram + "] is not valid.\n" + \
"Use ListSpacePrograms component to find the available programs."
print msg
if component != None:
component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
return
# numbers in OpenStudio standard library are in IP and I have to convert them to SI!
self.equipmentLoadPerArea = schedulesAndLoads['elec_equip_per_area'] * 10.763961 #Per ft^2 to Per m^2
self.infiltrationRatePerArea = schedulesAndLoads['infiltration_per_area_ext'] * 0.00508001 #1 ft3/min.ft2 = 5.08001016E-03 m3/s.m2
self.lightingDensityPerArea = schedulesAndLoads['lighting_w_per_area'] * 10.763961 #Per ft^2 to Per m^2
self.numOfPeoplePerArea = schedulesAndLoads[ 'occupancy_per_area'] * 10.763961 /1000 #Per 1000 ft^2 to Per m^2
self.ventilationPerArea = schedulesAndLoads['ventilation_per_area'] * 0.00508001 #1 ft3/min.ft2 = 5.08001016E-03 m3/s.m2
self.ventilationPerPerson = schedulesAndLoads['ventilation_per_person'] * 0.0004719 #1 ft3/min.person = 4.71944743E-04 m3/s.person
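# Worked example of the IP -> SI conversions above (values are illustrative):
#   1.0 W/ft2 of lighting     * 10.763961  ~= 10.76 W/m2
#   0.2 cfm/ft2 infiltration  * 0.00508001 ~= 0.00102 m3/s per m2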
self.isLoadsAssigned = True
def getCurrentSchedules(self, returnDictionary = False, component = None):
# assign the defaults if no schedules are assigned yet
if not self.isSchedulesAssigned:
self.assignScheduleBasedOnProgram(component)
if not returnDictionary:
report = " Schedule list:\n" + \
"occupancySchedule: " + str(self.occupancySchedule) + "\n" + \
"occupancyActivitySch: " + str(self.occupancyActivitySch) + "\n" + \
"heatingSetPtSchedule: " + str(self.heatingSetPtSchedule) + "\n" + \
"coolingSetPtSchedule: " + str(self.coolingSetPtSchedule) + "\n" + \
"lightingSchedule: " + str(self.lightingSchedule) + "\n" + \
"equipmentSchedule: " + str(self.equipmentSchedule) + "\n" + \
"infiltrationSchedule: " + str(self.infiltrationSchedule) + "."
return report
else:
scheduleDict = {"occupancySchedule" : str(self.occupancySchedule),
"occupancyActivitySch" : str(self.occupancyActivitySch),
"heatingSetPtSchedule" :str(self.heatingSetPtSchedule),
"coolingSetPtSchedule" : str(self.coolingSetPtSchedule),
"lightingSchedule" : str(self.lightingSchedule),
"equipmentSchedule" : str(self.equipmentSchedule),
"infiltrationSchedule" : str(self.infiltrationSchedule)}
return scheduleDict
def getCurrentLoads(self, returnDictionary = False, component = None):
# assign the defaults if no loads are assigned yet
if not self.isLoadsAssigned:
self.assignLoadsBasedOnProgram(component)
if not returnDictionary:
report = " Internal Loads [SI]:\n" + \
"EquipmentsLoadPerArea: " + "%.4f"%self.equipmentLoadPerArea + "\n" + \
"infiltrationRatePerArea: " + "%.4f"%self.infiltrationRatePerArea + "\n" + \
"lightingDensityPerArea: " + "%.4f"%self.lightingDensityPerArea + "\n" + \
"numOfPeoplePerArea: " + "%.4f"%self.numOfPeoplePerArea + "\n" + \
"ventilationPerPerson: " + "%.4f"%self.ventilationPerPerson + "\n" + \
"ventilationPerArea: " + "%.4f"%self.ventilationPerArea + "."
return report
else:
loadsDict = {"EquipmentsLoadPerArea" : "%.4f"%self.equipmentLoadPerArea,
"infiltrationRatePerArea" : "%.4f"%self.infiltrationRatePerArea,
"lightingDensityPerArea" : "%.4f"%self.lightingDensityPerArea,
"numOfPeoplePerArea" : "%.4f"%self.numOfPeoplePerArea,
"ventilationPerArea" : "%.4f"%self.ventilationPerArea,
"ventilationPerPerson" : "%.4f"%self.ventilationPerPerson}
return loadsDict
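# Note: getCurrentSchedules()/getCurrentLoads() return a formatted report string by
# default, or a dictionary keyed by schedule/load name when returnDictionary is True.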
def joinMesh(self, meshList):
joinedMesh = rc.Geometry.Mesh()
for m in meshList: joinedMesh.Append(m)
return joinedMesh
def checkZoneNormalsDir(self):
def checkSrfNormal(HBSrf, printAngle = False):
#create a plane from the surface
srfPlane = rc.Geometry.Plane(HBSrf.cenPt, HBSrf.normalVector)
# project center point of the geometry to surface plane
projectedPt = srfPlane.ClosestPoint(self.cenPt)
# make a vector from the center point of the zone to center point of the surface
testVector = rc.Geometry.Vector3d(projectedPt - self.cenPt)
# check the direction of the vectors and flip zone surfaces if needed
vecAngleDiff = math.degrees(rc.Geometry.Vector3d.VectorAngle(testVector, HBSrf.normalVector))
# vecAngleDiff should be 0 otherwise the normal is reversed
if printAngle:
print vecAngleDiff
if vecAngleDiff > 10:
HBSrf.geometry.Flip()
HBSrf.normalVector.Reverse()
if not HBSrf.isChild and HBSrf.hasChild:
for childSrf in HBSrf.childSrfs:
checkSrfNormal(childSrf)
# isPointInside for Breps is buggy, that's why I mesh the geometry here
mesh = rc.Geometry.Mesh.CreateFromBrep(self.geometry)
joinedMesh = self.joinMesh(mesh)
"""check normal direction of the surfaces"""
MP3D = rc.Geometry.AreaMassProperties.Compute(self.geometry)
self.cenPt = MP3D.Centroid
MP3D.Dispose()
#Check if the centroid is inside the volume.
if joinedMesh.IsPointInside(self.cenPt, sc.doc.ModelAbsoluteTolerance, True) != True:
# point is not inside so this method can't be used
print "Honeybee cannot check normal directions for " + self.name
return
for HBSrf in self.surfaces:
checkSrfNormal(HBSrf)
def decomposeZone(self, maximumRoofAngle = 30):
# this method is useful when the zone is going to be constructed from a closed brep
# materials will be applied based on the zones construction set
#This check fails for any L-shaped zone so it has been disabled. We check the normals well elsewhere.
def checkGHSrfNormal(GHSrf, printAngle = False):
cenPt, normalVector = self.getSrfCenPtandNormal(surface)
#create a plane from the surface
#srfPlane = rc.Geometry.Plane(cenPt, normalVector)
# project center point of the geometry to surface plane
#projectedPt = srfPlane.ClosestPoint(self.cenPt)
# make a vector from the center point of the zone to center point of the surface
#testVector = rc.Geometry.Vector3d(projectedPt - self.cenPt)
# check the direction of the vectors and flip zone surfaces if needed
#vecAngleDiff = math.degrees(rc.Geometry.Vector3d.VectorAngle(testVector, normalVector))
# vecAngleDiff should be 0 otherwise the normal is reversed
#if printAngle:
# print vecAngleDiff
#if vecAngleDiff > 10:
# print vecAngleDiff
# GHSrf.Flip()
# normalVector.Reverse()
return normalVector, GHSrf
# explode zone
for i in range(self.geometry.Faces.Count):
surface = self.geometry.Faces[i].DuplicateFace(False)
# check surface Normal
normal, surface = checkGHSrfNormal(surface)
angle2Z = math.degrees(rc.Geometry.Vector3d.VectorAngle(normal, rc.Geometry.Vector3d.ZAxis))
if angle2Z < maximumRoofAngle or angle2Z > 360- maximumRoofAngle:
# roof is the right assumption
# it will change to ceiling after solveAdj if it is a ceiling
surafceType = 1 #roof
#if self.isThisTheTopZone: surafceType = 1 #roof
#else: surafceType = 3 # ceiling
elif 160 < angle2Z <200:
surafceType = 2 # floor
else: surafceType = 0 #wall
HBSurface = hb_EPZoneSurface(surface, i, self.name + '_Srf_' + `i`, self, surafceType)
self.addSrf(HBSurface)
def createZoneFromSurfaces(self, maximumRoofAngle = 30):
# this method recreate the geometry from the surfaces
srfs = []
# check if surface has a type
for srf in self.surfaces:
srf.parent = self
# check planarity and set it for parent zone
if not srf.isPlanar:
self.hasNonPlanarSrf = True
if srf.hasInternalEdge:
self.hasInternalEdge = True
# also check for internal edges
surface = srf.geometry.Faces[0].DuplicateFace(False)
#print surface
srfs.append(surface)
try:
surfaceType = srf.type
except:
srf.type = srf.getTypeByNormalAngle()
srf.reEvaluateType(True)
# check for child surfaces
if srf.hasChild: srf.calculatePunchedSurface()
# assign construction
srf.construction = srf.cnstrSet[srf.type]
if srf.EPConstruction == "":
srf.EPConstruction = srf.construction
try:
self.geometry = rc.Geometry.Brep.JoinBreps(srfs, sc.doc.ModelAbsoluteTolerance)[0]
self.isClosed = self.geometry.IsSolid
if self.isClosed:
try:
self.checkZoneNormalsDir()
except Exception, e:
print '0_Check Zone Normals Direction Failed:\n' + `e`
else:
MP3D = rc.Geometry.AreaMassProperties.Compute(self.geometry)
self.cenPt = MP3D.Centroid
MP3D.Dispose()
except Exception, e:
print " Failed to create the geometry from the surface:\n" + `e`
def getSrfCenPtandNormal(self, surface):
surface = surface.Faces[0]
u_domain = surface.Domain(0)
v_domain = surface.Domain(1)
centerU = (u_domain.Min + u_domain.Max)/2
centerV = (v_domain.Min + v_domain.Max)/2
centerPt = surface.PointAt(centerU, centerV)
normalVector = surface.NormalAt(centerU, centerV)
normalVector.Unitize()
return centerPt, normalVector
def addSrf(self, srf):
self.surfaces.append(srf)
def updateConstructionSet(self, newProgramCode, level = 1):
"""level defines the level of the construction set
0: low performance; 1: normal; 2: high performance"""
self.constructionSet = constructionSet[newProgramCode]
def cleanMeshedFaces(self):
for srf in self.surfaces: srf.disposeCurrentMeshes()
def prepareNonPlanarZone(self, meshingParameters = None, isEnergyPlus = False):
# clean current meshedFaces
self.cleanMeshedFaces()
# collect walls and windows, and roofs
srfsToBeMeshed = []
for srf in self.surfaces:
#clean the meshedFaces if any
# if surface is planar just collect the surface
if srf.isPlanar or not srf.hasChild: srfsToBeMeshed.append(srf.geometry)
# else collect the punched wall and child surfaces
else:
for fenSrf in srf.childSrfs:
srfsToBeMeshed.append(fenSrf.geometry)
srfsToBeMeshed.append(fenSrf.parent.punchedGeometry)
# join surfaces
joinedBrep = rc.Geometry.Brep.JoinBreps(srfsToBeMeshed, sc.doc.ModelAbsoluteTolerance)[0]
# mesh the geometry
if meshingParameters == None or type(meshingParameters)!= rc.Geometry.MeshingParameters:
mp = rc.Geometry.MeshingParameters.Default; disFactor = 3
else:
disFactor = 1
mp = meshingParameters
meshedGeo = rc.Geometry.Mesh.CreateFromBrep(joinedBrep, mp)
for mesh in meshedGeo:
# generate quad surfaces for EnergyPlus model
# if isEnergyPlus:
# angleTol = sc.doc.ModelAngleToleranceRadians
# minDiagonalRatio = .875
# #print mesh.Faces.ConvertTrianglesToQuads(angleTol, minDiagonalRatio)
# mesh.Faces.ConvertTrianglesToQuads(angleTol, minDiagonalRatio)
mesh.FaceNormals.ComputeFaceNormals()
#mesh.FaceNormals.UnitizeFaceNormals()
for faceIndex in range(mesh.Faces.Count):
normal = mesh.FaceNormals[faceIndex]
cenPt = mesh.Faces.GetFaceCenter(faceIndex)
##check mesh normal direction
reverseList = False
## make a vector from the center point of the zone to center point of the surface
testVector = rc.Geometry.Vector3d(cenPt - self.cenPt)
## check the direction of the vectors and flip zone surfaces if needed
if rc.Geometry.Vector3d.VectorAngle(testVector, normal)> 1:
normal.Reverse()
reverseList = True
## create a ray
#ray = rc.Geometry.Ray3d(cenPt, normal)
for srf in self.surfaces:
if srf.isPlanar or not srf.hasChild:
## shoot a ray from the center of the mesh to each surface
#intPt = rc.Geometry.Intersect.Intersection.RayShoot(ray, [srf.geometry], 1)
#if intPt:
if cenPt.DistanceTo(srf.geometry.ClosestPoint(cenPt))<0.05 * disFactor:
srf.collectMeshFaces(mesh.Faces.GetFaceVertices(faceIndex), reverseList) ## if hit then add this face to that surface
break
else:
for fenSrf in srf.childSrfs:
#intPt = rc.Geometry.Intersect.Intersection.RayShoot(ray, [fenSrf.geometry], 1)
#if intPt:
if cenPt.DistanceTo(fenSrf.geometry.ClosestPoint(cenPt))<0.05 * disFactor:
fenSrf.collectMeshFaces(mesh.Faces.GetFaceVertices(faceIndex), reverseList); break
#intPt = rc.Geometry.Intersect.Intersection.RayShoot(ray, [fenSrf.parent.punchedGeometry], 1)
#if intPt:
if cenPt.DistanceTo(fenSrf.parent.punchedGeometry.ClosestPoint(cenPt))<0.05 * disFactor:
srf.collectMeshFaces(mesh.Faces.GetFaceVertices(faceIndex), reverseList); break
def getFloorArea(self):
totalFloorArea = 0
for HBSrf in self.surfaces:
if int(HBSrf.type) == 2:
totalFloorArea += HBSrf.getTotalArea()
return totalFloorArea
def getZoneVolume(self):
return self.geometry.GetVolume()
def getFloorZLevel(self):
# useful for gbXML export
minZ = float("inf")
for HBSrf in self.surfaces:
if int(HBSrf.type) == 2:
#get the center point
centerPt, normalVector = HBSrf.getSrfCenPtandNormalAlternate()
if centerPt.Z < minZ: minZ = centerPt.Z
return minZ
def setName(self, newName):
self.name = newName
def __str__(self):
try:
return 'Zone name: ' + self.name + \
'\nZone program: ' + self.bldgProgram + "::" + self.zoneProgram + \
'\n# of surfaces: ' + `len(self.surfaces)` + \
'\n-------------------------------------'
except:
return 'Zone name: ' + self.name + \
'\nZone program: Unknown' + \
'\n# of surfaces: ' + `len(self.surfaces)` + \
'\n-------------------------------------'
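# Usage sketch (illustrative only; inHBZones are existing Honeybee zone objects):
#   reEvaluator = hb_reEvaluateHBZones(inHBZones, meshingParameters)
#   reEvaluator.evaluateZones()   # zone surfaces are replaced with planar ones in place
#   planarZones = reEvaluator.originalHBZones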
class hb_reEvaluateHBZones(object):
"""
This class checks Honeybee zones once more and, for zones with nonplanar surfaces
or non-rectangular glazings, recreates the surfaces so that all output zones
have planar surfaces and can be exported with the two functions
for planar EPSurfaces and planar fenestration.
It also assigns the right boundary condition object to each sub surface,
checks for duplicate zone and surface names, and gives a warning
to the user to get them fixed.
"""
def __init__(self, inHBZones, meshingParameters):
# import the classes
self.hb_EPZone = sc.sticky["honeybee_EPZone"]
self.hb_EPSrf = sc.sticky["honeybee_EPSurface"]
self.hb_EPZoneSurface = sc.sticky["honeybee_EPZoneSurface"]
self.hb_EPFenSurface = sc.sticky["honeybee_EPFenSurface"]
self.fakeSurface = rc.Geometry.Brep.CreateFromCornerPoints(
rc.Geometry.Point3d(0,0.5,0),
rc.Geometry.Point3d(-0.5,-0.5,0),
rc.Geometry.Point3d(0.5,-0.5,0),
sc.doc.ModelAbsoluteTolerance)
self.originalHBZones = inHBZones
self.meshingParameters = meshingParameters
#self.triangulate = triangulate
self.zoneNames = []
self.srfNames = []
self.modifiedSrfsNames= []
self.modifiedGlzSrfsNames = []
self.adjcGlzSrfCollection = []
self.adjcSrfCollection = {} #collect adjacent surfaces for nonplanar surfaces
def checkSrfNameDuplication(self, surface):
if surface.name in self.srfNames:
warning = "Duplicate surface name!"
name = surface.name
while name in self.srfNames:
name += "_Dup"
surface.name = name
print warning + " Name is changed to: " + surface.name
self.srfNames.append(surface.name)
if not surface.isChild and surface.hasChild:
for child in surface.childSrfs:
self.checkSrfNameDuplication(child)
def checkNameDuplication(self, HBZone):
if HBZone.name in self.zoneNames:
warning = "Duplicate zone name!"
name = HBZone.name
while name in self.zoneNames:
name += "_Dup"
HBZone.name = name
print warning + " Name is changed to: " + HBZone.name
self.zoneNames.append(HBZone.name)
for surface in HBZone.surfaces:
self.checkSrfNameDuplication(surface)
def prepareNonPlanarZones(self, HBZone):
# prepare nonplanar zones
if HBZone.hasNonPlanarSrf or HBZone.hasInternalEdge:
HBZone.prepareNonPlanarZone(self.meshingParameters)
def createSurface(self, pts):
"""
# it takes so long if I generate the geometry
if len(pts) == 3:
return rc.Geometry.Brep.CreateFromCornerPoints(pts[0], pts[1], pts[2], sc.doc.ModelAbsoluteTolerance)
elif len(pts) == 4:
return rc.Geometry.Brep.CreateFromCornerPoints(pts[0], pts[1], pts[2], pts[3], sc.doc.ModelAbsoluteTolerance)
else:
# create a planar surface
pts.append(pts[0])
pl = rc.Geometry.Polyline(pts).ToNurbsCurve()
return rc.Geometry.Brep.CreatePlanarBreps([pl])[0]
"""
return self.fakeSurface
def evaluateZones(self):
for HBZone in self.originalHBZones:
self.checkNameDuplication(HBZone)
self.prepareNonPlanarZones(HBZone)
modifiedSurfaces = []
for surface in HBZone.surfaces:
srfs = self.checkZoneSurface(surface)
try: modifiedSurfaces.extend(srfs)
except: modifiedSurfaces.append(srfs)
# replace surfaces with new ones
HBZone.surfaces = []
for HBSrf in modifiedSurfaces:
HBZone.surfaces.append(HBSrf)
def createSubSurfaceFromBaseSrf(self, surface, newSurfaceName, count, coordinates, glazingBase = False, nameAddition = None):
# pass the wrong geometry for now. I assume creating planar surface from
# coordinates will be computationally heavy and at this point geometry doesn't
# matter, since I have the coordinates.
newSurface = self.hb_EPZoneSurface(self.createSurface(coordinates),
count, newSurfaceName, surface.parent, surface.type)
newSurface.coordinates = coordinates
newSurface.type = surface.type # protect the surface from reEvaluate
newSurface.construction = surface.construction
newSurface.EPConstruction = surface.EPConstruction
newSurface.BC = surface.BC
newSurface.sunExposure = surface.sunExposure
newSurface.windExposure = surface.windExposure
newSurface.groundViewFactor = surface.groundViewFactor
if surface.BC.upper() == 'SURFACE':
adjcSurface = surface.BCObject
if not glazingBase:
newAdjcSurfaceName = adjcSurface.name + "_srfP_" + `count`
else:
try: newAdjcSurfaceName = adjcSurface.name + str(nameAddition)
except: newAdjcSurfaceName = adjcSurface.name + "_"
newAdjcSurface = self.hb_EPZoneSurface(self.createSurface(coordinates),
count, newAdjcSurfaceName, adjcSurface.parent, adjcSurface.type)
# reverse the order of points
restOfcoordinates = list(coordinates[1:])
restOfcoordinates.reverse()
newAdjcSurface.coordinates = [coordinates[0]] + restOfcoordinates
newAdjcSurface.type = adjcSurface.type
newAdjcSurface.construction = adjcSurface.construction
newAdjcSurface.EPConstruction = adjcSurface.EPConstruction
newAdjcSurface.BC = adjcSurface.BC
newAdjcSurface.sunExposure = adjcSurface.sunExposure
newAdjcSurface.windExposure = adjcSurface.windExposure
newAdjcSurface.groundViewFactor = adjcSurface.groundViewFactor
# assign boundary objects
newSurface.BCObject = newAdjcSurface
newAdjcSurface.BCObject = newSurface
self.adjcSrfCollection[adjcSurface.name].append(newAdjcSurface)
return newSurface
def createSubGlzSurfaceFromBaseSrf(self, baseChildSurface, parentSurface, glzSurfaceName, count, coordinates):
newFenSrf = self.hb_EPFenSurface(self.createSurface(coordinates),
count, glzSurfaceName, parentSurface, 5, punchedWall = None)
newFenSrf.coordinates = coordinates
newFenSrf.type = baseChildSurface.type
newFenSrf.construction = baseChildSurface.construction
newFenSrf.EPConstruction = baseChildSurface.EPConstruction
newFenSrf.parent = parentSurface
newFenSrf.groundViewFactor = baseChildSurface.groundViewFactor
newFenSrf.shadingControlName = baseChildSurface.shadingControlName
newFenSrf.frameName = baseChildSurface.frameName
newFenSrf.Multiplier = baseChildSurface.Multiplier
newFenSrf.blindsMaterial = baseChildSurface.blindsMaterial
newFenSrf.shadingControl = baseChildSurface.shadingControl
newFenSrf.shadingSchName = baseChildSurface.shadingSchName
# Will be overwritten later if needed
newFenSrf.BCObject = baseChildSurface.BCObject
return newFenSrf
def getInsetGlazingCoordinates(self, glzCoordinates):
# find the coordinates
def averagePts(ptList):
pt = rc.Geometry.Point3d(0,0,0)
for p in ptList: pt = pt + p
return rc.Geometry.Point3d(pt.X/len(ptList), pt.Y/len(ptList), pt.Z/len(ptList))
distance = 2 * sc.doc.ModelAbsoluteTolerance
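# The glazing points are pulled in toward the face center by twice the model
# tolerance so the flattened window sits strictly inside its new planar parent face.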
# offset was so slow so I changed the method to this
pts = []
for pt in glzCoordinates:
pts.append(rc.Geometry.Point3d(pt.X, pt.Y, pt.Z))
cenPt = averagePts(pts)
insetPts = []
for pt in pts:
movingVector = rc.Geometry.Vector3d(cenPt-pt)
movingVector.Unitize()
newPt = rc.Geometry.Point3d.Add(pt, movingVector * 2 * sc.doc.ModelAbsoluteTolerance)
insetPts.append(newPt)
return insetPts
def checkChildSurfaces(self, surface):
def isRectangle(ptList):
vector1 = rc.Geometry.Vector3d(ptList[0] - ptList[1])
vector2 = rc.Geometry.Vector3d(ptList[1] - ptList[2])
vector3 = rc.Geometry.Vector3d(ptList[2] - ptList[3])
vector4 = rc.Geometry.Vector3d(ptList[3] - ptList[0])
if ptList[0].DistanceTo(ptList[2]) != ptList[1].DistanceTo(ptList[3]) or \
math.degrees(rc.Geometry.Vector3d.VectorAngle(vector1, vector2))!= 90 or \
math.degrees(rc.Geometry.Vector3d.VectorAngle(vector3, vector4))!= 90:
return False
else:
return True
def isAntiClockWise(pts, faceNormal):
def crossProduct(vector1, vector2):
return vector1.X * vector2.X + vector1.Y * vector2.Y + vector1.Z * vector2.Z
# check if the vertex order is anti-clockwise (crossProduct above actually computes a dot product)
vector0 = rc.Geometry.Vector3d(pts[1]- pts[0])
vector1 = rc.Geometry.Vector3d(pts[-1]- pts[0])
ptsNormal = rc.Geometry.Vector3d.CrossProduct(vector0, vector1)
# in case points are anti-clockwise then normals should be parallel
if crossProduct(ptsNormal, faceNormal) > 0:
return True
return False
# get glazing coordinates - coordinates will be returned as lists of lists
glzCoordinates = surface.extractGlzPoints()
# make sure order is right
for coorList in glzCoordinates:
if not isAntiClockWise(coorList, surface.normalVector):
coorList.reverse()
glzSrfs = []
if surface.isPlanar:
for count, coordinates in enumerate(glzCoordinates):
try: child = surface.childSrfs[count]
except: child = surface.childSrfs[0]
if len(glzCoordinates) == 1: #not hasattr(glzCoordinates, '__iter__'):
# single rectangular glazing - All should be fine
# also the adjacent surface will be fine by itself
child.coordinates = coordinates
self.modifiedGlzSrfsNames.append(child.name)
else:
# surface is planar but glazing is not rectangular
# and so it is meshed now and is multiple glazing
glzSurfaceName = surface.name + "_glz_" + `count`
# create glazing surface
HBGlzSrf = self.createSubGlzSurfaceFromBaseSrf(child, surface, glzSurfaceName, count, coordinates)
# create adjacent glazing in case needed
if surface.BC.upper() == 'SURFACE':
# add glazing to adjacent surface
if count == 0:
adjcSrf = surface.BCObject
adjcSrf.childSrfs = []
# add glazing to adjacent surface
adjcSrf = surface.BCObject
glzAdjcSrfName = adjcSrf.name + "_glz_" + `count`
adjcGlzPt = glzCoordinates[1:]
adjcGlzPt.reverse()
adjcGlzPt = [glzCoordinates[0]] + adjcGlzPt
adjHBGlzSrf = self.createSubGlzSurfaceFromBaseSrf(child, adjcSrf, glzAdjcSrfName, count, adjcGlzPt)
# overwrite BC Object
adjHBGlzSrf.BCObject = HBGlzSrf
HBGlzSrf.BCObject = adjHBGlzSrf
adjcSrf.addChildSrf(adjHBGlzSrf)
# collect surfaces
glzSrfs.append(HBGlzSrf)
# add to parent surface
if len(glzCoordinates) != 1:
surface.removeAllChildSrfs()
surface.addChildSrf(glzSrfs)
else:
# convert nonplanar surface to planar wall surfaces with offset glazing
# and treat them similar to other surfaces except the fact that if it has
# another surface next to it the surface should be generated regardless of
# being single geometry or not
newSurfaces =[]
count = 0
baseChildSrf = surface.childSrfs[0]
for count, glzCoordinate in enumerate(glzCoordinates):
# check if the points form a rectangle
if len(glzCoordinate) == 3 or isRectangle(glzCoordinate):
insetGlzCoordinates = [glzCoordinate]
else:
# triangulate
insetGlzCoordinates = [glzCoordinate[:3], [glzCoordinate[0],glzCoordinate[2],glzCoordinate[3]]]
for glzCount, insetGlzCoordinate in enumerate(insetGlzCoordinates):
# self.modifiedGlzSrfsNames.append(child.name)
# create new Honeybee surfaces as parent surface for glass face
if len(insetGlzCoordinates) == 1:
newSurfaceName = surface.name + '_glzP_' + `count`
else:
newSurfaceName = surface.name + '_glzP_' + `count` + '_' + `glzCount`
newSurface = self.createSubSurfaceFromBaseSrf(surface, newSurfaceName, count, insetGlzCoordinate, glazingBase = True, nameAddition = '_glzP_' + `count` + '_' + `glzCount`)
# collect them here so it will have potential new BC
newSurfaces.append(newSurface)
# create glazing coordinate and add it to the parent surface
insetPts = self.getInsetGlazingCoordinates(insetGlzCoordinate)
# create new window and go for it
glzSurfaceName = newSurface.name + "_glz_" + `count`
HBGlzSrf = self.createSubGlzSurfaceFromBaseSrf(baseChildSrf, newSurface, glzSurfaceName, count, insetPts)
if surface.BC.upper() == 'SURFACE':
# add glazing to adjacent surface
if count == 0:
adjcSrf = newSurface.BCObject
adjcSrf.childSrfs = []
# add glazing to adjacent surface
adjcSrf = newSurface.BCObject
glzAdjcSrfName = adjcSrf.name + "_glz_" + `count`
adjcGlzPt = insetPts[1:]
adjcGlzPt.reverse()
adjcGlzPt = [insetPts[0]] + adjcGlzPt
adjHBGlzSrf = self.createSubGlzSurfaceFromBaseSrf(baseChildSrf, adjcSrf, glzAdjcSrfName, count, adjcGlzPt)
# overwrite BC Object
adjHBGlzSrf.BCObject = HBGlzSrf
HBGlzSrf.BCObject = adjHBGlzSrf
adjcSrf.addChildSrf(adjHBGlzSrf)
# add to parent surface
newSurface.addChildSrf(HBGlzSrf)
return newSurfaces
def checkZoneSurface(self, surface):
if not hasattr(surface, 'coordinates'):
coordinatesL = surface.extractPoints()
else:
coordinatesL = surface.coordinates
# case 0 : it is a planar surface so it is all fine
if not hasattr(coordinatesL[0], '__iter__'):
# it is a single surface so just let it go to the modified list
surface.coordinates = coordinatesL
self.modifiedSrfsNames.append(surface.name)
if not surface.isChild and surface.hasChild:
self.checkChildSurfaces(surface)
return surface
# case 1 : it is not planar
else:
# case 1-1 : surface is a nonplanar surface and adjacent to another surface
# sub surfaces has been already generated based on the adjacent surface
if surface.BC.upper() == 'SURFACE' and surface.name in self.adjcSrfCollection.keys():
# print "collecting sub surfaces for surface " + surface.name
# surface has been already generated by the other adjacent surface
self.modifiedSrfsNames.append(surface.name)
return self.adjcSrfCollection[surface.name]
# case 1-2 : surface is a nonplanar surface and adjacent to another surface
# and hasn't been generated so let's generate this surface and the adjacent one
elif surface.BC.upper() == 'SURFACE':
adjcSurface= surface.BCObject
# find adjacent zone and create the surfaces
# create a place holder for the surface
# the surfaces will be collected inside the function
self.adjcSrfCollection[adjcSurface.name] = []
self.modifiedSrfsNames.append(surface.name)
newSurfaces = []
for count, coordinates in enumerate(coordinatesL):
# create new Honeybee surfaces
# makes sense to copy the original surface here but since
# copy.deepcopy fails on a number of systems I just create
# a new surface and assign necessary data to write the surface
newSurfaceName = surface.name + "_srfP_" + `count`
newSurface = self.createSubSurfaceFromBaseSrf(surface, newSurfaceName, count, coordinates)
newSurfaces.append(newSurface)
# nonplanar surface
if not surface.isChild and surface.hasChild:
glzPSurfaces = self.checkChildSurfaces(surface)
if glzPSurfaces != None:
newSurfaces += glzPSurfaces
return newSurfaces
class hb_EPSurface(object):
def __init__(self, surface, srfNumber, srfID, *arg):
"""EP surface Class
surface: surface geometry as a Brep
srfNumber: a unique number that is only for this surface
srfID: the unique name for this surface
*arg is parentZone for EPZoneClasses
*arg is parentSurface for child surfaces"""
self.objectType = "HBSurface"
self.geometry = surface
self.num = srfNumber
self.name = srfID
self.isPlanar = self.checkPlanarity()
self.hasInternalEdge = self.checkForInternalEdge()
self.meshedFace = rc.Geometry.Mesh()
self.RadMaterial = None
self.EPConstruction = None # this gets overwritten below
self.cenPt, self.normalVector = self.getSrfCenPtandNormalAlternate()
self.basePlane = rc.Geometry.Plane(self.cenPt, self.normalVector)
# define if type and BC is defined by user and should be kept
self.srfTypeByUser = False
self.srfBCByUser = False
# 4 represents an Air Wall
self.srfType = {0:'WALL',
0.5: 'UndergroundWall',
1:'ROOF',
1.5: 'UndergroundCeiling',
2:'FLOOR',
2.25: 'UndergroundSlab',
2.5: 'SlabOnGrade',
2.75: 'ExposedFloor',
3:'CEILING',
4:'WALL',
5:'WINDOW',
6:'SHADING',
'WALL': 'WALL',
'ROOF':'ROOF',
'FLOOR': 'FLOOR',
'CEILING': 'CEILING',
'WINDOW':'WINDOW',
'SHADING': 'SHADING'}
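# Fractional codes are refinements applied by reEvaluateType: 0.5 underground wall,
# 1.5 underground ceiling, 2.25 underground slab, 2.5 slab on grade, 2.75 exposed floor.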
self.cnstrSet = {0:'Exterior Wall',
0.5: 'Exterior Wall',
1: 'Exterior Roof',
1.5: 'Exterior Roof',
2:'Interior Floor',
2.25: 'Exterior Floor',
2.5: 'Exterior Floor',
2.75: 'Exterior Floor',
3:'Interior Ceiling',
4:'Air Wall',
5:'Exterior Window',
6:'Interior Wall'}
self.intCnstrSet = {
0:'Interior Wall',
0.5: 'Exterior Wall',
1:'Exterior Roof',
1.5:'Exterior Roof',
2:'Interior Floor',
2.25: 'Exterior Floor',
2.5: 'Exterior Floor',
2.75: 'Exterior Floor',
3:'Interior Ceiling',
4:'Air Wall',
5:'Interior Window',
6:'Interior Wall'}
self.srfBC = {0:'Outdoors',
0.5: 'ground',
1:'Outdoors',
1.5: 'ground',
2: 'outdoors', # this will be changed to surface once solveAdjacency is used
2.25: 'ground',
2.5: 'ground',
2.75: 'outdoors',
3: 'outdoors', # this will be changed to surface once solveAdjacency is used
4: 'surface',
5: 'Outdoors',
6: 'surface'}
self.srfSunExposure = {0:'SunExposed',
0.5:'NoSun',
1:'SunExposed',
1.5:'NoSun',
2:'NoSun',
2.25: 'NoSun',
2.5: 'NoSun',
2.75: 'SunExposed',
3:'NoSun',
4:'NoSun',
6: 'NoSun'}
self.srfWindExposure = {0:'WindExposed',
0.5:'NoWind',
1:'WindExposed',
1.5:'NoWind',
2:'NoWind',
2.25:'NoWind',
2.5:'NoWind',
2.75:'WindExposed',
3:'NoWind',
4:'NoWind',
6:'NoWind'}
self.numOfVertices = 'autocalculate'
if len(arg) == 0:
# minimum surface
# A minimum surface is a surface that will be added to a zone later
# or is a surface that will only be used for daylighting simulation
# so the concept of parent zone/surface is irrelevant
self.parent = None
self.reEvaluateType(True)
elif len(arg) == 1:
# represents an opening. The parent is the parent surface
# honeybee only supports windows (and not doors) at this point so
# the type is always the same (window)
self.parent = arg[0]
elif len(arg) == 2:
# represents a normal EP surface
# parent is a parent zone and the type differs case by case
self.parent = arg[0] # parent zone
self.type = arg[1] # surface type (e.g. wall, roof,...)
self.BC = self.srfBC[self.type] # initial BC based on type
# check for special conditions (e.g. slab underground, slab on grade)
self.reEvaluateType(True) # I should give this another thought
# this should be fixed to be based on zone type
# I can remove default constructions at some point
self.construction = self.cnstrSet[int(self.type)]
self.EPConstruction = self.construction
def checkPlanarity(self):
# planarity tolerance is fixed at 1e-3; ideally it should change for different model units
return self.geometry.Faces[0].IsPlanar(1e-3)
def checkForInternalEdge(self):
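# If the naked edges join into more than one loop, the surface has an
# opening/internal edge (e.g. a punched window) and will be meshed before export.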
edges = self.geometry.DuplicateEdgeCurves(True)
edgesJoined = rc.Geometry.Curve.JoinCurves(edges)
if len(edgesJoined)>1:
return True
else:
return False
class outdoorBCObject(object):
"""
BCObject for surfaces with outdoor BC
"""
def __init__(self, name = ""):
self.name = name
def getAngle2North(self):
types = [0, 4, 5] # vertical surfaces
northVector = rc.Geometry.Vector3d.YAxis
# rotate north based on the zone north vector
try: northVector.Rotate(math.radians(self.parent.north), rc.Geometry.Vector3d.ZAxis)
except: pass
normalVector = self.getSrfCenPtandNormalAlternate()[1]
if self.type in types:
angle = rc.Geometry.Vector3d.VectorAngle(northVector, normalVector, rc.Geometry.Plane.WorldXY)
#if normalVector.X < 0: angle = (2* math.pi) - angle
else: angle = 0
self.angle2North = math.degrees(angle)
def findDiscontinuity(self, curve, style):
# copied and modified from rhinoScript (@Steve Baer @GitHub)
"""Search for a derivatitive, tangent, or curvature discontinuity in
a curve object.
Parameters:
curve_id = identifier of curve object
style = The type of continuity to test for. The types of
continuity are as follows:
Value Description
1 C0 - Continuous function
2 C1 - Continuous first derivative
3 C2 - Continuous first and second derivative
4 G1 - Continuous unit tangent
5 G2 - Continuous unit tangent and curvature
Returns:
List 3D points where the curve is discontinuous
"""
dom = curve.Domain
t0 = dom.Min
t1 = dom.Max
points = []
get_next = True
while get_next:
get_next, t = curve.GetNextDiscontinuity(System.Enum.ToObject(rc.Geometry.Continuity, style), t0, t1)
if get_next:
points.append(curve.PointAt(t))
t0 = t # Advance to the next parameter
return points
def extractMeshPts(self, mesh, triangulate = False):
coordinatesList = []
for face in range(mesh.Faces.Count):
# get each mesh surface vertices
if mesh.Faces.GetFaceVertices(face)[3] != mesh.Faces.GetFaceVertices(face)[4]:
meshVertices = mesh.Faces.GetFaceVertices(face)[1:5]
# triangulation
if triangulate or not self.isRectangle(meshVertices):
coordinatesList.append(meshVertices[:3])
coordinatesList.append([meshVertices[0], meshVertices[2], meshVertices[3]])
else:
coordinatesList.append(list(meshVertices))
else:
meshVertices = mesh.Faces.GetFaceVertices(face)[1:4]
coordinatesList.append(list(meshVertices))
# check order of the points
for coorCount, coorList in enumerate(coordinatesList):
# check if clockWise and reverse the list in case it is not
if not self.isAntiClockWise(coorList):
try: coorList.reverse()
except:
try: coordinatesList[coorCount] = [coorList[3], coorList[2], coorList[1], coorList[0]]
except: coordinatesList[coorCount] = [coorList[2], coorList[1], coorList[0]]
#coordinatesList.reverse()
return coordinatesList
def isAntiClockWise(self, pts):
def crossProduct(vector1, vector2):
return vector1.X * vector2.X + vector1.Y * vector2.Y + vector1.Z * vector2.Z
# check if the vertex order is anti-clockwise (crossProduct above actually computes a dot product)
vector0 = rc.Geometry.Vector3d(pts[1]- pts[0])
vector1 = rc.Geometry.Vector3d(pts[-1]- pts[0])
ptsNormal = rc.Geometry.Vector3d.CrossProduct(vector0, vector1)
# in case points are anti-clockwise then normals should be parallel
if crossProduct(ptsNormal, self.basePlane.Normal) > 0:
return True
return False
def extractPoints(self, method = 1, triangulate = False, meshPar = None):
# if not self.meshedFace.IsValid:
# meshed surface will be generated regardless
# to make sure it won't fail for surfaces with multiple openings
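# Returns either a flat list of corner points (planar surface without internal
# edges) or a list of point lists, one per mesh face, for meshed surfaces.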
if meshPar == None:
if self.isPlanar:
meshPar = rc.Geometry.MeshingParameters.Coarse
meshPar.SimplePlanes = True
else:
meshPar = rc.Geometry.MeshingParameters.Smooth
self.meshedFace = rc.Geometry.Mesh.CreateFromBrep(self.geometry, meshPar)[0]
if self.meshedFace.IsValid or self.hasInternalEdge:
if self.isPlanar and not self.hasInternalEdge:
plSegments = self.meshedFace.GetNakedEdges()
segments = []
[segments.append(seg.ToNurbsCurve()) for seg in plSegments]
else:
return self.extractMeshPts(self.meshedFace,triangulate)
else:
segments = self.geometry.DuplicateEdgeCurves(True)
joinedBorder = rc.Geometry.Curve.JoinCurves(segments)
if method == 0:
pts = []
[pts.append(seg.PointAtStart) for seg in segments]
else:
pts = []
pts.append(joinedBorder[0].PointAtStart)
restOfpts = self.findDiscontinuity(joinedBorder[0], style = 4)
# for some reason restOfPts returns no pt!
try: pts.extend(restOfpts)
except: pass
try: centPt, normalVector = self.getSrfCenPtandNormalAlternate()
except: centPt, normalVector = self.parent.getSrfCenPtandNormal(self.geometry)
basePlane = rc.Geometry.Plane(centPt, normalVector)
# inclusion test
if str(joinedBorder[0].Contains(centPt, basePlane)).lower() != "inside":
# average points
cumPt = rc.Geometry.Point3d(0,0,0)
for pt in pts: cumPt += pt
centPt = cumPt/len(pts)
# move basePlane to the new place
basePlane = rc.Geometry.Plane(centPt, normalVector)
# sort based on parameter on curve
pointsSorted = sorted(pts, key =lambda pt: joinedBorder[0].ClosestPoint(pt)[1])
# check if clockWise and reverse the list in case it is
if not self.isAntiClockWise(pointsSorted):
pointsSorted.reverse()
# in case the surface still doesn't have a type
# it happens for radiance surfaces. For EP it won't happen
# as it has been already assigned based on the zone
if not hasattr(self, 'type'):
self.type = self.getTypeByNormalAngle()
## find UpperRightCorner point
## I'm changing this to find the LowerLeftCorner point
## instead as it is how gbXML needs it
# check the plane
srfType = self.getTypeByNormalAngle()
rotationCount = 0
if srfType == 0:
# vertical surface
while basePlane.YAxis.Z <= sc.doc.ModelAbsoluteTolerance and rotationCount < 3:
# keep rotating for 90 degrees
basePlane.Rotate(math.radians(90), basePlane.ZAxis)
rotationCount += 1
elif srfType == 1 or srfType == 3:
# roof + ceiling
while basePlane.YAxis.Y <= sc.doc.ModelAbsoluteTolerance and rotationCount < 3:
# keep rotating for 90 degrees
basePlane.Rotate(math.radians(90), basePlane.ZAxis)
rotationCount += 1
elif srfType == 2:
# floor
while basePlane.YAxis.Y >= sc.doc.ModelAbsoluteTolerance and rotationCount < 3:
# keep rotating for 90 degrees
basePlane.Rotate(math.radians(90), basePlane.ZAxis)
rotationCount += 1
# remap point on the new plane
remPts = []
for pt in pointsSorted: remPts.append(basePlane.RemapToPlaneSpace(pt)[1])
# find UpperRightCorner point (x>0 and max y)
firstPtIndex = None
#for ptIndex, pt in enumerate(remPts):
# if pt.X > 0 and pt.Y > 0 and firstPtIndex == None:
# firstPtIndex = ptIndex #this could be the point
# elif pt.X > 0 and pt.Y > 0:
# if pt.Y > remPts[firstPtIndex].Y: firstPtIndex = ptIndex
for ptIndex, pt in enumerate(remPts):
if pt.X < 0 and pt.Y < 0 and firstPtIndex == None:
firstPtIndex = ptIndex #this could be the point
elif pt.X < 0 and pt.Y < 0:
if pt.Y < remPts[firstPtIndex].Y: firstPtIndex = ptIndex
if firstPtIndex!=None and firstPtIndex!=0:
pointsSorted = pointsSorted[firstPtIndex:] + pointsSorted[:firstPtIndex]
return list(pointsSorted)
def isRectangle(self, ptList):
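# Note: these are exact floating-point comparisons, so slightly imperfect
# rectangles fail the test and the face is triangulated/meshed instead.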
vector1 = rc.Geometry.Vector3d(ptList[0] - ptList[1])
vector2 = rc.Geometry.Vector3d(ptList[1] - ptList[2])
vector3 = rc.Geometry.Vector3d(ptList[2] - ptList[3])
vector4 = rc.Geometry.Vector3d(ptList[3] - ptList[0])
if ptList[0].DistanceTo(ptList[2]) != ptList[1].DistanceTo(ptList[3]) or \
math.degrees(rc.Geometry.Vector3d.VectorAngle(vector1, vector2))!= 90 or \
math.degrees(rc.Geometry.Vector3d.VectorAngle(vector3, vector4))!= 90:
return False
else:
return True
def extractGlzPoints(self, RAD = False, method = 2):
glzCoordinatesList = []
for glzSrf in self.childSrfs:
sortedPoints = glzSrf.extractPoints()
# check numOfPoints
if len(sortedPoints) < 4 or (self.isPlanar and RAD==True):
glzCoordinatesList.append(sortedPoints) #triangle
elif len(sortedPoints) == 4 and self.isPlanar and self.isRectangle(sortedPoints):
glzCoordinatesList.append(sortedPoints) #rectangle
else:
if method == 1:
sortedPoints.append(sortedPoints[0])
border = rc.Geometry.Polyline(sortedPoints)
mesh = rc.Geometry.Mesh.CreateFromClosedPolyline(border)
elif method == 2:
mp = rc.Geometry.MeshingParameters.Smooth
mesh = rc.Geometry.Mesh.CreateFromBrep(glzSrf.geometry, mp)[0]
if mesh:
# Make sure non-rectangular shapes with 4 edges will be triangulated
if len(sortedPoints) == 4 and self.isPlanar: triangulate= True
else: triangulate= False
try: glzCoordinatesList.extend(self.extractMeshPts(mesh, triangulate))
except: glzCoordinatesList.append(self.extractMeshPts(mesh, triangulate))
return glzCoordinatesList
def collectMeshFaces(self, meshVertices, reverseList = False):
mesh = rc.Geometry.Mesh()
if meshVertices[3] != meshVertices[4]: # quad face; for triangles GetFaceVertices repeats the last vertex
mesh.Vertices.Add(meshVertices[1]) #0
mesh.Vertices.Add(meshVertices[2]) #1
mesh.Vertices.Add(meshVertices[3]) #2
mesh.Vertices.Add(meshVertices[4]) #3
if not reverseList: mesh.Faces.AddFace(0, 1, 2, 3)
else: mesh.Faces.AddFace(0, 1, 2, 3)
else:
mesh.Vertices.Add(meshVertices[1]) #0
mesh.Vertices.Add(meshVertices[2]) #1
mesh.Vertices.Add(meshVertices[3]) #2
if not reverseList: mesh.Faces.AddFace(0, 1, 2)
else: mesh.Faces.AddFace(0, 1, 2)
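# Note: both branches above add the face with the same vertex order, so the
# reverseList flag currently has no effect on the face winding here.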
self.meshedFace.Append(mesh)
#print self.meshedFace.Faces.Count
def disposeCurrentMeshes(self):
if self.meshedFace.Faces.Count>0:
self.meshedFace.Dispose()
self.meshedFace = rc.Geometry.Mesh()
if self.hasChild:
for fenSrf in self.childSrfs:
if fenSrf.meshedFace.Faces.Count>0:
fenSrf.meshedFace.Dispose()
fenSrf.meshedFace = rc.Geometry.Mesh()
def getSrfCenPtandNormalAlternate(self):
surface = self.geometry.Faces[0]
u_domain = surface.Domain(0)
v_domain = surface.Domain(1)
centerU = (u_domain.Min + u_domain.Max)/2
centerV = (v_domain.Min + v_domain.Max)/2
centerPt = surface.PointAt(centerU, centerV)
normalVector = surface.NormalAt(centerU, centerV)
normalVector.Unitize()
return centerPt, normalVector
def isUnderground(self, wall = False):
"""
check if this surface is underground
"""
# extract points
coordinatesList = self.extractPoints()
# create a list of list
if type(coordinatesList[0])is not list and type(coordinatesList[0]) is not tuple:
coordinatesList = [coordinatesList]
for ptList in coordinatesList:
for pt in ptList:
if not wall and pt.Z - rc.Geometry.Point3d.Origin.Z >= sc.doc.ModelAbsoluteTolerance: return False
elif pt.Z >= sc.doc.ModelAbsoluteTolerance: return False
return True
def isOnGrade(self):
"""
check if this surface is on grade (sits on the ground plane)
"""
# extract points
coordinatesList = self.extractPoints()
# create a list of list
if type(coordinatesList[0])is not list and type(coordinatesList[0]) is not tuple:
coordinatesList = [coordinatesList]
for ptList in coordinatesList:
for pt in ptList:
if abs(pt.Z - rc.Geometry.Point3d.Origin.Z) >= sc.doc.ModelAbsoluteTolerance: return False
return True
def reEvaluateType(self, overwrite= True):
"""
Find special surface types
"""
if not overwrite and hasattr(self, "type"): return self.type
if self.srfTypeByUser: return self.type
if self.srfBCByUser: return self.type
# find initial type it has no type yet
if not hasattr(self, "type"):
self.type = self.getTypeByNormalAngle()
self.BC = "OUTDOORS"
if self.type == 0:
if self.isUnderground(True):
self.type += 0.5 #UndergroundWall
self.BC = "GROUND"
elif self.type == 1:
# A roof underground will be assigned as UndergroundCeiling!
if self.isUnderground():
self.type += 0.5 #UndergroundCeiling
self.BC = "GROUND"
elif self.BC.upper() == "SURFACE":
self.type = 3 # ceiling
elif self.type == 2:
# floor
if self.isOnGrade():
self.type += 0.5 #SlabOnGrade
self.BC = "GROUND"
elif self.isUnderground():
self.type += 0.25 #UndergroundSlab
self.BC = "GROUND"
elif self.BC.upper() != "SURFACE":
self.type += 0.75 #Exposed floor
# update boundary condition based on new type
self.BC = self.srfBC[self.type]
def getTypeByNormalAngle(self, maximumRoofAngle = 30):
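# Classify by the angle between the face normal and +Z: within maximumRoofAngle of
# straight up -> roof (or ceiling for non-top zones), 160-200 degrees -> floor, else wall.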
# find the normal
try: findNormal = self.getSrfCenPtandNormalAlternate()
except: findNormal = self.parent.getSrfCenPtandNormal(self.geometry) #I should fix this at some point - Here just for shading surfaces for now
if findNormal:
try:
normal = findNormal[1]
angle2Z = math.degrees(rc.Geometry.Vector3d.VectorAngle(normal, rc.Geometry.Vector3d.ZAxis))
except:
print self
print rc.Geometry.AreaMassProperties.Compute(self.geometry).Centroid
angle2Z = 0
else:
#print findNormal
angle2Z = 0
if angle2Z < maximumRoofAngle or angle2Z > 360- maximumRoofAngle:
try:
if self.isThisTheTopZone:
return 1 #roof
else:
return 3 # ceiling
except:
return 1 #roof
elif 160 < angle2Z <200:
return 2 # floor
else:
return 0 #wall
def getTotalArea(self):
return self.geometry.GetArea()
def setType(self, type, isUserInput = False):
self.type = type
self.srfTypeByUser = isUserInput
def setBC(self, BC, isUserInput = False):
self.BC = BC
self.srfBCByUser = isUserInput
def setBCObject(self, BCObject):
self.BCObject = BCObject
def setBCObjectToOutdoors(self):
self.BCObject = self.outdoorBCObject()
def setEPConstruction(self, EPConstruction):
self.EPConstruction = EPConstruction
def setRADMaterial(self, RADMaterial):
self.RadMaterial = RADMaterial
def setName(self, newName):
self.name = newName
def setSunExposure(self, exposure = 'NoSun'):
self.sunExposure = exposure
def setWindExposure(self, exposure = 'NoWind'):
self.windExposure = exposure
def __str__(self):
try:
return 'Surface name: ' + self.name + '\nSurface number: ' + str(self.num) + \
'\nThis surface is a ' + str(self.srfType[self.type]) + "."
except:
return 'Surface name: ' + self.name + '\n' + 'Surface number: ' + str(self.num) + \
'\nSurface type is not assigned. Honeybee thinks this is a ' + str(self.srfType[self.getTypeByNormalAngle()]) + "."
class hb_EPZoneSurface(hb_EPSurface):
"""..."""
def __init__(self, surface, srfNumber, srfName, *args):
"""This function initiates the class for an EP surface.
surface: surface geometry as a Brep
srfNumber: a unique number that is only for this surface
srfName: the unique name for this surface
parentZone: class of the zone that this surface belongs to"""
if len(args)==2:
parentZone, surafceType = args
hb_EPSurface.__init__(self, surface, srfNumber, srfName, parentZone, surafceType)
self.getAngle2North()
self.BCObject = self.outdoorBCObject()
else:
hb_EPSurface.__init__(self, surface, srfNumber, srfName)
# Check for possible surface type and assign the BC based on that
# This will be re-evaluated in write idf file
srfType = self.getTypeByNormalAngle()
self.BC = self.srfBC[srfType]
self.BCObject = self.outdoorBCObject()
self.sunExposure = self.srfSunExposure[srfType]
self.windExposure = self.srfWindExposure[srfType]
self.getAngle2North()
if hasattr(self, 'parent') and self.parent!=None:
# in both of these cases the zone should be meshed
if not self.isPlanar:
self.parent.hasNonPlanarSrf = True
if self.hasInternalEdge:
self.parent.hasInternalEdge = True
if hasattr(self, 'type'):
self.sunExposure = self.srfSunExposure[self.type]
self.windExposure = self.srfWindExposure[self.type]
self.groundViewFactor = 'autocalculate'
self.hasChild = False
self.isChild = False
self.childSrfs = []
def isPossibleChild(self, chidSrfCandidate, tolerance = sc.doc.ModelAbsoluteTolerance):
# check that all the vertices have zero distance to the base surface
segments = chidSrfCandidate.DuplicateEdgeCurves(True)
pts = []
[pts.append(seg.PointAtStart) for seg in segments]
for pt in pts:
ptOnSrf = self.geometry.ClosestPoint(pt)
if pt.DistanceTo(ptOnSrf) > tolerance: return False
# check the area of the child surface and make sure it is smaller than the base surface
#if self.geometry.GetArea() <= chidSrfCandidate.GetArea():
# print "The area of the child surface cannot be larger than the area of the parent surface!"
# return False
# all points are located on the surface and the area is less so it is all good!
return True
def addChildSrf(self, childSurface, percentage = 40):
# I should copy/paste the function here so I can run it as
# a method! For now I just collect them here together....
# use the window function
try: self.childSrfs.extend(childSurface)
except: self.childSrfs.append(childSurface)
self.hasChild = True
pass
def calculatePunchedSurface(self):
def checkCrvArea(crv):
try:
area = rc.Geometry.AreaMassProperties.Compute(crv).Area
except:
area = 0
return area > sc.doc.ModelAbsoluteTolerance
def checkCrvsPts(crv):
# in some cases crv generates a line with similar points
pts = []
pts.append(crv.PointAtStart)
restOfpts = self.findDiscontinuity(crv, style = 4)
# for some reason restOfPts returns no pt!
try: pts.extend(restOfpts)
except: pass
def isDuplicate(pt, newPts):
for p in newPts:
# print pt.DistanceTo(p)
if pt.DistanceTo(p) < 2 * sc.doc.ModelAbsoluteTolerance:
return True
return False
newPts = [pts[0]]
for pt in pts[1:]:
if not isDuplicate(pt, newPts):
newPts.append(pt)
if len(newPts) > 2:
return True
return False
glzCrvs = []
childSrfs = []
for glzSrf in self.childSrfs:
glzEdges = glzSrf.geometry.DuplicateEdgeCurves(True)
jGlzCrv = rc.Geometry.Curve.JoinCurves(glzEdges)[0]
# in some cases glazing based on percentage generates very small glazings
# here I check and remove them
# check area of curve
try:
if self.isPlanar:
area = rc.Geometry.AreaMassProperties.Compute(jGlzCrv).Area
else:
area = rc.Geometry.AreaMassProperties.Compute(glzSrf.geometry).Area
except:
# in case area calculation fails
# let it go anyways!
area = 10 * sc.doc.ModelAbsoluteTolerance
if area > sc.doc.ModelAbsoluteTolerance and checkCrvsPts(jGlzCrv):
# check normal direction of child surface and base surface
# print math.degrees(rc.Geometry.Vector3d.VectorAngle(glzSrf.normalVector, self.normalVector))
childSrfs.append(glzSrf)
glzCrvs.append(jGlzCrv)
else:
print "A very tiny glazing is removed from " + self.name+ "."
self.childSrfs = childSrfs
baseEdges = self.geometry.DuplicateEdgeCurves(True)
jBaseCrv = rc.Geometry.Curve.JoinCurves(baseEdges)
# convert array to list
jBaseCrvList = list(jBaseCrv)
try:
if self.isPlanar:
# works for planar surfaces
punchedGeometries = rc.Geometry.Brep.CreatePlanarBreps(glzCrvs + jBaseCrvList)
if len(punchedGeometries) == 1:
self.punchedGeometry = punchedGeometries[0]
else:
# curves are not in the same plane so let's
# project the curves on surface plane
srfPlane = rc.Geometry.Plane(self.cenPt, self.normalVector)
PGlzCrvs = []
for curve in glzCrvs + jBaseCrvList:
pCrv = rc.Geometry.Curve.ProjectToPlane(curve, srfPlane)
if checkCrvArea(pCrv):
PGlzCrvs.append(pCrv)
punchedGeometries = rc.Geometry.Brep.CreatePlanarBreps(PGlzCrvs)
# in some cases glazing with very minor areas are generated
# which causes multiple surfaces
self.punchedGeometry = punchedGeometries[-1]
else:
# split the base geometry - Good luck!
splitBrep = self.geometry.Faces[0].Split(glzCrvs, sc.doc.ModelAbsoluteTolerance)
#splitBrep.Faces.ShrinkFaces()
for srfCount in range(splitBrep.Faces.Count):
surface = splitBrep.Faces.ExtractFace(srfCount)
edges = surface.DuplicateEdgeCurves(True)
joinedEdges = rc.Geometry.Curve.JoinCurves(edges)
if len(joinedEdges)>1:
self.punchedGeometry = surface
except Exception, e:
self.punchedGeometry = None
self.hasChild = False
self.childSrfs = []
print "Failed to calculate opaque part of the surface. " + \
"Glazing is removed from " + self.name
def getOpaqueArea(self):
if self.hasChild:
try:
return self.punchedGeometry.GetArea()
except:
self.calculatePunchedSurface()
return self.punchedGeometry.GetArea()
else:
return self.getTotalArea()
def getGlazingArea(self):
if self.hasChild:
glzArea = 0
for childSrf in self.childSrfs:
glzArea += childSrf.getTotalArea()
return glzArea
else:
return 0
def getWWR(self):
return self.getGlazingArea()/self.getTotalArea()
def removeAllChildSrfs(self):
self.childSrfs = []
self.hasChild = False
self.calculatePunchedSurface()
class hb_EPShdSurface(hb_EPSurface):
def __init__(self, surface, srfNumber, srfName):
hb_EPSurface.__init__(self, surface, srfNumber, srfName, self)
self.TransmittanceSCH = ''
self.isChild = False
self.hasChild = False
self.construction = 'Exterior Wall' # just added here to get the minimum surface to work
self.EPConstruction = 'Exterior Wall' # just added here to get the minimum surface to work
self.childSrfs = [self] # so I can use the same function as glazing to extract the points
self.type = 6
pass
def getSrfCenPtandNormal(self, surface):
# I'm not sure if we need this method
# I will remove this later
surface = surface.Faces[0]
u_domain = surface.Domain(0)
v_domain = surface.Domain(1)
centerU = (u_domain.Min + u_domain.Max)/2
centerV = (v_domain.Min + v_domain.Max)/2
centerPt = surface.PointAt(centerU, centerV)
normalVector = surface.NormalAt(centerU, centerV)
normalVector.Unitize()
return centerPt, normalVector
class hb_EPFenSurface(hb_EPSurface):
"""..."""
def __init__(self, surface, srfNumber, srfName, parentSurface, surafceType, punchedWall = None):
"""This function initiates the class for an EP surface.
surface: surface geometry as a Brep
srfNumber: a unique number that is only for this surface
srfName: the unique name for this surface
parentZone: class of the zone that this surface belongs to"""
hb_EPSurface.__init__(self, surface, srfNumber, srfName, parentSurface, surafceType)
self.blindsMaterial = ""
self.shadingControl = ""
self.shadingSchName = ""
if not self.isPlanar:
try:
self.parent.parent.hasNonplanarSrf = True
except:
# surface is not part of a zone yet.
pass
# calculate punchedWall
self.parent.punchedGeometry = punchedWall
self.shadingControlName = ''
self.frameName = ''
self.Multiplier = 1
self.BCObject = self.outdoorBCObject()
self.groundViewFactor = 'autocalculate'
self.isChild = True # is it really useful?
class hb_Hive(object):
class CopyClass(object):
pass
def addToHoneybeeHive(self, HBObjects, GHComponentID):
# check if the honeybee dictionary already exists
# if not create the dictionary
# eventually this should be generated as soon as the user lets the bee fly
if not sc.sticky.has_key('HBHive'): sc.sticky['HBHive'] = {}
geometries = []
childGeometries = []
for HBObject in HBObjects:
key = GHComponentID + HBObject.name
sc.sticky['HBHive'][key] = HBObject
# assuming that all the HBObjects have a geometry! I assume they do
try:
if HBObject.objectType != "HBZone" and HBObject.hasChild:
if HBObject.punchedGeometry == None:
HBObject.calculatePunchedSurface()
geo = HBObject.punchedGeometry.Duplicate()
geometry = geo.Duplicate()
for childObject in HBObject.childSrfs:
# for now I only return the children as geometries and not objects
# it could cause some confusion for the users that I will try to
# address later
childGeometries.append(childObject.geometry.Duplicate())
# join geometries into a single surface
geometry = rc.Geometry.Brep.JoinBreps([geometry] + childGeometries, sc.doc.ModelAbsoluteTolerance)[0]
elif HBObject.objectType == "HBZone":
geo = HBObject.geometry
geometry = geo.Duplicate()
srfs = []
zoneHasChildSrf = False
for HBSrf in HBObject.surfaces:
if HBSrf.hasChild:
zoneHasChildSrf = True
srfs.append(HBSrf.punchedGeometry.Duplicate())
for childObject in HBSrf.childSrfs:
# for now I only return the children as geometries and not objects
# it could cause some confusion for the users that I will try to
# address later
srfs.append(childObject.geometry.Duplicate())
else:
srfs.append(HBSrf.geometry.Duplicate())
if zoneHasChildSrf:
geometry = rc.Geometry.Brep.JoinBreps(srfs, sc.doc.ModelAbsoluteTolerance)[0]
else:
geo = HBObject.geometry
geometry = geo.Duplicate()
geometry.UserDictionary.Set('HBID', key)
geometries.append(geometry)
except Exception, e:
print "Reached the maximum array size for UserDictionary: " + `e`
# return geometry with the ID
return geometries
def callFromHoneybeeHive(self, geometryList):
HBObjects = []
for geometry in geometryList:
try:
key = geometry.UserDictionary['HBID']
if sc.sticky['HBHive'].has_key(key):
try:
HBObject = sc.sticky['HBHive'][key]
# after the first round meshedFace makes copy.deepcopy crash
# so I need to regenerate meshFaces
if HBObject.objectType == "HBZone":
for surface in HBObject.surfaces:
newMesh = rc.Geometry.Mesh()
newMesh.Append(surface.meshedFace)
surface.meshedFace = newMesh
elif HBObject.objectType == "HBSurface":
newMesh = rc.Geometry.Mesh()
newMesh.Append(HBObject.meshedFace)
HBObject.meshedFace = newMesh
HBObjects.append(copy.deepcopy(HBObject))
except Exception, e:
print `e`
print "Failed to copy the object. Returning the original objects...\n" +\
"This can cause strange behaviour!"
HBObjects.append(sc.sticky['HBHive'][key])
except:
pass
return HBObjects
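# Illustrative round-trip through the hive (the component-ID string is a placeholder):
#   hive = hb_Hive()
#   taggedGeo = hive.addToHoneybeeHive(HBZones, ghenv.Component.InstanceGuid.ToString())
#   HBZones = hive.callFromHoneybeeHive(taggedGeo)
# addToHoneybeeHive stores each object in sc.sticky['HBHive'] under componentID + name
# and tags the returned geometry with that key in UserDictionary['HBID'];
# callFromHoneybeeHive reads the tag back and returns deep copies of the stored objects.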
class hb_RADParameters(object):
def __init__(self):
self.radParDict = {
"_ab_": [2, 3, 6],
"_ad_": [512, 2048, 4096],
"_as_": [128, 2048, 4096],
"_ar_": [16, 64, 128],
"_aa_": [.25, .2, .1],
"_ps_": [8, 4, 2],
"_pt_": [.15, .10, .05],
"_pj_": [.6, .9, .9],
"_dj_": [0, .5, .7],
"_ds_": [.5, .25, .05],
"_dt_": [.5, .25, .15],
"_dc_": [.25, .5, .75],
"_dr_": [0, 1, 3],
"_dp_": [64, 256, 512],
"_st_": [.85, .5, .15],
"_lr_": [4, 6, 8],
"_lw_": [.05, .01, .005],
"_av_": [0, 0, 0],
"xScale": [1, 2, 6],
"yScale": [1, 2, 6]
}
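# The three values per flag appear to be low / medium / high quality presets for
# the corresponding RADIANCE parameters (e.g. -ab ambient bounces, -ad ambient
# divisions, -aa ambient accuracy); this reading is an assumption based on common
# RADIANCE naming, not something stated in this file.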
class hb_DSParameters(object):
def __init__(self, outputUnits = [2], dynamicSHDGroup_1 = None, dynamicSHDGroup_2 = None, RhinoViewsName = [] , adaptiveZone = False, dgp_imageSize = 250, onlyRunGlareAnalysis = True):
if len(outputUnits)!=0 and outputUnits[0]!=None: self.outputUnits = outputUnits
else: self.outputUnits = [2]
self.onlyAnnualGlare = onlyRunGlareAnalysis
self.runAnnualGlare = False
self.RhinoViewsName = RhinoViewsName
if RhinoViewsName != []:
self.runAnnualGlare = True
if adaptiveZone == None: adaptiveZone = False
self.adaptiveZone = adaptiveZone
if not dgp_imageSize: dgp_imageSize = 250
self.dgp_imageSize = dgp_imageSize
if dynamicSHDGroup_1 == None and dynamicSHDGroup_2==None:
class dynamicSHDRecipe(object):
def __init__(self, type = 1, name = "no_blind"):
self.type = type
self.name = name
self.DShdR = [dynamicSHDRecipe(type = 1, name = "no_blind")]
else:
self.DShdR = []
if dynamicSHDGroup_1 != None: self.DShdR.append(dynamicSHDGroup_1)
if dynamicSHDGroup_2 != None: self.DShdR.append(dynamicSHDGroup_2)
# Number of ill files
self.numOfIll = 1
for shadingRecipe in self.DShdR:
if shadingRecipe.name == "no_blind":
pass
elif shadingRecipe.name == "conceptual_dynamic_shading":
self.numOfIll += 1
else:
# advanced dynamic shading
self.numOfIll += len(shadingRecipe.shadingStates) - 1
# print "number of ill files = " + str(self.numOfIll)
class CalculateGridBasedDLAnalysisResults(object):
"""
calculate results of any grid based analysis
analysisType: [0] illuminance, [1] radiation, [2] luminance, [3] daylight factor, [4] vertical sky component
"""
def __init__(self, resultFiles, analysisType):
self.analysisType = analysisType
self.resultFiles = resultFiles
def getResults(self):
resultValues = []
studyType= self.analysisType
for fileCount, resultFile in enumerate(self.resultFiles):
if studyType == 0 or studyType == 2:
#illuminance / luminance
resultValues.extend(self.readDLResult(resultFile))
elif studyType == 1:
# radiation
resultValues.extend(self.readRadiationResult(resultFile))
elif studyType == 3 or studyType == 4:
resultValues.extend(self.readDFResult(resultFile))
return resultValues
def readRadiationResult(self, resultFile):
result = []
resultFile = open(resultFile,"r")
for line in resultFile:
result.append(float(line.split(' ')[0]))
return result
def readDLResult(self, resultFile):
result = []
resultFile = open(resultFile,"r")
for line in resultFile:
R, G, B = line.split(' ')[0:3]
result.append(179*(.265 * float(R) + .67 * float(G) + .065 * float(B)))
return result
def readDFResult(self, resultFile):
result = []
resultFile = open(resultFile,"r")
for line in resultFile:
R, G, B = line.split(' ')[0:3]
# divide by the sky horizontal illuminance = 1000
res = 17900*(.265 * float(R) + .67 * float(G) + .065 * float(B))/1000
if res > 100: res = 100
result.append(res)
return result
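# Note on the conversions above: RADIANCE result files store R, G, B radiometric
# values, and 179 lm/W is the luminous efficacy RADIANCE assumes, so
# illuminance/luminance = 179 * (0.265*R + 0.670*G + 0.065*B). The daylight-factor
# version expresses the same quantity as a percentage of a 1000 lux reference
# horizontal illuminance (hence 17900 * (...) / 1000) and caps the result at 100.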
class SerializeObjects(object):
def __init__(self, filePath, data = None):
self.filePath = filePath
self.data = data
def saveToFile(self):
with open(self.filePath, 'wb') as outf:
pickle.dump(self.data, outf)
def readFromFile(self):
with open(self.filePath, 'rb') as inf:
self.data = pickle.load(inf)
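# Minimal illustrative usage of SerializeObjects (the file path is a placeholder):
#   ser = SerializeObjects(r"c:\ladybug\myStudy.pkl", data={"results": [0, 1, 2]})
#   ser.saveToFile()    # pickles self.data to filePath
#   ser.readFromFile()  # unpickles filePath back into self.data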
class hb_hwBoilerParams(object):
def __init__(self):
self.hwBoilerDict = {
'name':'honeybeeHotWaterBoiler',
'fueltype':1,
'nominalCapacity':'Autosize',
'sizingFactor':1.25,
'nominalEfficiency':0.80,
'designOutletTemperature':80,
'designWaterFlowRate':'Autosize',
'minPartLoad':0.15,
'maxPartLoadRatio':1.1,
'optimumPartLoadRatio':0.50,
'outletTempMaximum':95,
'boilerFlowMode':'NotModulated',
'parasiticElectricLoad':0,
'curveTemperatureVariable':'LeavingBoiler',
'Curves':None
}
class hb_airsideEconoParams(object):
def __init__(self):
self.airEconoDict = {
'name':'honeybeeDefaultEconomizer',
'econoControl':0,
'controlAction':0,
'maxAirFlowRate':'Autosize',
'minAirFlowRate':'Autosize',
'minLimitType':0,
'minOutdoorAirSchedule':'OpenStudio Default',
'minOutdoorAirFracSchedule':None,
'maxLimitDewpoint':None,
'sensedMin':12,
'sensedMax':22,
'DXLockoutMethod':None
}
class hb_constVolFanParams(object):
def __init__(self):
self.cvFanDict = {
'name':'honeybeeConstVolFan',
'type':0,
'fanEfficiency':0.6,
'pressureRise':892.9,
'maxFlowRate':'Autosize',
'motorEfficiency':0.825,
'airStreamHeatPct':100.0
}
class hb_varVolFanParams(object):
def __init__(self):
self.vvFanDict = {
'name':'honeybeeConstVolFan',
'type':1,
'fanEfficiency':0.6,
'pressureRise':892.9,
'maxFlowRate':'Autosize',
'motorEfficiency':0.825,
'airStreamHeatPct':100.0,
'minFlowFrac':0.2,
'fanPowerCoefficient1':0.04076,
'fanPowerCoefficient2':0.08804,
'fanPowerCoefficient3':-0.07292,
'fanPowerCoefficient4':0.94373,
'fanPowerCoefficient5':0.00000
}
class hb_AirHandlerParams(object):
def __init__(self):
self.airHandlerDict = {
'availSch':'OpenStudio Default',
'fanPlacement':'DrawThrough',
'coolingAirflow':'Autosize',
'coolingOAflow':'Autosize',
'heatingAirflow': 'Autosize',
'heatingOAflow': 'Autosize',
'floatingAirflow':'Autosize',
'floatingOAflow':'Autosize',
'constVolSupplyFanDef':hb_constVolFanParams,
'varVolSupplyFanDef':hb_varVolFanParams,
'airsideEconomizer':hb_airsideEconoParams,
'coolingCoil': None,
'heatingCoil': None,
'evaporativeCondenser': None
}
class hb_2xDXCoilParams(object):
def __init__(self):
self.twoSpeedDXDict = {
'name':'honeybee Default 2 Speed DX Coil',
'availSch':'OpenStudio Default',
'ratedHighSpeedAirflowRate':'Autosize',
'ratedHighSpeedTotalCooling':'Autosize',
'ratedHighSpeedSHR':'Autosize',
'ratedHighSpeedCOP':3.0,
'ratedLowSpeedAirflowRate':'Autosize',
'ratedLowSpeedTotalCooling':'Autosize',
'ratedLowSpeedSHR':'Autosize',
'ratedLowSpeedCOP':3.0,
'condenserType':'AirCooled',
'evaporativeCondenserDesc':None,
'Curves':None
}
class hb_2xDXHeatingCoilParams(object):
def __init__(self):
self.twoSpeedDXDict = {
'name':'honeybee Default 2 Speed DX Heating Coil',
'availSch':'OpenStudio Default',
'ratedHighSpeedAirflowRate':'Autosize',
'ratedHighSpeedTotalHeating':'Autosize',
'ratedHighSpeedCOP':4.0,
'ratedLowSpeedAirflowRate':'Autosize',
'ratedLowSpeedTotalCooling':'Autosize',
'ratedLowSpeedCOP':5.0,
'minOutdoorDryBulb':-8,
'outdoorDBDefrostEnabled': 5,
'outdoorDBCrankcase':10,
'crankcaseCapacity': 0,
'defrostStrategy':'reverse-cycle',
'defrostControl':'timed',
'resistiveDefrostCap':0,
'Curves': None
}
class hb_1xDXCoilParams(object):
def __init__(self):
self.oneSpeedDXDict = {
'name':'honeybee Default 1 Speed DX Coil',
'availSch':'OpenStudio Default',
'ratedAirflowRate':'Autosize',
'ratedTotalCooling':'Autosize',
'ratedSHR':'Autosize',
'ratedCOP':3.0,
'condenserType':'Air Cooled',
'evaporativeCondenserDesc':None,
'Curves':None
}
class hb_1xDXHeatingCoilParams(object):
def __init__(self):
self.oneSpeedDXDict = {
'name':'honeybee Default 1 speed DX Heating Coil',
'availSch':'OpenStudio Default',
'ratedAirflowRate':'Autosize',
'ratedTotalHeating':'Autosize',
'ratedCOP':3.0,
'minOutdoorDryBulb': -8,
'outdoorDBDefrostEnabled': 5,
'outdoorDBCrankcase':10,
'crankcaseCapacity': 0,
'defrostStrategy':'reverse-cycle',
'defrostControl':'timed',
'resistiveDefrostCap':0,
'Curves': None
}
class hb_lspeedEvapCondParams(object):
def __init__(self):
self.lspeedevapCond = {
'name':'honeybee default 1 speed DX condenser',
'serviceType':0,
'evapEffectiveness':0.9,
'evapCondAirflowRate':'Autosize',
'evapPumpPower':'Autosize',
'storageTank':None,
'curves':None
}
class hb_hspeedEvapCondParams(object):
def __init__(self):
self.hspeedevapCond = {
'name':'honeybee default 1 speed DX condenser',
'serviceType':0,
'evapEffectiveness':0.9,
'evapCondAirflowRate':'Autosize',
'evapPumpPower':'Autosize',
'hiEvapEffectiveness':0.9,
'hiEvapCondAirflowRate':'Autosize',
'hiEvapPumpPower':'Autosize',
'storageTank':None,
'curves':None
}
letItFly = True
def checkGHPythonVersion(target = "0.6.0.3"):
currentVersion = int(ghenv.Version.ToString().replace(".", ""))
targetVersion = int(target.replace(".", ""))
if targetVersion > currentVersion: return False
else: return True
try:
downloadTemplate = checkIn.checkForUpdates(LB= False, HB= True, OpenStudio = True, template = True)
except:
# no internet connection
downloadTemplate = False
GHPythonTargetVersion = "0.6.0.3"
if not checkGHPythonVersion(GHPythonTargetVersion):
msg = "Honeybee failed to fly! :(\n" + \
"You are using an old version of GHPython. " +\
"Please update to version: " + GHPythonTargetVersion
print msg
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
letItFly = False
sc.sticky["honeybee_release"] = False
if letItFly:
if not sc.sticky.has_key("honeybee_release") or True:
w = gh.GH_RuntimeMessageLevel.Warning
sc.sticky["honeybee_release"] = versionCheck()
folders = hb_findFolders()
sc.sticky["honeybee_folders"] = {}
if folders.RADPath == None:
if os.path.isdir("c:\\radiance\\bin\\"):
folders.RADPath = "c:\\radiance\\bin\\"
else:
msg= "Honeybee cannot find RADIANCE folder on your system.\n" + \
"Make sure you have RADIANCE installed on your system.\n" + \
"You won't be able to run daylighting studies without RADIANCE.\n" + \
"A good place to install RADIANCE is c:\\radiance"
ghenv.Component.AddRuntimeMessage(w, msg)
folders.RADPath = ""
if folders.RADPath.find(" ") > -1:
msg = "There is a white space in RADIANCE filepath: " + folders.RADPath + "\n" + \
"Please install RADIANCE in a valid address (e.g. c:\\radiance)"
ghenv.Component.AddRuntimeMessage(w, msg)
# I should replace this with python methods in os library
# looks stupid!
if folders.RADPath.endswith("\\"): segmentNumber = -2
else: segmentNumber = -1
hb_RADLibPath = "\\".join(folders.RADPath.split("\\")[:segmentNumber]) + "\\lib"
sc.sticky["honeybee_folders"]["RADPath"] = folders.RADPath
sc.sticky["honeybee_folders"]["RADLibPath"] = hb_RADLibPath
if folders.DSPath == None:
if os.path.isdir("c:\\daysim\\bin\\"):
folders.DSPath = "c:\\daysim\\bin\\"
else:
msg= "Honeybee cannot find DAYSIM folder on your system.\n" + \
"Make sure you have DAYSIM installed on your system.\n" + \
"You won't be able to run annual climate-based daylighting studies without DAYSIM.\n" + \
"A good place to install DAYSIM is c:\\DAYSIM"
ghenv.Component.AddRuntimeMessage(w, msg)
folders.DSPath = ""
if folders.DSPath.find(" ") > -1:
msg = "There is a white space in DAYSIM filepath: " + folders.DSPath + "\n" + \
"Please install Daysim in a valid address (e.g. c:\\daysim)"
ghenv.Component.AddRuntimeMessage(w, msg)
if folders.DSPath.endswith("\\"): segmentNumber = -2
else: segmentNumber = -1
hb_DSCore = "\\".join(folders.DSPath.split("\\")[:segmentNumber])
hb_DSLibPath = "\\".join(folders.DSPath.split("\\")[:segmentNumber]) + "\\lib"
sc.sticky["honeybee_folders"]["DSPath"] = folders.DSPath
sc.sticky["honeybee_folders"]["DSCorePath"] = hb_DSCore
sc.sticky["honeybee_folders"]["DSLibPath"] = hb_DSLibPath
if folders.EPPath == None:
EPVersion = "V8-1-0"
if os.path.isdir("C:\EnergyPlus" + EPVersion + "\\"):
folders.EPPath = "C:\EnergyPlus" + EPVersion + "\\"
else:
msg= "Honeybee cannot find EnergyPlus" + EPVersion + " folder on your system.\n" + \
"Make sure you have EnergyPlus" + EPVersion + " installed on your system.\n" + \
"You won't be able to run energy simulations without EnergyPlus.\n" + \
"A good place to install EnergyPlus is c:\\EnergyPlus" + EPVersion
# I remove the warning for now until EP plugins are available
# It confuses the users
ghenv.Component.AddRuntimeMessage(w, msg)
folders.EPPath = "C:\EnergyPlus" + EPVersion + "\\"
sc.sticky["honeybee_folders"]["EPPath"] = folders.EPPath
sc.sticky["honeybee_RADMaterialAUX"] = RADMaterialAux
# set up radiance materials
sc.sticky["honeybee_RADMaterialAUX"](True)
# Download EP libraries
templateFilesPrep = PrepareTemplateEPLibFiles(downloadTemplate)
libFilePaths = templateFilesPrep.downloadTemplates()
if libFilePaths != -1:
EPLibs = HB_GetEPLibraries()
EPLibs.loadEPConstructionsAndMaterials(libFilePaths)
EPLibs.loadEPSchedules(libFilePaths)
else:
msg = "Failed to load EP constructions! You won't be able to run analysis with Honeybee!\n" + \
"Download the files from address below and copy them to: " + sc.sticky["Honeybee_DefaultFolder"] + \
"\nhttps://app.box.com/s/bh9sbpgajdtmmystv3n4"
print msg
ghenv.Component.AddRuntimeMessage(w, msg)
sc.sticky["honeybee_Hive"] = hb_Hive
sc.sticky["honeybee_GetEPLibs"] = HB_GetEPLibraries
sc.sticky["honeybee_DefaultMaterialLib"] = materialLibrary
sc.sticky["honeybee_DefaultScheduleLib"] = scheduleLibrary
sc.sticky["honeybee_DefaultSurfaceLib"] = EPSurfaceLib
sc.sticky["honeybee_EPMaterialAUX"] = EPMaterialAux
sc.sticky["honeybee_EPScheduleAUX"] = EPScheduleAux
sc.sticky["honeybee_EPObjectsAUX"] = EPObjectsAux
sc.sticky["honeybee_BuildingProgramsLib"] = BuildingProgramsLib
sc.sticky["honeybee_EPTypes"] = EPTypes()
sc.sticky["honeybee_EPZone"] = EPZone
sc.sticky["honeybee_reEvaluateHBZones"] = hb_reEvaluateHBZones
sc.sticky["honeybee_AirsideEconomizerParams"] = hb_airsideEconoParams
sc.sticky["honeybee_constantVolumeFanParams"] = hb_constVolFanParams
sc.sticky["honeybee_variableVolumeFanParams"] = hb_varVolFanParams
sc.sticky["honeybee_AirHandlerParams"] = hb_AirHandlerParams
sc.sticky["honeybee_2xDXCoilParams"] = hb_2xDXCoilParams
sc.sticky["honeybee_2xDXHeatingCoilParams"] = hb_2xDXHeatingCoilParams
sc.sticky["honeybee_1xDXCoilParams"] = hb_1xDXCoilParams
sc.sticky["honeybee_1xDXHeatingCoilParams"] = hb_1xDXHeatingCoilParams
sc.sticky["honeybee_lspeedevapcondParams"] = hb_lspeedEvapCondParams
sc.sticky["honeybee_hspeedevapcondParams"] = hb_hspeedEvapCondParams
sc.sticky["honeybee_hwBoilerParams"] = hb_hwBoilerParams
sc.sticky["honeybee_EPSurface"] = hb_EPSurface
sc.sticky["honeybee_EPShdSurface"] = hb_EPShdSurface
sc.sticky["honeybee_EPZoneSurface"] = hb_EPZoneSurface
sc.sticky["honeybee_EPFenSurface"] = hb_EPFenSurface
sc.sticky["honeybee_DLAnalysisRecipe"] = DLAnalysisRecipe
sc.sticky["honeybee_MeshToRAD"] = hb_MSHToRAD
sc.sticky["honeybee_WriteRAD"] = WriteRAD
sc.sticky["honeybee_WriteRADAUX"] = WriteRADAUX
sc.sticky["honeybee_WriteDS"] = WriteDS
sc.sticky["honeybee_RADParameters"] = hb_RADParameters
sc.sticky["honeybee_DSParameters"] = hb_DSParameters
sc.sticky["honeybee_EPParameters"] = hb_EnergySimulatioParameters
sc.sticky["honeybee_SerializeObjects"] = SerializeObjects
sc.sticky["honeybee_GridBasedDLResults"] = CalculateGridBasedDLAnalysisResults
sc.sticky["honeybee_DLAnalaysisTypes"] = {0: ["0: illuminance" , "lux"],
1: ["1: radiation" , "wh/m2"],
1.1: ["1.1: cumulative radiation" , "kWh/m2"],
2: ["2: luminance" , "cd/m2"],
3: ["3: DF", "%"],
4: ["4: VSC", "%"],
5: ["5: annual analysis", "var"]}
# done! sharing the happiness.
print "Hooohooho...Flying!!\nVviiiiiiizzz..."
|
[] |
[] |
[
"USERNAME",
"PATH"
] |
[]
|
["USERNAME", "PATH"]
|
python
| 2 | 0 | |
transport/nats/nats_test.go
|
package nats
import (
"os"
"strings"
"testing"
"github.com/geiqin/go-micro/server"
"github.com/geiqin/go-micro/transport"
"github.com/go-log/log"
"github.com/nats-io/nats.go"
)
var addrTestCases = []struct {
name string
description string
addrs map[string]string // expected address : set address
}{
{
"transportOption",
"set broker addresses through a transport.Option",
map[string]string{
"nats://192.168.10.1:5222": "192.168.10.1:5222",
"nats://10.20.10.0:4222": "10.20.10.0:4222"},
},
{
"natsOption",
"set broker addresses through the nats.Option",
map[string]string{
"nats://192.168.10.1:5222": "192.168.10.1:5222",
"nats://10.20.10.0:4222": "10.20.10.0:4222"},
},
{
"default",
"check if default Address is set correctly",
map[string]string{
"nats://127.0.0.1:4222": ""},
},
}
// This test will check if options (here nats addresses) set through either
// transport.Option or via nats.Option are successfully set.
func TestInitAddrs(t *testing.T) {
for _, tc := range addrTestCases {
t.Run(tc.name, func(t *testing.T) {
var tr transport.Transport
var addrs []string
for _, addr := range tc.addrs {
addrs = append(addrs, addr)
}
switch tc.name {
case "transportOption":
// we know that there are just two addrs in the dict
tr = NewTransport(transport.Addrs(addrs[0], addrs[1]))
case "natsOption":
nopts := nats.GetDefaultOptions()
nopts.Servers = addrs
tr = NewTransport(Options(nopts))
case "default":
tr = NewTransport()
}
ntport, ok := tr.(*ntport)
if !ok {
t.Fatal("Expected transport to be of type *ntport")
}
// check if the same amount of addrs we set has actually been set
if len(ntport.addrs) != len(tc.addrs) {
t.Errorf("Expected Addr count = %d, Actual Addr count = %d",
len(tc.addrs), len(ntport.addrs))
}
for _, addr := range ntport.addrs {
_, ok := tc.addrs[addr]
if !ok {
t.Errorf("Expected '%s' has not been set", addr)
}
}
})
}
}
var listenAddrTestCases = []struct {
name string
address string
mustPass bool
}{
{"default address", server.DefaultAddress, true},
{"nats.NewInbox", nats.NewInbox(), true},
{"correct service name", "micro.test.myservice", true},
{"several space chars", "micro.test.my new service", false},
{"one space char", "micro.test.my oldservice", false},
{"empty", "", false},
}
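// TestListenAddr needs a reachable NATS server and is skipped when NATS_URL is
// unset. An illustrative invocation (URL and package path are assumptions based
// on the defaults used in this file):
//   NATS_URL=nats://127.0.0.1:4222 go test ./transport/nats/ -run TestListenAddr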
func TestListenAddr(t *testing.T) {
natsURL := os.Getenv("NATS_URL")
if natsURL == "" {
log.Logf("NATS_URL is undefined - skipping tests")
return
}
for _, tc := range listenAddrTestCases {
t.Run(tc.address, func(t *testing.T) {
nOpts := nats.GetDefaultOptions()
nOpts.Servers = []string{natsURL}
nTport := ntport{
nopts: nOpts,
}
trListener, err := nTport.Listen(tc.address)
if err != nil {
if tc.mustPass {
t.Fatalf("%s (%s) is not allowed", tc.name, tc.address)
}
// correctly failed
return
}
if trListener.Addr() != tc.address {
// special case - since a string will always be returned
if tc.name == "default address" {
if strings.Contains(trListener.Addr(), "_INBOX.") {
return
}
}
t.Errorf("expected address %s but got %s", tc.address, trListener.Addr())
}
})
}
}
|
[
"\"NATS_URL\""
] |
[] |
[
"NATS_URL"
] |
[]
|
["NATS_URL"]
|
go
| 1 | 0 | |
examples/plot_geodesics_s2.py
|
"""
Plot a geodesic on the sphere S2
"""
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import geomstats.visualization as visualization
from geomstats.geometry.hypersphere import Hypersphere
SPHERE2 = Hypersphere(dimension=2)
METRIC = SPHERE2.metric
def main():
initial_point = [1., 0., 0.]
initial_tangent_vec = SPHERE2.projection_to_tangent_space(
vector=[1., 2., 0.8], base_point=initial_point)
geodesic = METRIC.geodesic(
initial_point=initial_point,
initial_tangent_vec=initial_tangent_vec)
n_steps = 10
t = np.linspace(0, 1, n_steps)
points = geodesic(t)
visualization.plot(points, space='S2')
plt.show()
if __name__ == "__main__":
if os.environ['GEOMSTATS_BACKEND'] == 'tensorflow':
logging.info('Examples with visualizations are only implemented '
'with numpy backend.\n'
'To change backend, write: '
'export GEOMSTATS_BACKEND = \'numpy\'.')
else:
main()
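# Illustrative invocation (backend name taken from the check above):
#   export GEOMSTATS_BACKEND=numpy
#   python examples/plot_geodesics_s2.py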
|
[] |
[] |
[
"GEOMSTATS_BACKEND"
] |
[]
|
["GEOMSTATS_BACKEND"]
|
python
| 1 | 0 | |
docs/conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# skorch documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 13 11:29:12 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# on_rtd is whether we are on readthedocs.org, this line of code grabbed
# from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'numpydoc',
'sphinx.ext.linkcode',
'sphinx.ext.autosummary',
'sphinx.ext.autosectionlabel',
'sphinx.ext.intersphinx',
]
numpydoc_class_members_toctree = False
intersphinx_mapping = {
'pytorch': ('https://pytorch.org/docs/stable/', None),
'sklearn': ('http://scikit-learn.org/stable/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'python': ('https://docs.python.org/3', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'skorch'
copyright = '2017, Marian Tietz, Daniel Nouri, Benjamin Bossan'
author = 'Marian Tietz, Daniel Nouri, Benjamin Bossan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '0.1'
# The full version, including alpha/beta/rc tags.
# release = '0.1'
with open('../VERSION', 'r') as f:
release = f.read().strip()
version = release.rsplit('.', 1)[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**tests**']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
def setup(app):
app.add_stylesheet('css/my_theme.css')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'skorchdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'skorch.tex', 'skorch Documentation',
'Marian Tietz, Daniel Nouri, Benjamin Bossan', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'skorch', 'skorch Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'skorch', 'skorch Documentation',
author, 'skorch', 'One line description of project.',
'Miscellaneous'),
]
# -- GitHub source code link ----------------------------------------------
# Functionality to build github source URI, taken from sklearn.
from operator import attrgetter
import inspect
import subprocess
from functools import partial
REVISION_CMD = 'git rev-parse --short HEAD'
def _get_git_revision():
try:
revision = subprocess.check_output(REVISION_CMD.split()).strip()
except (subprocess.CalledProcessError, OSError):
print('Failed to execute git to get revision')
return None
return revision.decode('utf-8')
def _linkcode_resolve(domain, info, package, url_fmt, revision):
"""Determine a link to online source for a class/method/function
This is called by sphinx.ext.linkcode
An example with a long-untouched module that everyone has
>>> _linkcode_resolve('py', {'module': 'tty',
... 'fullname': 'setraw'},
... package='tty',
... url_fmt='http://hg.python.org/cpython/file/'
... '{revision}/Lib/{package}/{path}#L{lineno}',
... revision='xxxx')
'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
"""
if revision is None:
return
if domain not in ('py', 'pyx'):
return
if not info.get('module') or not info.get('fullname'):
return
class_name = info['fullname'].split('.')[0]
if type(class_name) != str:
# Python 2 only
class_name = class_name.encode('utf-8')
module = __import__(info['module'], fromlist=[class_name])
obj = attrgetter(info['fullname'])(module)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
return
fn = os.path.relpath(fn,
start=os.path.dirname(__import__(package).__file__))
try:
lineno = inspect.getsourcelines(obj)[1]
except Exception:
lineno = ''
return url_fmt.format(revision=revision, package=package,
path=fn, lineno=lineno)
def project_linkcode_resolve(domain, info):
global _linkcode_git_revision
return _linkcode_resolve(domain, info,
package='skorch',
revision=_linkcode_git_revision,
url_fmt='https://github.com/skorch-dev/skorch/'
'blob/{revision}/'
'{package}/{path}#L{lineno}')
_linkcode_git_revision = _get_git_revision()
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = project_linkcode_resolve
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
pkg/exporters/redisc.go
|
package exporters
import (
"context"
"os"
"time"
"github.com/piqba/wallertme/pkg/errors"
"github.com/piqba/wallertme/pkg/logger"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"github.com/go-redis/redis/v8"
)
const (
// TXS_STREAM_KEY ...
TXS_STREAM_KEY = "txs"
)
var (
// ErrRedisDbCheckConn ...
ErrRedisDbCheckConn = errors.NewError("Redis: Fail to check connection")
// ErrRedisXADDStreamID ...
ErrRedisXADDStreamID = errors.NewError("ERR The ID specified in XADD is equal or smaller than the target stream top item")
)
// GetRedisDbClient ...
func GetRedisDbClient(ctx context.Context) *redis.Client {
_, span := otel.Tracer(nameRedisClient).Start(ctx, "GetRedisDbClient")
defer span.End()
clientInstance := redis.NewClient(&redis.Options{
Addr: os.Getenv("REDIS_URI"),
Username: "",
Password: os.Getenv("REDIS_PASS"),
DB: 0,
DialTimeout: 60 * time.Second,
ReadTimeout: 60 * time.Second,
WriteTimeout: 60 * time.Second,
})
_, err := clientInstance.Ping(context.TODO()).Result()
if err != nil {
span.RecordError(err)
span.SetStatus(codes.Error, err.Error())
logger.LogError(ErrRedisDbCheckConn.Error())
}
span.SetAttributes(attribute.String("create.redis.client", "Success"))
return clientInstance
}
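// Illustrative environment for GetRedisDbClient (values are placeholders):
//   REDIS_URI=localhost:6379
//   REDIS_PASS=my-redis-password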
|
[
"\"REDIS_URI\"",
"\"REDIS_PASS\""
] |
[] |
[
"REDIS_PASS",
"REDIS_URI"
] |
[]
|
["REDIS_PASS", "REDIS_URI"]
|
go
| 2 | 0 | |
provider/provider.go
|
package provider
import (
"fmt"
"os"
"github.com/lob/rack/pkg/structs"
"github.com/lob/rack/provider/aws"
"github.com/lob/rack/provider/local"
)
var Mock = &structs.MockProvider{}
// FromEnv returns a new Provider from env vars
func FromEnv() (structs.Provider, error) {
return FromName(os.Getenv("PROVIDER"))
}
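// FromName maps a provider name to an implementation: "aws", "local" or "test"
// (the mock). FromEnv therefore expects the PROVIDER environment variable to
// hold one of these names, e.g. PROVIDER=local.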
func FromName(name string) (structs.Provider, error) {
switch name {
case "aws":
return aws.FromEnv()
case "local":
return local.FromEnv()
case "test":
return Mock, nil
default:
return nil, fmt.Errorf("unknown provider: %s", name)
}
}
|
[
"\"PROVIDER\""
] |
[] |
[
"PROVIDER"
] |
[]
|
["PROVIDER"]
|
go
| 1 | 0 | |
pkg/vault/vault.go
|
// Copyright © 2018 Banzai Cloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package vault
import (
"errors"
"fmt"
"io/ioutil"
"os"
"runtime"
"strings"
"time"
"github.com/banzaicloud/bank-vaults/pkg/kv"
"github.com/hashicorp/vault/api"
"github.com/sirupsen/logrus"
"github.com/spf13/cast"
"github.com/spf13/viper"
)
// DefaultConfigFile is the name of the default config file
const DefaultConfigFile = "vault-config.yml"
// Config holds the configuration of the Vault initialization
type Config struct {
// how many key parts exist
SecretShares int
// how many of these parts are needed to unseal Vault (secretThreshold <= secretShares)
SecretThreshold int
// if this root token is set, the dynamically generated root token will be invalidated and this one created instead
InitRootToken string
// should the root token be stored in the keyStore
StoreRootToken bool
}
// vault is an implementation of the Vault interface that will perform actions
// against a Vault server, using a provided KMS to retrieve the unseal keys and root token.
type vault struct {
keyStore kv.Service
cl *api.Client
config *Config
}
// Interface check
var _ Vault = &vault{}
// Vault is an interface that can be used to attempt to perform actions against
// a Vault server.
type Vault interface {
Init() error
Sealed() (bool, error)
Unseal() error
Leader() (bool, error)
Configure() error
StepDownActive(string) error
}
// New returns a new vault Vault, or an error.
func New(k kv.Service, cl *api.Client, config Config) (Vault, error) {
if config.SecretShares < config.SecretThreshold {
return nil, errors.New("the secret threshold can't be bigger than the shares")
}
return &vault{
keyStore: k,
cl: cl,
config: &config,
}, nil
}
func (v *vault) Sealed() (bool, error) {
resp, err := v.cl.Sys().SealStatus()
if err != nil {
return false, fmt.Errorf("error checking status: %s", err.Error())
}
return resp.Sealed, nil
}
func (v *vault) Leader() (bool, error) {
resp, err := v.cl.Sys().Leader()
if err != nil {
return false, fmt.Errorf("error checking leader: %s", err.Error())
}
return resp.IsSelf, nil
}
// Unseal will attempt to unseal vault by retrieving keys from the kms service
// and sending unseal requests to vault. It will return an error if retrieving
// a key fails, or if the unseal progress is reset to 0 (indicating that a key
// was invalid).
func (v *vault) Unseal() error {
defer runtime.GC()
for i := 0; ; i++ {
keyID := v.unsealKeyForID(i)
logrus.Debugf("retrieving key from kms service...")
k, err := v.keyStore.Get(keyID)
if err != nil {
return fmt.Errorf("unable to get key '%s': %s", keyID, err.Error())
}
logrus.Debugf("sending unseal request to vault...")
resp, err := v.cl.Sys().Unseal(string(k))
if err != nil {
return fmt.Errorf("fail to send unseal request to vault: %s", err.Error())
}
logrus.Debugf("got unseal response: %+v", *resp)
if !resp.Sealed {
return nil
}
// if progress is 0, we failed to unseal vault.
if resp.Progress == 0 {
return fmt.Errorf("failed to unseal vault. progress reset to 0")
}
}
}
func (v *vault) keyStoreNotFound(key string) (bool, error) {
_, err := v.keyStore.Get(key)
if _, ok := err.(*kv.NotFoundError); ok {
return true, nil
}
return false, err
}
func (v *vault) keyStoreSet(key string, val []byte) error {
notFound, err := v.keyStoreNotFound(key)
if notFound {
return v.keyStore.Set(key, val)
} else if err == nil {
return fmt.Errorf("error setting key '%s': it already exists", key)
} else {
return fmt.Errorf("error setting key '%s': %s", key, err.Error())
}
}
// Init initializes Vault if is not initialized already
func (v *vault) Init() error {
initialized, err := v.cl.Sys().InitStatus()
if err != nil {
return fmt.Errorf("error testing if vault is initialized: %s", err.Error())
}
if initialized {
logrus.Info("vault is already initialized")
return nil
}
logrus.Info("initializing vault")
// test backend first
err = v.keyStore.Test(v.testKey())
if err != nil {
return fmt.Errorf("error testing keystore before init: %s", err.Error())
}
// test for existing keys
keys := []string{
v.rootTokenKey(),
}
// add unseal keys
for i := 0; i <= v.config.SecretShares; i++ {
keys = append(keys, v.unsealKeyForID(i))
}
// test every key
for _, key := range keys {
notFound, err := v.keyStoreNotFound(key)
if notFound && err != nil {
return fmt.Errorf("error before init: checking key '%s' failed: %s", key, err.Error())
} else if !notFound && err == nil {
return fmt.Errorf("error before init: keystore value for '%s' already exists", key)
}
}
resp, err := v.cl.Sys().Init(&api.InitRequest{
SecretShares: v.config.SecretShares,
SecretThreshold: v.config.SecretThreshold,
})
if err != nil {
return fmt.Errorf("error initializing vault: %s", err.Error())
}
for i, k := range resp.Keys {
keyID := v.unsealKeyForID(i)
err := v.keyStoreSet(keyID, []byte(k))
if err != nil {
return fmt.Errorf("error storing unseal key '%s': %s", keyID, err.Error())
}
logrus.WithField("key", keyID).Info("unseal key stored in key store")
}
rootToken := resp.RootToken
// this sets up a predefined root token
if v.config.InitRootToken != "" {
logrus.Info("setting up init root token, waiting for vault to be unsealed")
count := 0
wait := time.Second * 2
for {
sealed, err := v.Sealed()
if !sealed {
break
}
if err == nil {
logrus.Info("vault still sealed, wait for unsealing")
} else {
logrus.Infof("vault not reachable: %s", err.Error())
}
count++
time.Sleep(wait)
}
// use temporary token
v.cl.SetToken(resp.RootToken)
// setup root token with provided key
_, err := v.cl.Auth().Token().CreateOrphan(&api.TokenCreateRequest{
ID: v.config.InitRootToken,
Policies: []string{"root"},
DisplayName: "root-token",
NoParent: true,
})
if err != nil {
return fmt.Errorf("unable to setup requested root token, (temporary root token: '%s'): %s", resp.RootToken, err)
}
// revoke the temporary token
err = v.cl.Auth().Token().RevokeSelf(resp.RootToken)
if err != nil {
return fmt.Errorf("unable to revoke temporary root token: %s", err.Error())
}
rootToken = v.config.InitRootToken
}
if v.config.StoreRootToken {
rootTokenKey := v.rootTokenKey()
if err = v.keyStoreSet(rootTokenKey, []byte(resp.RootToken)); err != nil {
return fmt.Errorf("error storing root token '%s' in key '%s'", rootToken, rootTokenKey)
}
logrus.WithField("key", rootTokenKey).Info("root token stored in key store")
} else if v.config.InitRootToken == "" {
logrus.WithField("root-token", resp.RootToken).Warnf("won't store root token in key store, this token grants full privileges to vault, so keep this secret")
}
return nil
}
func (v *vault) StepDownActive(address string) error {
logrus.Debugf("retrieving key from kms service...")
rootToken, err := v.keyStore.Get(v.rootTokenKey())
if err != nil {
return fmt.Errorf("unable to get key '%s': %s", v.rootTokenKey(), err.Error())
}
v.cl.SetToken(string(rootToken))
// Clear the token and GC it
defer runtime.GC()
defer v.cl.SetToken("")
defer func() { rootToken = nil }()
v.cl.SetAddress(address)
return v.cl.Sys().StepDown()
}
func (v *vault) Configure() error {
logrus.Debugf("retrieving key from kms service...")
rootToken, err := v.keyStore.Get(v.rootTokenKey())
if err != nil {
return fmt.Errorf("unable to get key '%s': %s", v.rootTokenKey(), err.Error())
}
v.cl.SetToken(string(rootToken))
// Clear the token and GC it
defer runtime.GC()
defer v.cl.SetToken("")
defer func() { rootToken = nil }()
existingAuths, err := v.cl.Sys().ListAuth()
if err != nil {
return fmt.Errorf("error listing auth backends vault: %s", err.Error())
}
authMethods := []map[string]interface{}{}
err = viper.UnmarshalKey("auth", &authMethods)
if err != nil {
return fmt.Errorf("error unmarshalling vault auth methods config: %s", err.Error())
}
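// The "auth" value unmarshalled above is a list of auth-method blocks; a rough,
// illustrative vault-config.yml sketch (all concrete values are placeholders):
//   auth:
//     - type: kubernetes
//       roles:
//         - name: default
//   policies:
//     - name: allow_secrets
//       rules: path "secret/*" { capabilities = ["read"] }
//   secrets:
//     - type: kv
//       path: secret
// The "policies" and "secrets" keys are consumed below by configurePolicies and
// configureSecretEngines.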
for _, authMethod := range authMethods {
authMethodType, err := cast.ToStringE(authMethod["type"])
if err != nil {
return fmt.Errorf("error finding auth method type: %s", err.Error())
}
path := authMethodType
if pathOverwrite, ok := authMethod["path"]; ok {
path, err = cast.ToStringE(pathOverwrite)
if err != nil {
return fmt.Errorf("error converting path for auth method: %s", err.Error())
}
}
// Check and skip existing auth mounts
exists := false
if authMount, ok := existingAuths[path+"/"]; ok {
if authMount.Type == authMethodType {
logrus.Debugf("%s auth backend is already mounted in vault", authMethodType)
exists = true
}
}
if !exists {
logrus.Debugf("enabling %s auth backend in vault...", authMethodType)
// https://www.vaultproject.io/api/system/auth.html
options := api.EnableAuthOptions{
Type: authMethodType,
}
err := v.cl.Sys().EnableAuthWithOptions(path, &options)
if err != nil {
return fmt.Errorf("error enabling %s auth method for vault: %s", authMethodType, err.Error())
}
}
switch authMethodType {
case "kubernetes":
config, err := getOrDefaultStringMap(authMethod, "config")
if err != nil {
return fmt.Errorf("error finding config block for kubernetes: %s", err.Error())
}
defaultConfig, err := v.kubernetesAuthConfigDefault()
if err != nil {
return fmt.Errorf("error getting default kubernetes auth config for vault: %s", err.Error())
}
// merge the config blocks
for k, v := range config {
defaultConfig[k] = v
}
config = defaultConfig
err = v.kubernetesAuthConfig(path, config)
if err != nil {
return fmt.Errorf("error configuring kubernetes auth for vault: %s", err.Error())
}
roles := authMethod["roles"].([]interface{})
err = v.configureKubernetesRoles(roles)
if err != nil {
return fmt.Errorf("error configuring kubernetes auth roles for vault: %s", err.Error())
}
case "github":
config, err := cast.ToStringMapE(authMethod["config"])
if err != nil {
return fmt.Errorf("error finding config block for github: %s", err.Error())
}
err = v.configureGithubConfig(config)
if err != nil {
return fmt.Errorf("error configuring github auth for vault: %s", err.Error())
}
mappings, err := cast.ToStringMapE(authMethod["map"])
if err != nil {
return fmt.Errorf("error finding map block for github: %s", err.Error())
}
err = v.configureGithubMappings(mappings)
if err != nil {
return fmt.Errorf("error configuring github mappings for vault: %s", err.Error())
}
case "aws":
config, err := cast.ToStringMapE(authMethod["config"])
if err != nil {
return fmt.Errorf("error finding config block for aws: %s", err.Error())
}
err = v.configureAwsConfig(config)
if err != nil {
return fmt.Errorf("error configuring aws auth for vault: %s", err.Error())
}
if crossaccountroleRaw, ok := authMethod["crossaccountrole"]; ok {
crossaccountrole, err := cast.ToSliceE(crossaccountroleRaw)
if err != nil {
return fmt.Errorf("error finding crossaccountrole block for aws: %s", err.Error())
}
err = v.configureAWSCrossAccountRoles(crossaccountrole)
if err != nil {
return fmt.Errorf("error configuring aws auth cross account roles for vault: %s", err.Error())
}
}
roles, err := cast.ToSliceE(authMethod["roles"])
if err != nil {
return fmt.Errorf("error finding roles block for aws: %s", err.Error())
}
err = v.configureAwsRoles(roles)
if err != nil {
return fmt.Errorf("error configuring aws auth roles for vault: %s", err.Error())
}
case "ldap":
config, err := cast.ToStringMapE(authMethod["config"])
if err != nil {
return fmt.Errorf("error finding config block for ldap: %s", err.Error())
}
err = v.configureLdapConfig(config)
if err != nil {
return fmt.Errorf("error configuring ldap auth for vault: %s", err.Error())
}
if groupsRaw, ok := authMethod["groups"]; ok {
groups, err := cast.ToStringMapE(groupsRaw)
if err != nil {
return fmt.Errorf("error finding groups block for ldap: %s", err.Error())
}
err = v.configureLdapMappings("groups", groups)
if err != nil {
return fmt.Errorf("error configuring ldap groups for vault: %s", err.Error())
}
}
if usersRaw, ok := authMethod["users"]; ok {
users, err := cast.ToStringMapE(usersRaw)
if err != nil {
return fmt.Errorf("error finding users block for ldap: %s", err.Error())
}
err = v.configureLdapMappings("users", users)
if err != nil {
return fmt.Errorf("error configuring ldap users for vault: %s", err.Error())
}
}
}
}
err = v.configurePolicies()
if err != nil {
return fmt.Errorf("error configuring policies for vault: %s", err.Error())
}
err = v.configureSecretEngines()
if err != nil {
return fmt.Errorf("error configuring secret engines for vault: %s", err.Error())
}
return err
}
func (*vault) unsealKeyForID(i int) string {
return fmt.Sprint("vault-unseal-", i)
}
func (*vault) rootTokenKey() string {
return fmt.Sprint("vault-root")
}
func (*vault) testKey() string {
return fmt.Sprint("vault-test")
}
func (v *vault) kubernetesAuthConfigDefault() (map[string]interface{}, error) {
kubernetesCACert, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")
if err != nil {
return nil, err
}
tokenReviewerJWT, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
if err != nil {
return nil, err
}
config := map[string]interface{}{
"kubernetes_host": fmt.Sprint("https://", os.Getenv("KUBERNETES_SERVICE_HOST")),
"kubernetes_ca_cert": string(kubernetesCACert),
"token_reviewer_jwt": string(tokenReviewerJWT),
}
return config, err
}
func (v *vault) kubernetesAuthConfig(path string, config map[string]interface{}) error {
_, err := v.cl.Logical().Write(fmt.Sprintf("auth/%s/config", path), config)
if err != nil {
return fmt.Errorf("error putting %s kubernetes config into vault: %s", config, err.Error())
}
return nil
}
func (v *vault) configurePolicies() error {
policies := []map[string]string{}
err := viper.UnmarshalKey("policies", &policies)
if err != nil {
return fmt.Errorf("error unmarshalling vault policy config: %s", err.Error())
}
for _, policy := range policies {
err := v.cl.Sys().PutPolicy(policy["name"], policy["rules"])
if err != nil {
return fmt.Errorf("error putting %s policy into vault: %s", policy["name"], err.Error())
}
}
return nil
}
func (v *vault) configureKubernetesRoles(roles []interface{}) error {
for _, roleInterface := range roles {
role, err := cast.ToStringMapE(roleInterface)
if err != nil {
return fmt.Errorf("error converting role for kubernetes: %s", err.Error())
}
_, err = v.cl.Logical().Write(fmt.Sprint("auth/kubernetes/role/", role["name"]), role)
if err != nil {
return fmt.Errorf("error putting %s kubernetes role into vault: %s", role["name"], err.Error())
}
}
return nil
}
func (v *vault) configureGithubConfig(config map[string]interface{}) error {
// https://www.vaultproject.io/api/auth/github/index.html
_, err := v.cl.Logical().Write("auth/github/config", config)
if err != nil {
return fmt.Errorf("error putting %s github config into vault: %s", config, err.Error())
}
return nil
}
func (v *vault) configureGithubMappings(mappings map[string]interface{}) error {
for mappingType, mapping := range mappings {
mapping, err := cast.ToStringMapStringE(mapping)
if err != nil {
return fmt.Errorf("error converting mapping for github: %s", err.Error())
}
for userOrTeam, policy := range mapping {
_, err := v.cl.Logical().Write(fmt.Sprintf("auth/github/map/%s/%s", mappingType, userOrTeam), map[string]interface{}{"value": policy})
if err != nil {
return fmt.Errorf("error putting %s github mapping into vault: %s", mappingType, err.Error())
}
}
}
return nil
}
func (v *vault) configureAwsConfig(config map[string]interface{}) error {
// https://www.vaultproject.io/api/auth/aws/index.html
_, err := v.cl.Logical().Write("auth/aws/config/client", config)
if err != nil {
return fmt.Errorf("error putting %s aws config into vault: %s", config, err.Error())
}
return nil
}
func (v *vault) configureAwsRoles(roles []interface{}) error {
for _, roleInterface := range roles {
role, err := cast.ToStringMapE(roleInterface)
if err != nil {
return fmt.Errorf("error converting roles for aws: %s", err.Error())
}
_, err = v.cl.Logical().Write(fmt.Sprint("auth/aws/role/", role["name"]), role)
if err != nil {
return fmt.Errorf("error putting %s aws role into vault: %s", role["name"], err.Error())
}
}
return nil
}
func (v *vault) configureAWSCrossAccountRoles(crossAccountRoles []interface{}) error {
for _, roleInterface := range crossAccountRoles {
crossAccountRole, err := cast.ToStringMapE(roleInterface)
if err != nil {
return fmt.Errorf("error converting cross account aws roles for aws: %s", err.Error())
}
_, err = v.cl.Logical().Write(fmt.Sprint("auth/aws/config/sts/", crossAccountRole["sts_account"]), crossAccountRole)
if err != nil {
return fmt.Errorf("error putting %s cross account aws role into vault: %s", crossAccountRole["sts_account"], err.Error())
}
}
return nil
}
func (v *vault) configureLdapConfig(config map[string]interface{}) error {
// https://www.vaultproject.io/api/auth/ldap/index.html
_, err := v.cl.Logical().Write("auth/ldap/config", config)
if err != nil {
return fmt.Errorf("error putting %s ldap config into vault: %s", config, err.Error())
}
return nil
}
func (v *vault) configureLdapMappings(mappingType string, mappings map[string]interface{}) error {
for userOrGroup, policy := range mappings {
mapping, err := cast.ToStringMapE(policy)
if err != nil {
return fmt.Errorf("error converting mapping for ldap: %s", err.Error())
}
_, err = v.cl.Logical().Write(fmt.Sprintf("auth/ldap/%s/%s", mappingType, userOrGroup), mapping)
if err != nil {
return fmt.Errorf("error putting %s ldap mapping into vault: %s", mappingType, err.Error())
}
}
return nil
}
func (v *vault) configureSecretEngines() error {
secretsEngines := []map[string]interface{}{}
err := viper.UnmarshalKey("secrets", &secretsEngines)
if err != nil {
return fmt.Errorf("error unmarshalling vault secrets config: %s", err.Error())
}
for _, secretEngine := range secretsEngines {
secretEngineType, err := cast.ToStringE(secretEngine["type"])
if err != nil {
return fmt.Errorf("error finding type for secret engine: %s", err.Error())
}
path := secretEngineType
if pathOverwrite, ok := secretEngine["path"]; ok {
path, err = cast.ToStringE(pathOverwrite)
if err != nil {
return fmt.Errorf("error converting path for secret engine: %s", err.Error())
}
}
mounts, err := v.cl.Sys().ListMounts()
if err != nil {
return fmt.Errorf("error reading mounts from vault: %s", err.Error())
}
logrus.Debugf("Already existing mounts: %#v\n", mounts)
if mounts[path+"/"] == nil {
description, err := getOrDefault(secretEngine, "description")
if err != nil {
return fmt.Errorf("error getting description for secret engine: %s", err.Error())
}
pluginName, err := getOrDefault(secretEngine, "plugin_name")
if err != nil {
return fmt.Errorf("error getting plugin_name for secret engine: %s", err.Error())
}
options, err := getOrDefaultStringMapString(secretEngine, "options")
if err != nil {
return fmt.Errorf("error getting options for secret engine: %s", err.Error())
}
input := api.MountInput{
Type: secretEngineType,
Description: description,
PluginName: pluginName,
Options: options,
}
logrus.Infof("Mounting secret engine with input: %#v\n", input)
err = v.cl.Sys().Mount(path, &input)
if err != nil {
return fmt.Errorf("error mounting %s into vault: %s", path, err.Error())
}
logrus.Infoln("mounted", secretEngineType, "to", path)
} else {
options, err := getOrDefaultStringMapString(secretEngine, "options")
if err != nil {
return fmt.Errorf("error getting options for secret engine: %s", err.Error())
}
input := api.MountConfigInput{
Options: options,
}
err = v.cl.Sys().TuneMount(path, input)
if err != nil {
return fmt.Errorf("error tuning %s in vault: %s", path, err.Error())
}
}
// Configuration of the Secret Engine in a very generic manner, YAML config file should have the proper format
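// An illustrative (assumed) shape of such a block, matching the loops below; the keys
// under each named entry are engine-specific and only "name" is required here:
//
//   secrets:
//     - type: database
//       configuration:
//         config:
//           - name: my-postgres
//             plugin_name: postgresql-database-plugin
//         roles:
//           - name: readonly
//             db_name: my-postgres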
configuration, err := getOrDefaultStringMap(secretEngine, "configuration")
if err != nil {
return fmt.Errorf("error getting configuration for secret engine: %s", err.Error())
}
for configOption, configData := range configuration {
configData, err := cast.ToSliceE(configData)
if err != nil {
return fmt.Errorf("error converting config data for secret engine: %s", err.Error())
}
for _, subConfigData := range configData {
subConfigData, err := cast.ToStringMapE(subConfigData)
if err != nil {
return fmt.Errorf("error converting sub config data for secret engine: %s", err.Error())
}
name, ok := subConfigData["name"]
if !ok {
return fmt.Errorf("error finding sub config data name for secret engine")
}
configPath := fmt.Sprintf("%s/%s/%s", path, configOption, name)
_, err = v.cl.Logical().Write(configPath, subConfigData)
if err != nil {
if isOverwriteProhibitedError(err) {
logrus.Debugln("Can't reconfigure", configPath, "please delete it manually")
continue
}
return fmt.Errorf("error putting %+v -> %s config into vault: %s", configData, configPath, err.Error())
}
}
}
}
return nil
}
func getOrDefault(m map[string]interface{}, key string) (string, error) {
value := m[key]
if value != nil {
return cast.ToStringE(value)
}
return "", nil
}
func getOrDefaultStringMapString(m map[string]interface{}, key string) (map[string]string, error) {
value := m[key]
if value != nil {
return cast.ToStringMapStringE(value)
}
return map[string]string{}, nil
}
func getOrDefaultStringMap(m map[string]interface{}, key string) (map[string]interface{}, error) {
value := m[key]
if value != nil {
return cast.ToStringMapE(value)
}
return map[string]interface{}{}, nil
}
func isOverwriteProhibitedError(err error) bool {
return strings.Contains(err.Error(), "delete them before reconfiguring")
}
|
[
"\"KUBERNETES_SERVICE_HOST\""
] |
[] |
[
"KUBERNETES_SERVICE_HOST"
] |
[]
|
["KUBERNETES_SERVICE_HOST"]
|
go
| 1 | 0 | |
example/main.go
|
package main
import (
"fmt"
"log"
"net/http"
"os"
"github.com/mtraver/gaelog"
)
// wrappedHandler must be wrapped using gaelog.Wrap or gaelog.WrapWithID so that the
// request context can be used with the package-level logging functions.
type wrappedHandler struct{}
func (h wrappedHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
ctx := r.Context()
gaelog.Debugf(ctx, "Debug")
gaelog.Infof(ctx, "Info")
gaelog.Noticef(ctx, "Notice")
gaelog.Warningf(ctx, "Warning")
gaelog.Errorf(ctx, "Error")
gaelog.Criticalf(ctx, "Critical")
gaelog.Alertf(ctx, "Alert")
gaelog.Emergencyf(ctx, "Emergency")
message := struct {
Places []string
}{
[]string{"Kings Canyon", "Sequoia", "Yosemite", "Death Valley"},
}
gaelog.Info(ctx, message)
fmt.Fprintf(w, "Hello!")
}
// manualHandler creates and closes a logger manually. This usage does not require
// gaelog.Wrap or gaelog.WrapWithID.
func manualHandler(w http.ResponseWriter, r *http.Request) {
lg, err := gaelog.New(r)
if err != nil {
// The returned logger is valid despite the error. It falls back to logging
// via the standard library's "log" package.
lg.Errorf("Failed to make logger: %v", err)
}
defer lg.Close()
lg.Warningf("Some important info right here, that's for sure")
fmt.Fprintf(w, "Hello!")
}
func main() {
// Wrap the handler.
http.Handle("/", gaelog.Wrap(wrappedHandler{}))
http.HandleFunc("/manual", manualHandler)
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", port), nil))
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
config/backend_test.go
|
/*
* Copyright © 2015-2018 Aeneas Rekkas <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @author Aeneas Rekkas <[email protected]>
* @copyright 2015-2018 Aeneas Rekkas <[email protected]>
* @license Apache-2.0
*/
package config
import (
"flag"
"fmt"
"log"
"os"
"strings"
"testing"
"time"
"github.com/ory/fosite"
"github.com/ory/hydra/client"
"github.com/ory/hydra/jwk"
"github.com/ory/hydra/pkg"
"github.com/sirupsen/logrus"
)
type testCase struct {
name string
b BackendConnector
u string
}
var (
tests = []testCase{
{
"memory",
&MemoryBackend{},
"memory",
},
}
l = logrus.New()
hasher = &fosite.BCrypt{WorkFactor: 8}
encryptionKey, _ = jwk.RandomBytes(32)
cipher = &jwk.AEAD{Key: encryptionKey}
)
func TestMain(m *testing.M) {
flag.Parse()
if !testing.Short() {
if uri := os.Getenv("TEST_DATABASE_POSTGRESQL"); uri != "" {
tests = append(tests, testCase{"postgresql", &SQLBackend{}, uri})
} else {
log.Println("Did not find postgresql test database config, skipping backend connector test")
}
if uri := os.Getenv("TEST_DATABASE_MYSQL"); uri != "" {
if !strings.HasPrefix(uri, "mysql") {
uri = fmt.Sprintf("mysql://%s", uri)
}
tests = append(tests, testCase{"mysql", &SQLBackend{}, uri})
} else {
log.Println("Did not find mysql test database config, skipping backend connector test")
}
}
os.Exit(m.Run())
}
func TestBackendConnectors(t *testing.T) {
for _, tc := range tests {
var cm client.Manager
var fs pkg.FositeStorer
t.Run(fmt.Sprintf("%s/Init", tc.name), func(t *testing.T) {
if err := tc.b.Init(tc.u, l); err != nil {
t.Fatalf("could not initialize backend due to error: %v", err)
}
})
t.Run(fmt.Sprintf("%s/Ping", tc.name), func(t *testing.T) {
if err := tc.b.Ping(); err != nil {
t.Errorf("could not ping backend due to error: %v", err)
}
})
t.Run(fmt.Sprintf("%s/NewClientManager", tc.name), func(t *testing.T) {
if cm = tc.b.NewClientManager(hasher); cm == nil {
t.Errorf("expected non-nil result")
}
})
t.Run(fmt.Sprintf("%s/NewOAuth2Manager", tc.name), func(t *testing.T) {
if fs = tc.b.NewOAuth2Manager(cm, time.Hour, "opaque"); fs == nil {
t.Errorf("expected non-nil result")
}
})
t.Run(fmt.Sprintf("%s/NewConsentManager", tc.name), func(t *testing.T) {
if want := tc.b.NewConsentManager(cm, fs); want == nil {
t.Errorf("expected non-nil result")
}
})
t.Run(fmt.Sprintf("%s/NewJWKManager", tc.name), func(t *testing.T) {
if want := tc.b.NewJWKManager(cipher); want == nil {
t.Errorf("expected non-nil result")
}
})
t.Run(fmt.Sprintf("%s/Prefixes", tc.name), func(t *testing.T) {
prefixes := tc.b.Prefixes()
for _, prefix := range prefixes {
if strings.HasPrefix(tc.u, prefix) {
return
}
}
t.Errorf("did not find matching prefix for given backend uri")
})
}
}
|
[
"\"TEST_DATABASE_POSTGRESQL\"",
"\"TEST_DATABASE_MYSQL\""
] |
[] |
[
"TEST_DATABASE_POSTGRESQL",
"TEST_DATABASE_MYSQL"
] |
[]
|
["TEST_DATABASE_POSTGRESQL", "TEST_DATABASE_MYSQL"]
|
go
| 2 | 0 | |
dump/python3.6/index.py
|
import os
import sys
import subprocess
import json
def handler(event, context):
bucket = os.environ['BUCKET']
subprocess.call(['sh', '-c', f'lambda-dump -bucket {bucket} -key fs/__ARCH__/python3.6.tgz'])
info = {
'sys.executable': sys.executable,
'sys.argv': sys.argv,
'sys.path': sys.path,
'os.getcwd': os.getcwd(),
'__file__': __file__,
'os.environ': {k: str(v) for k, v in os.environ.items()},
'context': {k: str(v) for k, v in context.__dict__.items()},
}
print(json.dumps(info, indent=2))
return {}
|
[] |
[] |
[
"BUCKET"
] |
[]
|
["BUCKET"]
|
python
| 1 | 0 | |
zbar/test_package/conanfile.py
|
from conans import ConanFile, CMake
import os
channel = os.getenv("CONAN_CHANNEL", "testing")
username = os.getenv("CONAN_USERNAME", "popescu-af")
class ZbarTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
requires = "zbar/0.10.0@%s/%s" % (username, channel)
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.configure(source_dir=self.conanfile_directory, build_dir="./")
cmake.build()
def imports(self):
self.copy("*.dll", "bin", "lib")
self.copy("*.dylib", "bin", "lib")
def test(self):
os.chdir("bin")
self.run(".%szbar_test" % os.sep)
|
[] |
[] |
[
"CONAN_USERNAME",
"CONAN_CHANNEL"
] |
[]
|
["CONAN_USERNAME", "CONAN_CHANNEL"]
|
python
| 2 | 0 | |
fileserver.go
|
package main
import (
"bufio"
"bytes"
"errors"
"flag"
"fmt"
"image"
"image/jpeg"
"image/png"
"io"
"io/ioutil"
slog "log"
random "math/rand"
"mime/multipart"
"net/http"
_ "net/http/pprof"
"net/smtp"
"net/url"
"os"
"os/signal"
"path"
"path/filepath"
"regexp"
"runtime"
"runtime/debug"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"unsafe"
"github.com/astaxie/beego/httplib"
"github.com/deckarep/golang-set"
_ "github.com/eventials/go-tus"
"github.com/json-iterator/go"
"github.com/nfnt/resize"
"github.com/sjqzhang/googleAuthenticator"
"github.com/sjqzhang/goutil"
log "github.com/sjqzhang/seelog"
"github.com/sjqzhang/tusd"
"github.com/sjqzhang/tusd/filestore"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
var staticHandler http.Handler
var json = jsoniter.ConfigCompatibleWithStandardLibrary
var server *Server
var logacc log.LoggerInterface
var FOLDERS = []string{DATA_DIR, STORE_DIR, CONF_DIR, STATIC_DIR}
var CONST_QUEUE_SIZE = 10000
var (
VERSION string
BUILD_TIME string
GO_VERSION string
GIT_VERSION string
v = flag.Bool("v", false, "display version")
)
var (
FileName string
ptr unsafe.Pointer
DOCKER_DIR = ""
STORE_DIR = STORE_DIR_NAME
CONF_DIR = CONF_DIR_NAME
LOG_DIR = LOG_DIR_NAME
DATA_DIR = DATA_DIR_NAME
STATIC_DIR = STATIC_DIR_NAME
LARGE_DIR_NAME = "haystack"
LARGE_DIR = STORE_DIR + "/haystack"
CONST_LEVELDB_FILE_NAME = DATA_DIR + "/fileserver.db"
CONST_LOG_LEVELDB_FILE_NAME = DATA_DIR + "/log.db"
CONST_STAT_FILE_NAME = DATA_DIR + "/stat.json"
CONST_CONF_FILE_NAME = CONF_DIR + "/cfg.json"
CONST_SEARCH_FILE_NAME = DATA_DIR + "/search.txt"
logConfigStr = `
<seelog type="asynctimer" asyncinterval="1000" minlevel="trace" maxlevel="error">
<outputs formatid="common">
<buffered formatid="common" size="1048576" flushperiod="1000">
<rollingfile type="size" filename="{DOCKER_DIR}log/fileserver.log" maxsize="104857600" maxrolls="10"/>
</buffered>
</outputs>
<formats>
<format id="common" format="%Date %Time [%LEV] [%File:%Line] [%Func] %Msg%n" />
</formats>
</seelog>
`
logAccessConfigStr = `
<seelog type="asynctimer" asyncinterval="1000" minlevel="trace" maxlevel="error">
<outputs formatid="common">
<buffered formatid="common" size="1048576" flushperiod="1000">
<rollingfile type="size" filename="{DOCKER_DIR}log/access.log" maxsize="104857600" maxrolls="10"/>
</buffered>
</outputs>
<formats>
<format id="common" format="%Date %Time [%LEV] [%File:%Line] [%Func] %Msg%n" />
</formats>
</seelog>
`
)
const (
STORE_DIR_NAME = "files"
LOG_DIR_NAME = "log"
DATA_DIR_NAME = "data"
CONF_DIR_NAME = "conf"
STATIC_DIR_NAME = "static"
CONST_STAT_FILE_COUNT_KEY = "fileCount"
CONST_BIG_UPLOAD_PATH_SUFFIX = "/big/upload/"
CONST_STAT_FILE_TOTAL_SIZE_KEY = "totalSize"
CONST_Md5_ERROR_FILE_NAME = "errors.md5"
CONST_Md5_QUEUE_FILE_NAME = "queue.md5"
CONST_FILE_Md5_FILE_NAME = "files.md5"
CONST_REMOME_Md5_FILE_NAME = "removes.md5"
CONST_SMALL_FILE_SIZE = 1024 * 1024
CONST_MESSAGE_CLUSTER_IP = "Can only be called by the cluster ip or 127.0.0.1 or admin_ips(cfg.json),current ip:%s"
cfgJson = `{
"绑定端号": "端口",
"addr": ":8080",
"PeerID": "集群内唯一,请使用0-9的单字符,默认自动生成",
"peer_id": "%s",
"本主机地址": "本机http地址,默认自动生成(注意端口必须与addr中的端口一致),必段为内网,自动生成不为内网请自行修改,下同",
"host": "%s",
"集群": "集群列表,注意为了高可用,IP必须不能是同一个,同一不会自动备份,且不能为127.0.0.1,且必须为内网IP,默认自动生成",
"peers": ["%s"],
"组号": "用于区别不同的集群(上传或下载)与support_group_manage配合使用,带在下载路径中",
"group": "group1",
"是否支持按组(集群)管理,主要用途是Nginx支持多集群": "默认不支持,不支持时路径为http://10.1.5.4:8080/action,支持时为http://10.1.5.4:8080/group(配置中的group参数)/action,action为动作名,如status,delete,sync等",
"support_group_manage": false,
"是否合并小文件": "默认不合并,合并可以解决inode不够用的情况(当前对于小于1M文件)进行合并",
"enable_merge_small_file": false,
"允许后缀名": "允许可以上传的文件后缀名,如jpg,jpeg,png等。留空允许所有。",
"extensions": [],
"重试同步失败文件的时间": "单位秒",
"refresh_interval": 1800,
"是否自动重命名": "默认不自动重命名,使用原文件名",
"rename_file": false,
"是否支持web上传,方便调试": "默认支持web上传",
"enable_web_upload": true,
"是否支持非日期路径": "默认支持非日期路径,也即支持自定义路径,需要上传文件时指定path",
"enable_custom_path": true,
"下载域名": "用于外网下载文件的域名,不包含http://",
"download_domain": "",
"场景列表": "当设定后,用户指的场景必项在列表中,默认不做限制(注意:如果想开启场景认功能,格式如下:'场景名:googleauth_secret' 如 default:N7IET373HB2C5M6D ",
"scenes": [],
"默认场景": "默认default",
"default_scene": "default",
"是否显示目录": "默认显示,方便调试用,上线时请关闭",
"show_dir": true,
"邮件配置": "",
"mail": {
"user": "[email protected]",
"password": "abc",
"host": "smtp.163.com:25"
},
"告警接收邮件列表": "接收人数组",
"alarm_receivers": [],
"告警接收URL": "方法post,参数:subject,message",
"alarm_url": "",
"下载是否需带token": "真假",
"download_use_token": false,
"下载token过期时间": "单位秒",
"download_token_expire": 600,
"是否自动修复": "在超过1亿文件时出现性能问题,取消此选项,请手动按天同步,请查看FAQ",
"auto_repair": true,
"文件去重算法md5可能存在冲突,默认md5": "sha1|md5",
"file_sum_arithmetic": "md5",
"管理ip列表": "用于管理集的ip白名单,",
"admin_ips": ["127.0.0.1"],
"是否启用迁移": "默认不启用",
"enable_migrate": false,
"文件是否去重": "默认去重",
"enable_distinct_file": true,
"是否开启跨站访问": "默认开启",
"enable_cross_origin": true,
"是否开启Google认证,实现安全的上传、下载": "默认不开启",
"enable_google_auth": false,
"认证url": "当url不为空时生效,注意:普通上传中使用http参数 auth_token 作为认证参数, 在断点续传中通过HTTP头Upload-Metadata中的auth_token作为认证参数,认证流程参考认证架构图",
"auth_url": "",
"下载是否认证": "默认不认证(注意此选项是在auth_url不为空的情况下生效)",
"enable_download_auth": false,
"默认是否下载": "默认下载",
"default_download": true,
"本机是否只读": "默认可读可写",
"read_only": false,
"是否开启断点续传": "默认开启",
"enable_tus": true,
"同步单一文件超时时间(单位秒)": "默认为0,程序自动计算,在特殊情况下,自已设定",
"sync_timeout": 0
}
`
)
type Server struct {
ldb *leveldb.DB
logDB *leveldb.DB
util *goutil.Common
statMap *goutil.CommonMap
sumMap *goutil.CommonMap
queueToPeers chan FileInfo
queueFromPeers chan FileInfo
queueFileLog chan *FileLog
lockMap *goutil.CommonMap
sceneMap *goutil.CommonMap
searchMap *goutil.CommonMap
curDate string
host string
}
type FileInfo struct {
Name string `json:"name"`
ReName string `json:"rename"`
Path string `json:"path"`
Md5 string `json:"md5"`
Size int64 `json:"size"`
Peers []string `json:"peers"`
Scene string `json:"scene"`
TimeStamp int64 `json:"timeStamp"`
OffSet int64 `json:"offset"`
}
type FileLog struct {
FileInfo *FileInfo
FileName string
}
type JsonResult struct {
Message string `json:"message"`
Status string `json:"status"`
Data interface{} `json:"data"`
}
type FileResult struct {
Url string `json:"url"`
Md5 string `json:"md5"`
Path string `json:"path"`
Domain string `json:"domain"`
Scene string `json:"scene"`
Size int64 `json:"size"`
ModTime int64 `json:"mtime"`
//Just for Compatibility
Scenes string `json:"scenes"`
Retmsg string `json:"retmsg"`
Retcode int `json:"retcode"`
Src string `json:"src"`
}
type Mail struct {
User string `json:"user"`
Password string `json:"password"`
Host string `json:"host"`
}
type StatDateFileInfo struct {
Date string `json:"date"`
TotalSize int64 `json:"totalSize"`
FileCount int64 `json:"fileCount"`
}
type GloablConfig struct {
Addr string `json:"addr"`
Peers []string `json:"peers"`
Group string `json:"group"`
RenameFile bool `json:"rename_file"`
ShowDir bool `json:"show_dir"`
Extensions []string `json:"extensions"`
RefreshInterval int `json:"refresh_interval"`
EnableWebUpload bool `json:"enable_web_upload"`
DownloadDomain string `json:"download_domain"`
EnableCustomPath bool `json:"enable_custom_path"`
Scenes []string `json:"scenes"`
AlarmReceivers []string `json:"alarm_receivers"`
DefaultScene string `json:"default_scene"`
Mail Mail `json:"mail"`
AlarmUrl string `json:"alarm_url"`
DownloadUseToken bool `json:"download_use_token"`
DownloadTokenExpire int `json:"download_token_expire"`
QueueSize int `json:"queue_size"`
AutoRepair bool `json:"auto_repair"`
Host string `json:"host"`
FileSumArithmetic string `json:"file_sum_arithmetic"`
PeerId string `json:"peer_id"`
SupportGroupManage bool `json:"support_group_manage"`
AdminIps []string `json:"admin_ips"`
EnableMergeSmallFile bool `json:"enable_merge_small_file"`
EnableMigrate bool `json:"enable_migrate"`
EnableDistinctFile bool `json:"enable_distinct_file"`
ReadOnly bool `json:"read_only"`
EnableCrossOrigin bool `json:"enable_cross_origin"`
EnableGoogleAuth bool `json:"enable_google_auth"`
AuthUrl string `json:"auth_url"`
EnableDownloadAuth bool `json:"enable_download_auth"`
DefaultDownload bool `json:"default_download"`
EnableTus bool `json:"enable_tus"`
SyncTimeout int64 `json:"sync_timeout"`
}
type FileInfoResult struct {
Name string `json:"name"`
Md5 string `json:"md5"`
Path string `json:"path"`
Size int64 `json:"size"`
ModTime int64 `json:"mtime"`
IsDir bool `json:"is_dir"`
}
func NewServer() *Server {
var (
server *Server
err error
)
server = &Server{
util: &goutil.Common{},
statMap: goutil.NewCommonMap(0),
lockMap: goutil.NewCommonMap(0),
sceneMap: goutil.NewCommonMap(0),
searchMap: goutil.NewCommonMap(0),
queueToPeers: make(chan FileInfo, CONST_QUEUE_SIZE),
queueFromPeers: make(chan FileInfo, CONST_QUEUE_SIZE),
queueFileLog: make(chan *FileLog, CONST_QUEUE_SIZE),
sumMap: goutil.NewCommonMap(365 * 3),
}
defaultTransport := &http.Transport{
DisableKeepAlives: true,
Dial: httplib.TimeoutDialer(time.Second*6, time.Second*300),
MaxIdleConns: 100,
MaxIdleConnsPerHost: 100,
}
settings := httplib.BeegoHTTPSettings{
UserAgent: "Go-FastDFS",
ConnectTimeout: 10 * time.Second,
ReadWriteTimeout: 10 * time.Second,
Gzip: true,
DumpBody: true,
Transport: defaultTransport,
}
httplib.SetDefaultSetting(settings)
server.statMap.Put(CONST_STAT_FILE_COUNT_KEY, int64(0))
server.statMap.Put(CONST_STAT_FILE_TOTAL_SIZE_KEY, int64(0))
server.statMap.Put(server.util.GetToDay()+"_"+CONST_STAT_FILE_COUNT_KEY, int64(0))
server.statMap.Put(server.util.GetToDay()+"_"+CONST_STAT_FILE_TOTAL_SIZE_KEY, int64(0))
server.curDate = server.util.GetToDay()
opts := &opt.Options{
CompactionTableSize: 1024 * 1024 * 20,
WriteBuffer: 1024 * 1024 * 20,
}
server.ldb, err = leveldb.OpenFile(CONST_LEVELDB_FILE_NAME, opts)
if err != nil {
fmt.Println(err)
log.Error(err)
panic(err)
}
server.logDB, err = leveldb.OpenFile(CONST_LOG_LEVELDB_FILE_NAME, opts)
if err != nil {
fmt.Println(err)
log.Error(err)
panic(err)
}
return server
}
func Config() *GloablConfig {
return (*GloablConfig)(atomic.LoadPointer(&ptr))
}
func ParseConfig(filePath string) {
var (
data []byte
)
if filePath == "" {
data = []byte(strings.TrimSpace(cfgJson))
} else {
file, err := os.Open(filePath)
if err != nil {
panic(fmt.Sprintln("open file path:", filePath, "error:", err))
}
defer file.Close()
FileName = filePath
data, err = ioutil.ReadAll(file)
if err != nil {
panic(fmt.Sprintln("file path:", filePath, " read all error:", err))
}
}
var c GloablConfig
if err := json.Unmarshal(data, &c); err != nil {
panic(fmt.Sprintln("file path:", filePath, "json unmarshal error:", err))
}
log.Info(c)
atomic.StorePointer(&ptr, unsafe.Pointer(&c))
log.Info("config parse success")
}
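// BackUpMetaDataByDate rebuilds DATA_DIR/<date>/files.md5 and DATA_DIR/<date>/meta.data
// from the per-date file entries stored in logDB; either file is removed again if it
// ends up empty.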
func (this *Server) BackUpMetaDataByDate(date string) {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("BackUpMetaDataByDate")
log.Error(re)
log.Error(string(buffer))
}
}()
var (
err error
keyPrefix string
msg string
name string
fileInfo FileInfo
logFileName string
fileLog *os.File
fileMeta *os.File
metaFileName string
fi os.FileInfo
)
logFileName = DATA_DIR + "/" + date + "/" + CONST_FILE_Md5_FILE_NAME
this.lockMap.LockKey(logFileName)
defer this.lockMap.UnLockKey(logFileName)
metaFileName = DATA_DIR + "/" + date + "/" + "meta.data"
os.MkdirAll(DATA_DIR+"/"+date, 0775)
if this.util.IsExist(logFileName) {
os.Remove(logFileName)
}
if this.util.IsExist(metaFileName) {
os.Remove(metaFileName)
}
fileLog, err = os.OpenFile(logFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
if err != nil {
log.Error(err)
return
}
defer fileLog.Close()
fileMeta, err = os.OpenFile(metaFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
if err != nil {
log.Error(err)
return
}
defer fileMeta.Close()
keyPrefix = "%s_%s_"
keyPrefix = fmt.Sprintf(keyPrefix, date, CONST_FILE_Md5_FILE_NAME)
iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
defer iter.Release()
for iter.Next() {
if err = json.Unmarshal(iter.Value(), &fileInfo); err != nil {
continue
}
name = fileInfo.Name
if fileInfo.ReName != "" {
name = fileInfo.ReName
}
msg = fmt.Sprintf("%s\t%s\n", fileInfo.Md5, string(iter.Value()))
if _, err = fileMeta.WriteString(msg); err != nil {
log.Error(err)
}
msg = fmt.Sprintf("%s\t%s\n", this.util.MD5(fileInfo.Path+"/"+name), string(iter.Value()))
if _, err = fileMeta.WriteString(msg); err != nil {
log.Error(err)
}
msg = fmt.Sprintf("%s|%d|%d|%s\n", fileInfo.Md5, fileInfo.Size, fileInfo.TimeStamp, fileInfo.Path+"/"+name)
if _, err = fileLog.WriteString(msg); err != nil {
log.Error(err)
}
}
if fi, err = fileLog.Stat(); err != nil {
log.Error(err)
} else if fi.Size() == 0 {
fileLog.Close()
os.Remove(logFileName)
}
if fi, err = fileMeta.Stat(); err != nil {
log.Error(err)
} else if fi.Size() == 0 {
fileMeta.Close()
os.Remove(metaFileName)
}
}
func (this *Server) RepairFileInfoFromFile() {
var (
pathPrefix string
err error
fi os.FileInfo
)
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("RepairFileInfoFromFile")
log.Error(re)
log.Error(string(buffer))
}
}()
if this.lockMap.IsLock("RepairFileInfoFromFile") {
log.Warn("Lock RepairFileInfoFromFile")
return
}
this.lockMap.LockKey("RepairFileInfoFromFile")
defer this.lockMap.UnLockKey("RepairFileInfoFromFile")
handlefunc := func(file_path string, f os.FileInfo, err error) error {
var (
files []os.FileInfo
fi os.FileInfo
fileInfo FileInfo
sum string
pathMd5 string
)
if f.IsDir() {
files, err = ioutil.ReadDir(file_path)
if err != nil {
return err
}
for _, fi = range files {
if fi.IsDir() || fi.Size() == 0 {
continue
}
file_path = strings.Replace(file_path, "\\", "/", -1)
if DOCKER_DIR != "" {
file_path = strings.Replace(file_path, DOCKER_DIR, "", 1)
}
if pathPrefix != "" {
file_path = strings.Replace(file_path, pathPrefix, STORE_DIR_NAME, 1)
}
if strings.HasPrefix(file_path, STORE_DIR_NAME+"/"+LARGE_DIR_NAME) {
log.Info(fmt.Sprintf("ignore small file file %s", file_path+"/"+fi.Name()))
continue
}
pathMd5 = this.util.MD5(file_path + "/" + fi.Name())
//if finfo, _ := this.GetFileInfoFromLevelDB(pathMd5); finfo != nil && finfo.Md5 != "" {
// log.Info(fmt.Sprintf("exist ignore file %s", file_path+"/"+fi.Name()))
// continue
//}
//sum, err = this.util.GetFileSumByName(file_path+"/"+fi.Name(), Config().FileSumArithmetic)
sum = pathMd5
if err != nil {
log.Error(err)
continue
}
fileInfo = FileInfo{
Size: fi.Size(),
Name: fi.Name(),
Path: file_path,
Md5: sum,
TimeStamp: fi.ModTime().Unix(),
Peers: []string{this.host},
OffSet: -2,
}
//log.Info(fileInfo)
log.Info(file_path, "/", fi.Name())
this.AppendToQueue(&fileInfo)
//this.postFileToPeer(&fileInfo)
this.SaveFileInfoToLevelDB(fileInfo.Md5, &fileInfo, this.ldb)
//this.SaveFileMd5Log(&fileInfo, CONST_FILE_Md5_FILE_NAME)
}
}
return nil
}
pathname := STORE_DIR
pathPrefix, err = os.Readlink(pathname)
if err == nil {
//link
pathname = pathPrefix
if strings.HasSuffix(pathPrefix, "/") {
//bugfix fullpath
pathPrefix = pathPrefix[0 : len(pathPrefix)-1]
}
}
fi, err = os.Stat(pathname)
if err != nil {
log.Error(err)
return
}
if fi.IsDir() {
filepath.Walk(pathname, handlefunc)
}
log.Info("RepairFileInfoFromFile is finish.")
}
func (this *Server) RepairStatByDate(date string) StatDateFileInfo {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("RepairStatByDate")
log.Error(re)
log.Error(string(buffer))
}
}()
var (
err error
keyPrefix string
fileInfo FileInfo
fileCount int64
fileSize int64
stat StatDateFileInfo
)
keyPrefix = "%s_%s_"
keyPrefix = fmt.Sprintf(keyPrefix, date, CONST_FILE_Md5_FILE_NAME)
iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
defer iter.Release()
for iter.Next() {
if err = json.Unmarshal(iter.Value(), &fileInfo); err != nil {
continue
}
fileCount = fileCount + 1
fileSize = fileSize + fileInfo.Size
}
this.statMap.Put(date+"_"+CONST_STAT_FILE_COUNT_KEY, fileCount)
this.statMap.Put(date+"_"+CONST_STAT_FILE_TOTAL_SIZE_KEY, fileSize)
this.SaveStat()
stat.Date = date
stat.FileCount = fileCount
stat.TotalSize = fileSize
return stat
}
func (this *Server) GetFilePathByInfo(fileInfo *FileInfo, withDocker bool) string {
var (
fn string
)
fn = fileInfo.Name
if fileInfo.ReName != "" {
fn = fileInfo.ReName
}
if withDocker {
return DOCKER_DIR + fileInfo.Path + "/" + fn
}
return fileInfo.Path + "/" + fn
}
func (this *Server) CheckFileExistByInfo(md5s string, fileInfo *FileInfo) bool {
var (
err error
fullpath string
fi os.FileInfo
info *FileInfo
)
if fileInfo == nil {
return false
}
if fileInfo.OffSet >= 0 {
//small file
if info, err = this.GetFileInfoFromLevelDB(fileInfo.Md5); err == nil && info.Md5 == fileInfo.Md5 {
return true
} else {
return false
}
}
fullpath = this.GetFilePathByInfo(fileInfo, true)
if fi, err = os.Stat(fullpath); err != nil {
return false
}
if fi.Size() == fileInfo.Size {
return true
} else {
return false
}
}
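// ParseSmallFile parses a merged (haystack) small-file name of the form
// "<name>,<offset>,<length>" and returns the base name, the offset and the length.
// Anything that does not match this form, or exceeds CONST_SMALL_FILE_SIZE, is rejected.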
func (this *Server) ParseSmallFile(filename string) (string, int64, int, error) {
var (
err error
offset int64
length int
)
err = errors.New("unvalid small file")
if len(filename) < 3 {
return filename, -1, -1, err
}
if strings.Contains(filename, "/") {
filename = filename[strings.LastIndex(filename, "/")+1:]
}
pos := strings.Split(filename, ",")
if len(pos) < 3 {
return filename, -1, -1, err
}
offset, err = strconv.ParseInt(pos[1], 10, 64)
if err != nil {
return filename, -1, -1, err
}
if length, err = strconv.Atoi(pos[2]); err != nil {
return filename, offset, -1, err
}
if length > CONST_SMALL_FILE_SIZE || offset < 0 {
err = errors.New("invalid filesize or offset")
return filename, -1, -1, err
}
return pos[0], offset, length, nil
}
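// DownloadFromPeer pulls the file described by fileInfo from the given peer.
// OffSet == -2 marks a migrated whole file, OffSet >= 0 a slice of a merged (haystack)
// file written back at that offset, and anything else a normal file that is downloaded
// to a temporary path and then renamed into place.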
func (this *Server) DownloadFromPeer(peer string, fileInfo *FileInfo) {
var (
err error
filename string
fpath string
fpathTmp string
fi os.FileInfo
sum string
data []byte
downloadUrl string
)
if Config().ReadOnly {
log.Warn("ReadOnly", fileInfo)
return
}
filename = fileInfo.Name
if fileInfo.ReName != "" {
filename = fileInfo.ReName
}
if Config().EnableDistinctFile && this.CheckFileExistByInfo(fileInfo.Md5, fileInfo) {
log.Info("DownloadFromPeer file Exist")
return
}
if !Config().EnableDistinctFile && this.util.FileExists(this.GetFilePathByInfo(fileInfo, true)) {
if fi, err = os.Stat(this.GetFilePathByInfo(fileInfo, true)); err == nil {
if fi.ModTime().Unix() > fileInfo.TimeStamp {
log.Info(fmt.Sprintf("ignore file sync path:%s", this.GetFilePathByInfo(fileInfo, false)))
fileInfo.TimeStamp = fi.ModTime().Unix()
this.postFileToPeer(fileInfo) // keep newer
return
}
os.Remove(this.GetFilePathByInfo(fileInfo, true))
}
}
if _, err = os.Stat(fileInfo.Path); err != nil {
os.MkdirAll(DOCKER_DIR+fileInfo.Path, 0775)
}
//fmt.Println("downloadFromPeer",fileInfo)
p := strings.Replace(fileInfo.Path, STORE_DIR_NAME+"/", "", 1)
//filename=this.util.UrlEncode(filename)
downloadUrl = peer + "/" + Config().Group + "/" + p + "/" + filename
log.Info("DownloadFromPeer: ", downloadUrl)
fpath = DOCKER_DIR + fileInfo.Path + "/" + filename
fpathTmp = DOCKER_DIR + fileInfo.Path + "/" + fmt.Sprintf("%s_%s", "tmp_", filename)
timeout := fileInfo.Size/1024/1024/1 + 30
if Config().SyncTimeout > 0 {
timeout = Config().SyncTimeout
}
this.lockMap.LockKey(fpath)
defer this.lockMap.UnLockKey(fpath)
download_key := fmt.Sprintf("downloading_%d_%s", time.Now().Unix(), fpath)
this.ldb.Put([]byte(download_key), []byte(""), nil)
defer func() {
this.ldb.Delete([]byte(download_key), nil)
}()
if fileInfo.OffSet == -2 {
//migrate file
if fi, err = os.Stat(fpath); err == nil && fi.Size() == fileInfo.Size {
//prevent double download
this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb)
//log.Info(fmt.Sprintf("file '%s' has download", fpath))
return
}
req := httplib.Get(downloadUrl)
req.SetTimeout(time.Second*30, time.Second*time.Duration(timeout))
if err = req.ToFile(fpathTmp); err != nil {
os.Remove(fpathTmp)
log.Error(err)
return
}
if os.Rename(fpathTmp, fpath) == nil {
//this.SaveFileMd5Log(fileInfo, CONST_FILE_Md5_FILE_NAME)
this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb)
}
return
}
req := httplib.Get(downloadUrl)
req.SetTimeout(time.Second*30, time.Second*time.Duration(timeout))
if fileInfo.OffSet >= 0 {
//small file download
data, err = req.Bytes()
if err != nil {
log.Error(err)
return
}
data2 := make([]byte, len(data)+1)
data2[0] = '1'
for i, v := range data {
data2[i+1] = v
}
data = data2
if int64(len(data)) != fileInfo.Size {
log.Warn("file size is error")
return
}
fpath = strings.Split(fpath, ",")[0]
err = this.util.WriteFileByOffSet(fpath, fileInfo.OffSet, data)
if err != nil {
log.Warn(err)
return
}
this.SaveFileMd5Log(fileInfo, CONST_FILE_Md5_FILE_NAME)
return
}
if err = req.ToFile(fpathTmp); err != nil {
os.Remove(fpathTmp)
log.Error(err)
return
}
if fi, err = os.Stat(fpathTmp); err != nil {
os.Remove(fpathTmp)
return
}
_ = sum
//if Config().EnableDistinctFile {
// //DistinctFile
// if sum, err = this.util.GetFileSumByName(fpathTmp, Config().FileSumArithmetic); err != nil {
// log.Error(err)
// return
// }
//} else {
// //DistinctFile By path
// sum = this.util.MD5(this.GetFilePathByInfo(fileInfo, false))
//}
if fi.Size() != fileInfo.Size { // maybe has bug remove || sum != fileInfo.Md5
log.Error("file sum check error")
os.Remove(fpathTmp)
return
}
if os.Rename(fpathTmp, fpath) == nil {
this.SaveFileMd5Log(fileInfo, CONST_FILE_Md5_FILE_NAME)
}
}
func (this *Server) CrossOrigin(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, Depth, User-Agent, X-File-Size, X-Requested-With, X-Requested-By, If-Modified-Since, X-File-Name, X-File-Type, Cache-Control, Origin")
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS, PUT, DELETE")
w.Header().Set("Access-Control-Expose-Headers", "Authorization")
//https://blog.csdn.net/yanzisu_congcong/article/details/80552155
}
func (this *Server) SetDownloadHeader(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", "attachment")
}
func (this *Server) CheckAuth(w http.ResponseWriter, r *http.Request) bool {
var (
err error
req *httplib.BeegoHTTPRequest
result string
jsonResult JsonResult
)
if err = r.ParseForm(); err != nil {
log.Error(err)
return false
}
req = httplib.Post(Config().AuthUrl)
req.SetTimeout(time.Second*10, time.Second*10)
for k := range r.Form {
req.Param(k, r.FormValue(k))
}
for k, v := range r.Header {
req.Header(k, v[0])
}
result, err = req.String()
result = strings.TrimSpace(result)
if strings.HasPrefix(result, "{") && strings.HasSuffix(result, "}") {
if err = json.Unmarshal([]byte(result), &jsonResult); err != nil {
log.Error(err)
return false
}
if jsonResult.Data != "ok" {
log.Warn(result)
return false
}
} else {
if result != "ok" {
log.Warn(result)
return false
}
}
return true
}
func (this *Server) NotPermit(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(401)
}
func (this *Server) GetFilePathFromRequest(w http.ResponseWriter, r *http.Request) (string, string) {
var (
err error
fullpath string
smallPath string
)
fullpath = r.RequestURI[len(Config().Group)+2 : len(r.RequestURI)]
fullpath = strings.Split(fullpath, "?")[0] // just path
fullpath = DOCKER_DIR + STORE_DIR_NAME + "/" + fullpath
if strings.HasPrefix(r.RequestURI, "/"+Config().Group+"/"+LARGE_DIR_NAME+"/") {
smallPath = fullpath //notice order
fullpath = strings.Split(fullpath, ",")[0]
}
if fullpath, err = url.PathUnescape(fullpath); err != nil {
log.Error(err)
}
return fullpath, smallPath
}
func (this *Server) CheckDownloadAuth(w http.ResponseWriter, r *http.Request) (bool, error) {
var (
err error
maxTimestamp int64
minTimestamp int64
ts int64
token string
timestamp string
fullpath string
smallPath string
pathMd5 string
fileInfo *FileInfo
scene string
secret interface{}
code string
ok bool
)
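// Download tokens are expected to be MD5(fileMd5 + timestamp), checked by CheckToken
// below; the timestamp must lie within DownloadTokenExpire seconds of the server time.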
CheckToken := func(token string, md5sum string, timestamp string) bool {
if this.util.MD5(md5sum+timestamp) != token {
return false
}
return true
}
if Config().EnableDownloadAuth && Config().AuthUrl != "" && !this.IsPeer(r) && !this.CheckAuth(w, r) {
return false, errors.New("auth fail")
}
if Config().DownloadUseToken && !this.IsPeer(r) {
token = r.FormValue("token")
timestamp = r.FormValue("timestamp")
if token == "" || timestamp == "" {
return false, errors.New("unvalid request")
}
maxTimestamp = time.Now().Add(time.Second *
time.Duration(Config().DownloadTokenExpire)).Unix()
minTimestamp = time.Now().Add(-time.Second *
time.Duration(Config().DownloadTokenExpire)).Unix()
if ts, err = strconv.ParseInt(timestamp, 10, 64); err != nil {
return false, errors.New("unvalid timestamp")
}
if ts > maxTimestamp || ts < minTimestamp {
return false, errors.New("timestamp expire")
}
fullpath, smallPath = this.GetFilePathFromRequest(w, r)
if smallPath != "" {
pathMd5 = this.util.MD5(smallPath)
} else {
pathMd5 = this.util.MD5(fullpath)
}
if fileInfo, err = this.GetFileInfoFromLevelDB(pathMd5); err != nil {
// TODO
} else {
ok := CheckToken(token, fileInfo.Md5, timestamp)
if !ok {
return ok, errors.New("unvalid token")
}
return ok, nil
}
}
if Config().EnableGoogleAuth && !this.IsPeer(r) {
fullpath = r.RequestURI[len(Config().Group)+2 : len(r.RequestURI)]
fullpath = strings.Split(fullpath, "?")[0] // just path
scene = strings.Split(fullpath, "/")[0]
code = r.FormValue("code")
if secret, ok = this.sceneMap.GetValue(scene); ok {
if !this.VerifyGoogleCode(secret.(string), code, int64(Config().DownloadTokenExpire/30)) {
return false, errors.New("invalid google code")
}
}
}
return true, nil
}
func (this *Server) GetSmallFileByURI(w http.ResponseWriter, r *http.Request) ([]byte, bool, error) {
var (
err error
data []byte
offset int64
length int
fullpath string
info os.FileInfo
)
fullpath, _ = this.GetFilePathFromRequest(w, r)
if _, offset, length, err = this.ParseSmallFile(r.RequestURI); err != nil {
return nil, false, err
}
if info, err = os.Stat(fullpath); err != nil {
return nil, false, err
}
if info.Size() < offset+int64(length) {
return nil, true, errors.New("noFound")
} else {
data, err = this.util.ReadFileByOffSet(fullpath, offset, length)
if err != nil {
return nil, false, err
}
return data, false, err
}
}
func (this *Server) DownloadSmallFileByURI(w http.ResponseWriter, r *http.Request) (bool, error) {
var (
err error
data []byte
isDownload bool
imgWidth int
imgHeight int
width string
height string
notFound bool
)
r.ParseForm()
isDownload = true
if r.FormValue("download") == "" {
isDownload = Config().DefaultDownload
}
if r.FormValue("download") == "0" {
isDownload = false
}
width = r.FormValue("width")
height = r.FormValue("height")
if width != "" {
imgWidth, err = strconv.Atoi(width)
if err != nil {
log.Error(err)
}
}
if height != "" {
imgHeight, err = strconv.Atoi(height)
if err != nil {
log.Error(err)
}
}
data, notFound, err = this.GetSmallFileByURI(w, r)
_ = notFound
if data != nil && string(data[0]) == "1" {
if isDownload {
this.SetDownloadHeader(w, r)
}
if imgWidth != 0 || imgHeight != 0 {
this.ResizeImageByBytes(w, data[1:], uint(imgWidth), uint(imgHeight))
return true, nil
}
w.Write(data[1:])
return true, nil
}
return false, errors.New("not found")
}
func (this *Server) DownloadNormalFileByURI(w http.ResponseWriter, r *http.Request) (bool, error) {
var (
err error
isDownload bool
imgWidth int
imgHeight int
width string
height string
)
r.ParseForm()
isDownload = true
if r.FormValue("download") == "" {
isDownload = Config().DefaultDownload
}
if r.FormValue("download") == "0" {
isDownload = false
}
width = r.FormValue("width")
height = r.FormValue("height")
if width != "" {
imgWidth, err = strconv.Atoi(width)
if err != nil {
log.Error(err)
}
}
if height != "" {
imgHeight, err = strconv.Atoi(height)
if err != nil {
log.Error(err)
}
}
if isDownload {
this.SetDownloadHeader(w, r)
}
fullpath, _ := this.GetFilePathFromRequest(w, r)
if imgWidth != 0 || imgHeight != 0 {
this.ResizeImage(w, fullpath, uint(imgWidth), uint(imgHeight))
return true, nil
}
staticHandler.ServeHTTP(w, r)
return true, nil
}
func (this *Server) DownloadNotFound(w http.ResponseWriter, r *http.Request) {
var (
err error
fullpath string
smallPath string
isDownload bool
pathMd5 string
peer string
fileInfo *FileInfo
)
fullpath, smallPath = this.GetFilePathFromRequest(w, r)
isDownload = true
if r.FormValue("download") == "" {
isDownload = Config().DefaultDownload
}
if r.FormValue("download") == "0" {
isDownload = false
}
if smallPath != "" {
pathMd5 = this.util.MD5(smallPath)
} else {
pathMd5 = this.util.MD5(fullpath)
}
for _, peer = range Config().Peers {
if fileInfo, err = this.checkPeerFileExist(peer, pathMd5, fullpath); err != nil {
log.Error(err)
continue
}
if fileInfo.Md5 != "" {
go this.DownloadFromPeer(peer, fileInfo)
//http.Redirect(w, r, peer+r.RequestURI, 302)
if isDownload {
this.SetDownloadHeader(w, r)
}
this.DownloadFileToResponse(peer+r.RequestURI, w, r)
return
}
}
w.WriteHeader(404)
return
}
func (this *Server) Download(w http.ResponseWriter, r *http.Request) {
var (
err error
ok bool
fullpath string
smallPath string
fi os.FileInfo
)
if ok, err = this.CheckDownloadAuth(w, r); !ok {
log.Error(err)
this.NotPermit(w, r)
return
}
if Config().EnableCrossOrigin {
this.CrossOrigin(w, r)
}
fullpath, smallPath = this.GetFilePathFromRequest(w, r)
if smallPath == "" {
if fi, err = os.Stat(fullpath); err != nil {
this.DownloadNotFound(w, r)
return
}
if !Config().ShowDir && fi.IsDir() {
w.Write([]byte("list dir deny"))
return
}
//staticHandler.ServeHTTP(w, r)
this.DownloadNormalFileByURI(w, r)
return
}
if smallPath != "" {
if ok, err = this.DownloadSmallFileByURI(w, r); !ok {
this.DownloadNotFound(w, r)
return
}
return
}
}
func (this *Server) DownloadFileToResponse(url string, w http.ResponseWriter, r *http.Request) {
var (
err error
req *httplib.BeegoHTTPRequest
resp *http.Response
)
req = httplib.Get(url)
req.SetTimeout(time.Second*20, time.Second*600)
resp, err = req.DoRequest()
if err != nil {
log.Error(err)
return
}
defer resp.Body.Close()
_, err = io.Copy(w, resp.Body)
if err != nil {
log.Error(err)
}
}
func (this *Server) ResizeImageByBytes(w http.ResponseWriter, data []byte, width, height uint) {
var (
img image.Image
err error
imgType string
)
reader := bytes.NewReader(data)
img, imgType, err = image.Decode(reader)
if err != nil {
log.Error(err)
return
}
img = resize.Resize(width, height, img, resize.Lanczos3)
if imgType == "jpg" || imgType == "jpeg" {
jpeg.Encode(w, img, nil)
} else if imgType == "png" {
png.Encode(w, img)
} else {
w.Write(data)
}
}
func (this *Server) ResizeImage(w http.ResponseWriter, fullpath string, width, height uint) {
var (
img image.Image
err error
imgType string
file *os.File
)
file, err = os.Open(fullpath)
if err != nil {
log.Error(err)
return
}
// close via defer so the raw-copy fallback below can still read from the open file
defer file.Close()
img, imgType, err = image.Decode(file)
if err != nil {
log.Error(err)
return
}
img = resize.Resize(width, height, img, resize.Lanczos3)
if imgType == "jpg" || imgType == "jpeg" {
jpeg.Encode(w, img, nil)
} else if imgType == "png" {
png.Encode(w, img)
} else {
file.Seek(0, 0)
io.Copy(w, file)
}
}
func (this *Server) GetServerURI(r *http.Request) string {
return fmt.Sprintf("http://%s/", r.Host)
}
func (this *Server) CheckFileAndSendToPeer(date string, filename string, isForceUpload bool) {
var (
md5set mapset.Set
err error
md5s []interface{}
)
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("CheckFileAndSendToPeer")
log.Error(re)
log.Error(string(buffer))
}
}()
if md5set, err = this.GetMd5sByDate(date, filename); err != nil {
log.Error(err)
return
}
md5s = md5set.ToSlice()
for _, md := range md5s {
if md == nil {
continue
}
if fileInfo, _ := this.GetFileInfoFromLevelDB(md.(string)); fileInfo != nil && fileInfo.Md5 != "" {
if isForceUpload {
fileInfo.Peers = []string{}
}
if len(fileInfo.Peers) > len(Config().Peers) {
continue
}
if !this.util.Contains(this.host, fileInfo.Peers) {
fileInfo.Peers = append(fileInfo.Peers, this.host) // peer is null
}
if filename == CONST_Md5_QUEUE_FILE_NAME {
this.AppendToDownloadQueue(fileInfo)
} else {
this.AppendToQueue(fileInfo)
}
}
}
}
func (this *Server) postFileToPeer(fileInfo *FileInfo) {
var (
err error
peer string
filename string
info *FileInfo
postURL string
result string
fi os.FileInfo
i int
data []byte
fpath string
)
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("postFileToPeer")
log.Error(re)
log.Error(string(buffer))
}
}()
//fmt.Println("postFile",fileInfo)
for i, peer = range Config().Peers {
_ = i
if fileInfo.Peers == nil {
fileInfo.Peers = []string{}
}
if this.util.Contains(peer, fileInfo.Peers) {
continue
}
filename = fileInfo.Name
if fileInfo.ReName != "" {
filename = fileInfo.ReName
if fileInfo.OffSet != -1 {
filename = strings.Split(fileInfo.ReName, ",")[0]
}
}
fpath = DOCKER_DIR + fileInfo.Path + "/" + filename
if !this.util.FileExists(fpath) {
log.Warn(fmt.Sprintf("file '%s' not found", fpath))
continue
} else {
if fileInfo.Size == 0 {
if fi, err = os.Stat(fpath); err != nil {
log.Error(err)
} else {
fileInfo.Size = fi.Size()
}
}
}
if fileInfo.OffSet != -2 && Config().EnableDistinctFile {
//not migrate file should check or update file
// where not EnableDistinctFile should check
if info, err = this.checkPeerFileExist(peer, fileInfo.Md5, ""); info.Md5 != "" {
fileInfo.Peers = append(fileInfo.Peers, peer)
if _, err = this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb); err != nil {
log.Error(err)
}
continue
}
}
postURL = fmt.Sprintf("%s%s", peer, this.getRequestURI("syncfile_info"))
b := httplib.Post(postURL)
b.SetTimeout(time.Second*30, time.Second*30)
if data, err = json.Marshal(fileInfo); err != nil {
log.Error(err)
return
}
b.Param("fileInfo", string(data))
result, err = b.String()
if !strings.HasPrefix(result, "http://") || err != nil {
this.SaveFileMd5Log(fileInfo, CONST_Md5_ERROR_FILE_NAME)
}
if strings.HasPrefix(result, "http://") {
log.Info(result)
if !this.util.Contains(peer, fileInfo.Peers) {
fileInfo.Peers = append(fileInfo.Peers, peer)
if _, err = this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb); err != nil {
log.Error(err)
}
}
}
if err != nil {
log.Error(err)
}
}
}
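// SaveFileMd5Log enqueues a copy of fileInfo for asynchronous md5-log processing and
// blocks while the queue is close to its capacity (roughly the last tenth) as a simple
// form of backpressure.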
func (this *Server) SaveFileMd5Log(fileInfo *FileInfo, filename string) {
var (
info FileInfo
)
for len(this.queueFileLog)+len(this.queueFileLog)/10 > CONST_QUEUE_SIZE {
time.Sleep(time.Second * 1)
}
info = *fileInfo
this.queueFileLog <- &FileLog{FileInfo: &info, FileName: filename}
}
func (this *Server) saveFileMd5Log(fileInfo *FileInfo, filename string) {
var (
err error
outname string
logDate string
ok bool
fullpath string
md5Path string
logKey string
)
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("saveFileMd5Log")
log.Error(re)
log.Error(string(buffer))
}
}()
if fileInfo == nil || fileInfo.Md5 == "" || filename == "" {
log.Warn("saveFileMd5Log", fileInfo, filename)
return
}
logDate = this.util.GetDayFromTimeStamp(fileInfo.TimeStamp)
outname = fileInfo.Name
if fileInfo.ReName != "" {
outname = fileInfo.ReName
}
fullpath = fileInfo.Path + "/" + outname
logKey = fmt.Sprintf("%s_%s_%s", logDate, filename, fileInfo.Md5)
if filename == CONST_FILE_Md5_FILE_NAME {
//this.searchMap.Put(fileInfo.Md5, fileInfo.Name)
if ok, err = this.IsExistFromLevelDB(fileInfo.Md5, this.ldb); !ok {
this.statMap.AddCountInt64(logDate+"_"+CONST_STAT_FILE_COUNT_KEY, 1)
this.statMap.AddCountInt64(logDate+"_"+CONST_STAT_FILE_TOTAL_SIZE_KEY, fileInfo.Size)
this.SaveStat()
}
if _, err = this.SaveFileInfoToLevelDB(logKey, fileInfo, this.logDB); err != nil {
log.Error(err)
}
if _, err := this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb); err != nil {
log.Error("saveToLevelDB", err, fileInfo)
}
if _, err = this.SaveFileInfoToLevelDB(this.util.MD5(fullpath), fileInfo, this.ldb); err != nil {
log.Error("saveToLevelDB", err, fileInfo)
}
return
}
if filename == CONST_REMOME_Md5_FILE_NAME {
//this.searchMap.Remove(fileInfo.Md5)
if ok, err = this.IsExistFromLevelDB(fileInfo.Md5, this.ldb); ok {
this.statMap.AddCountInt64(logDate+"_"+CONST_STAT_FILE_COUNT_KEY, -1)
this.statMap.AddCountInt64(logDate+"_"+CONST_STAT_FILE_TOTAL_SIZE_KEY, -fileInfo.Size)
this.SaveStat()
}
this.RemoveKeyFromLevelDB(logKey, this.logDB)
md5Path = this.util.MD5(fullpath)
if err := this.RemoveKeyFromLevelDB(fileInfo.Md5, this.ldb); err != nil {
log.Error("RemoveKeyFromLevelDB", err, fileInfo)
}
if err = this.RemoveKeyFromLevelDB(md5Path, this.ldb); err != nil {
log.Error("RemoveKeyFromLevelDB", err, fileInfo)
}
// remove files.md5 for stat info(repair from logDB)
logKey = fmt.Sprintf("%s_%s_%s", logDate, CONST_FILE_Md5_FILE_NAME, fileInfo.Md5)
this.RemoveKeyFromLevelDB(logKey, this.logDB)
return
}
this.SaveFileInfoToLevelDB(logKey, fileInfo, this.logDB)
}
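// checkPeerFileExist asks the peer's check_file_exist endpoint whether it already stores
// the file identified by md5sum (or by path when fpath is given); a FileInfo with an
// empty Md5 means the peer does not have it.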
func (this *Server) checkPeerFileExist(peer string, md5sum string, fpath string) (*FileInfo, error) {
var (
err error
fileInfo FileInfo
)
req := httplib.Post(fmt.Sprintf("%s%s?md5=%s", peer, this.getRequestURI("check_file_exist"), md5sum))
req.Param("path", fpath)
req.Param("md5", md5sum)
req.SetTimeout(time.Second*5, time.Second*10)
if err = req.ToJSON(&fileInfo); err != nil {
return &FileInfo{}, err
}
if fileInfo.Md5 == "" {
return &fileInfo, errors.New("not found")
}
return &fileInfo, nil
}
func (this *Server) CheckFileExist(w http.ResponseWriter, r *http.Request) {
var (
data []byte
err error
fileInfo *FileInfo
fpath string
fi os.FileInfo
)
r.ParseForm()
md5sum := ""
md5sum = r.FormValue("md5")
fpath = r.FormValue("path")
if fileInfo, err = this.GetFileInfoFromLevelDB(md5sum); fileInfo != nil {
if fileInfo.OffSet != -1 {
if data, err = json.Marshal(fileInfo); err != nil {
log.Error(err)
}
w.Write(data)
return
}
fpath = DOCKER_DIR + fileInfo.Path + "/" + fileInfo.Name
if fileInfo.ReName != "" {
fpath = DOCKER_DIR + fileInfo.Path + "/" + fileInfo.ReName
}
if this.util.IsExist(fpath) {
if data, err = json.Marshal(fileInfo); err == nil {
w.Write(data)
return
} else {
log.Error(err)
}
} else {
if fileInfo.OffSet == -1 {
this.RemoveKeyFromLevelDB(md5sum, this.ldb) // the file has been deleted, remove its entry from leveldb
}
}
} else {
if fpath != "" {
fi, err = os.Stat(fpath)
if err == nil {
sum := this.util.MD5(fpath)
//if Config().EnableDistinctFile {
// sum, err = this.util.GetFileSumByName(fpath, Config().FileSumArithmetic)
// if err != nil {
// log.Error(err)
// }
//}
fileInfo = &FileInfo{
Path: path.Dir(fpath),
Name: path.Base(fpath),
Size: fi.Size(),
Md5: sum,
Peers: []string{Config().Host},
OffSet: -1, //very important
TimeStamp: fi.ModTime().Unix(),
}
data, err = json.Marshal(fileInfo)
w.Write(data)
return
}
}
}
data, _ = json.Marshal(FileInfo{})
w.Write(data)
return
}
func (this *Server) CheckFilesExist(w http.ResponseWriter, r *http.Request) {
var (
data []byte
err error
fileInfo *FileInfo
fileInfos []*FileInfo
fpath string
result JsonResult
)
r.ParseForm()
md5sum := ""
md5sum = r.FormValue("md5s")
md5s := strings.Split(md5sum, ",")
for _, m := range md5s {
if fileInfo, err = this.GetFileInfoFromLevelDB(m); fileInfo != nil {
if fileInfo.OffSet != -1 {
if data, err = json.Marshal(fileInfo); err != nil {
log.Error(err)
}
//w.Write(data)
//return
fileInfos = append(fileInfos, fileInfo)
continue
}
fpath = DOCKER_DIR + fileInfo.Path + "/" + fileInfo.Name
if fileInfo.ReName != "" {
fpath = DOCKER_DIR + fileInfo.Path + "/" + fileInfo.ReName
}
if this.util.IsExist(fpath) {
if data, err = json.Marshal(fileInfo); err == nil {
fileInfos = append(fileInfos, fileInfo)
//w.Write(data)
//return
continue
} else {
log.Error(err)
}
} else {
if fileInfo.OffSet == -1 {
this.RemoveKeyFromLevelDB(m, this.ldb) // the file has been deleted, remove its entry from leveldb
}
}
}
}
result.Data = fileInfos
data, _ = json.Marshal(result)
w.Write(data)
return
}
func (this *Server) Sync(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
)
r.ParseForm()
result.Status = "fail"
if !this.IsPeer(r) {
result.Message = "client must be in cluster"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
date := ""
force := ""
inner := ""
isForceUpload := false
force = r.FormValue("force")
date = r.FormValue("date")
inner = r.FormValue("inner")
if force == "1" {
isForceUpload = true
}
if inner != "1" {
for _, peer := range Config().Peers {
req := httplib.Post(peer + this.getRequestURI("sync"))
req.Param("force", force)
req.Param("inner", "1")
req.Param("date", date)
if _, err := req.String(); err != nil {
log.Error(err)
}
}
}
if date == "" {
result.Message = "require paramete date &force , ?date=20181230"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
date = strings.Replace(date, ".", "", -1)
if isForceUpload {
go this.CheckFileAndSendToPeer(date, CONST_FILE_Md5_FILE_NAME, isForceUpload)
} else {
go this.CheckFileAndSendToPeer(date, CONST_Md5_ERROR_FILE_NAME, isForceUpload)
}
result.Status = "ok"
result.Message = "job is running"
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) IsExistFromLevelDB(key string, db *leveldb.DB) (bool, error) {
return db.Has([]byte(key), nil)
}
func (this *Server) GetFileInfoFromLevelDB(key string) (*FileInfo, error) {
var (
err error
data []byte
fileInfo FileInfo
)
if data, err = this.ldb.Get([]byte(key), nil); err != nil {
return nil, err
}
if err = json.Unmarshal(data, &fileInfo); err != nil {
return nil, err
}
return &fileInfo, nil
}
func (this *Server) SaveStat() {
SaveStatFunc := func() {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("SaveStatFunc")
log.Error(re)
log.Error(string(buffer))
}
}()
stat := this.statMap.Get()
if v, ok := stat[CONST_STAT_FILE_COUNT_KEY]; ok {
switch vv := v.(type) {
case int64:
if vv >= 0 {
if data, err := json.Marshal(stat); err != nil {
log.Error(err)
} else {
this.util.WriteBinFile(CONST_STAT_FILE_NAME, data)
}
}
}
}
}
SaveStatFunc()
}
func (this *Server) RemoveKeyFromLevelDB(key string, db *leveldb.DB) error {
var (
err error
)
err = db.Delete([]byte(key), nil)
return err
}
func (this *Server) SaveFileInfoToLevelDB(key string, fileInfo *FileInfo, db *leveldb.DB) (*FileInfo, error) {
var (
err error
data []byte
)
if fileInfo == nil || db == nil {
return nil, errors.New("fileInfo is null or db is null")
}
if data, err = json.Marshal(fileInfo); err != nil {
return fileInfo, err
}
if err = db.Put([]byte(key), data, nil); err != nil {
return fileInfo, err
}
if db == this.ldb { //search slow ,write fast, double write logDB
logDate := this.util.GetDayFromTimeStamp(fileInfo.TimeStamp)
logKey := fmt.Sprintf("%s_%s_%s", logDate, CONST_FILE_Md5_FILE_NAME, fileInfo.Md5)
this.logDB.Put([]byte(logKey), data, nil)
}
return fileInfo, nil
}
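// IsPeer reports whether the request comes from 127.0.0.1, this node's public IP, one of
// the configured admin_ips, or a host listed in Config().Peers (matched by http://<ip> prefix).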
func (this *Server) IsPeer(r *http.Request) bool {
var (
ip string
peer string
bflag bool
)
//return true
ip = this.util.GetClientIp(r)
if ip == "127.0.0.1" || ip == this.util.GetPulicIP() {
return true
}
if this.util.Contains(ip, Config().AdminIps) {
return true
}
ip = "http://" + ip
bflag = false
for _, peer = range Config().Peers {
if strings.HasPrefix(peer, ip) {
bflag = true
break
}
}
return bflag
}
func (this *Server) ReceiveMd5s(w http.ResponseWriter, r *http.Request) {
var (
err error
md5str string
fileInfo *FileInfo
md5s []string
)
if !this.IsPeer(r) {
log.Warn(fmt.Sprintf("ReceiveMd5s %s", this.util.GetClientIp(r)))
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
r.ParseForm()
md5str = r.FormValue("md5s")
md5s = strings.Split(md5str, ",")
AppendFunc := func(md5s []string) {
for _, m := range md5s {
if m != "" {
if fileInfo, err = this.GetFileInfoFromLevelDB(m); err != nil {
log.Error(err)
continue
}
this.AppendToQueue(fileInfo)
}
}
}
go AppendFunc(md5s)
}
func (this *Server) GetClusterNotPermitMessage(r *http.Request) string {
var (
message string
)
message = fmt.Sprintf(CONST_MESSAGE_CLUSTER_IP, this.util.GetClientIp(r))
return message
}
func (this *Server) GetMd5sForWeb(w http.ResponseWriter, r *http.Request) {
var (
date string
err error
result mapset.Set
lines []string
md5s []interface{}
)
if !this.IsPeer(r) {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
date = r.FormValue("date")
if result, err = this.GetMd5sByDate(date, CONST_FILE_Md5_FILE_NAME); err != nil {
log.Error(err)
return
}
md5s = result.ToSlice()
for _, line := range md5s {
if line != nil && line != "" {
lines = append(lines, line.(string))
}
}
w.Write([]byte(strings.Join(lines, ",")))
}
func (this *Server) GetMd5File(w http.ResponseWriter, r *http.Request) {
var (
date string
fpath string
data []byte
err error
)
if !this.IsPeer(r) {
return
}
date = r.FormValue("date")
fpath = DATA_DIR + "/" + date + "/" + CONST_FILE_Md5_FILE_NAME
if !this.util.FileExists(fpath) {
w.WriteHeader(404)
return
}
if data, err = ioutil.ReadFile(fpath); err != nil {
w.WriteHeader(500)
return
}
w.Write(data)
}
func (this *Server) GetMd5sMapByDate(date string, filename string) (*goutil.CommonMap, error) {
var (
err error
result *goutil.CommonMap
fpath string
content string
lines []string
line string
cols []string
data []byte
)
result = goutil.NewCommonMap(0)
if filename == "" {
fpath = DATA_DIR + "/" + date + "/" + CONST_FILE_Md5_FILE_NAME
} else {
fpath = DATA_DIR + "/" + date + "/" + filename
}
if !this.util.FileExists(fpath) {
return result, fmt.Errorf("fpath %s not found", fpath)
}
if data, err = ioutil.ReadFile(fpath); err != nil {
return result, err
}
content = string(data)
lines = strings.Split(content, "\n")
for _, line = range lines {
cols = strings.Split(line, "|")
if len(cols) > 2 {
if _, err = strconv.ParseInt(cols[1], 10, 64); err != nil {
continue
}
result.Add(cols[0])
}
}
return result, nil
}
func (this *Server) GetMd5sByDate(date string, filename string) (mapset.Set, error) {
var (
keyPrefix string
md5set mapset.Set
keys []string
)
md5set = mapset.NewSet()
keyPrefix = "%s_%s_"
keyPrefix = fmt.Sprintf(keyPrefix, date, filename)
iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
for iter.Next() {
keys = strings.Split(string(iter.Key()), "_")
if len(keys) >= 3 {
md5set.Add(keys[2])
}
}
iter.Release()
return md5set, nil
}
func (this *Server) SyncFileInfo(w http.ResponseWriter, r *http.Request) {
var (
err error
fileInfo FileInfo
fileInfoStr string
filename string
)
r.ParseForm()
if !this.IsPeer(r) {
return
}
fileInfoStr = r.FormValue("fileInfo")
if err = json.Unmarshal([]byte(fileInfoStr), &fileInfo); err != nil {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
log.Error(err)
return
}
if fileInfo.OffSet == -2 {
// optimize migrate
this.SaveFileInfoToLevelDB(fileInfo.Md5, &fileInfo, this.ldb)
} else {
this.SaveFileMd5Log(&fileInfo, CONST_Md5_QUEUE_FILE_NAME)
}
this.AppendToDownloadQueue(&fileInfo)
filename = fileInfo.Name
if fileInfo.ReName != "" {
filename = fileInfo.ReName
}
p := strings.Replace(fileInfo.Path, STORE_DIR+"/", "", 1)
downloadUrl := fmt.Sprintf("http://%s/%s", r.Host, Config().Group+"/"+p+"/"+filename)
log.Info("SyncFileInfo: ", downloadUrl)
w.Write([]byte(downloadUrl))
}
func (this *Server) CheckScene(scene string) (bool, error) {
var (
scenes []string
)
if len(Config().Scenes) == 0 {
return true, nil
}
for _, s := range Config().Scenes {
scenes = append(scenes, strings.Split(s, ":")[0])
}
if !this.util.Contains(scene, scenes) {
return false, errors.New("not valid scene")
}
return true, nil
}
func (this *Server) GetFileInfo(w http.ResponseWriter, r *http.Request) {
var (
fpath string
md5sum string
fileInfo *FileInfo
err error
result JsonResult
)
md5sum = r.FormValue("md5")
fpath = r.FormValue("path")
result.Status = "fail"
if !this.IsPeer(r) {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
if fpath != "" {
fpath = strings.Replace(fpath, "/"+Config().Group+"/", STORE_DIR_NAME+"/", 1)
md5sum = this.util.MD5(fpath)
}
if fileInfo, err = this.GetFileInfoFromLevelDB(md5sum); err != nil {
log.Error(err)
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
result.Status = "ok"
result.Data = fileInfo
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
func (this *Server) RemoveFile(w http.ResponseWriter, r *http.Request) {
var (
err error
md5sum string
fileInfo *FileInfo
fpath string
delUrl string
result JsonResult
inner string
name string
)
_ = delUrl
_ = inner
r.ParseForm()
md5sum = r.FormValue("md5")
fpath = r.FormValue("path")
inner = r.FormValue("inner")
result.Status = "fail"
if !this.IsPeer(r) {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
if Config().AuthUrl != "" && !this.CheckAuth(w, r) {
this.NotPermit(w, r)
return
}
if fpath != "" && md5sum == "" {
fpath = strings.Replace(fpath, "/"+Config().Group+"/", STORE_DIR_NAME+"/", 1)
md5sum = this.util.MD5(fpath)
}
if inner != "1" {
for _, peer := range Config().Peers {
delFile := func(peer string, md5sum string, fileInfo *FileInfo) {
delUrl = fmt.Sprintf("%s%s", peer, this.getRequestURI("delete"))
req := httplib.Post(delUrl)
req.Param("md5", md5sum)
req.Param("inner", "1")
req.SetTimeout(time.Second*5, time.Second*10)
if _, err = req.String(); err != nil {
log.Error(err)
}
}
go delFile(peer, md5sum, fileInfo)
}
}
if len(md5sum) < 32 {
result.Message = "md5 unvalid"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if fileInfo, err = this.GetFileInfoFromLevelDB(md5sum); err != nil {
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if fileInfo.OffSet >= 0 {
result.Message = "small file delete not support"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
name = fileInfo.Name
if fileInfo.ReName != "" {
name = fileInfo.ReName
}
fpath = fileInfo.Path + "/" + name
if fileInfo.Path != "" && this.util.FileExists(DOCKER_DIR+fpath) {
this.SaveFileMd5Log(fileInfo, CONST_REMOME_Md5_FILE_NAME)
if err = os.Remove(DOCKER_DIR + fpath); err != nil {
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
} else {
result.Message = "remove success"
result.Status = "ok"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
}
result.Message = "fail remove"
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) getRequestURI(action string) string {
var (
uri string
)
if Config().SupportGroupManage {
uri = "/" + Config().Group + "/" + action
} else {
uri = "/" + action
}
return uri
}
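// BuildFileResult assembles the upload response (download url, path, md5, size, scene) for a stored file.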
func (this *Server) BuildFileResult(fileInfo *FileInfo, r *http.Request) FileResult {
var (
outname string
fileResult FileResult
p string
downloadUrl string
domain string
)
if Config().DownloadDomain != "" {
domain = fmt.Sprintf("http://%s", Config().DownloadDomain)
} else {
domain = fmt.Sprintf("http://%s", r.Host)
}
outname = fileInfo.Name
if fileInfo.ReName != "" {
outname = fileInfo.ReName
}
p = strings.Replace(fileInfo.Path, STORE_DIR_NAME+"/", "", 1)
p = Config().Group + "/" + p + "/" + outname
downloadUrl = fmt.Sprintf("http://%s/%s", r.Host, p)
if Config().DownloadDomain != "" {
downloadUrl = fmt.Sprintf("http://%s/%s", Config().DownloadDomain, p)
}
fileResult.Url = downloadUrl
fileResult.Md5 = fileInfo.Md5
fileResult.Path = "/" + p
fileResult.Domain = domain
fileResult.Scene = fileInfo.Scene
fileResult.Size = fileInfo.Size
fileResult.ModTime = fileInfo.TimeStamp
// Just for Compatibility
fileResult.Src = fileResult.Path
fileResult.Scenes = fileInfo.Scene
return fileResult
}
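// SaveUploadFile writes a multipart upload into the store directory, optionally renaming it, and fills in the size, md5 and path of fileInfo.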
func (this *Server) SaveUploadFile(file multipart.File, header *multipart.FileHeader, fileInfo *FileInfo, r *http.Request) (*FileInfo, error) {
var (
err error
outFile *os.File
folder string
fi os.FileInfo
)
defer file.Close()
_, fileInfo.Name = filepath.Split(header.Filename)
	// bugfix: IE uploads may include the full client path in the filename
if len(Config().Extensions) > 0 && !this.util.Contains(path.Ext(fileInfo.Name), Config().Extensions) {
return fileInfo, errors.New("(error)file extension mismatch")
}
if Config().RenameFile {
fileInfo.ReName = this.util.MD5(this.util.GetUUID()) + path.Ext(fileInfo.Name)
}
folder = time.Now().Format("20060102/15/04")
if Config().PeerId != "" {
folder = fmt.Sprintf(folder+"/%s", Config().PeerId)
}
if fileInfo.Scene != "" {
folder = fmt.Sprintf(STORE_DIR+"/%s/%s", fileInfo.Scene, folder)
} else {
folder = fmt.Sprintf(STORE_DIR+"/%s", folder)
}
if fileInfo.Path != "" {
if strings.HasPrefix(fileInfo.Path, STORE_DIR) {
folder = fileInfo.Path
} else {
folder = STORE_DIR + "/" + fileInfo.Path
}
}
if !this.util.FileExists(folder) {
os.MkdirAll(folder, 0775)
}
outPath := fmt.Sprintf(folder+"/%s", fileInfo.Name)
if Config().RenameFile {
outPath = fmt.Sprintf(folder+"/%s", fileInfo.ReName)
}
if this.util.FileExists(outPath) && Config().EnableDistinctFile {
for i := 0; i < 10000; i++ {
outPath = fmt.Sprintf(folder+"/%d_%s", i, header.Filename)
fileInfo.Name = fmt.Sprintf("%d_%s", i, header.Filename)
if !this.util.FileExists(outPath) {
break
}
}
}
log.Info(fmt.Sprintf("upload: %s", outPath))
if outFile, err = os.Create(outPath); err != nil {
return fileInfo, err
}
defer outFile.Close()
if _, err = io.Copy(outFile, file); err != nil {
log.Error(err)
return fileInfo, errors.New("(error)fail," + err.Error())
}
	if fi, err = outFile.Stat(); err != nil {
		log.Error(err)
		return fileInfo, err
	}
	fileInfo.Size = fi.Size()
	if fi.Size() != header.Size {
		return fileInfo, errors.New("(error)file incomplete")
	}
v := this.util.GetFileSum(outFile, Config().FileSumArithmetic)
fileInfo.Md5 = v
//fileInfo.Path = folder //strings.Replace( folder,DOCKER_DIR,"",1)
fileInfo.Path = strings.Replace(folder, DOCKER_DIR, "", 1)
fileInfo.Peers = append(fileInfo.Peers, this.host)
//fmt.Println("upload",fileInfo)
return fileInfo, nil
}
func (this *Server) Upload(w http.ResponseWriter, r *http.Request) {
var (
err error
ok bool
// pathname string
md5sum string
fileInfo FileInfo
uploadFile multipart.File
uploadHeader *multipart.FileHeader
scene string
output string
fileResult FileResult
data []byte
code string
secret interface{}
)
output = r.FormValue("output")
if Config().EnableCrossOrigin {
this.CrossOrigin(w, r)
}
if Config().AuthUrl != "" {
if !this.CheckAuth(w, r) {
log.Warn("auth fail", r.Form)
this.NotPermit(w, r)
w.Write([]byte("auth fail"))
return
}
}
if r.Method == "POST" {
md5sum = r.FormValue("md5")
output = r.FormValue("output")
if Config().ReadOnly {
w.Write([]byte("(error) readonly"))
return
}
if Config().EnableCustomPath {
fileInfo.Path = r.FormValue("path")
fileInfo.Path = strings.Trim(fileInfo.Path, "/")
}
scene = r.FormValue("scene")
code = r.FormValue("code")
if scene == "" {
//Just for Compatibility
scene = r.FormValue("scenes")
}
if Config().EnableGoogleAuth && scene != "" {
if secret, ok = this.sceneMap.GetValue(scene); ok {
if !this.VerifyGoogleCode(secret.(string), code, int64(Config().DownloadTokenExpire/30)) {
this.NotPermit(w, r)
w.Write([]byte("invalid request,error google code"))
return
}
}
}
fileInfo.Md5 = md5sum
fileInfo.OffSet = -1
if uploadFile, uploadHeader, err = r.FormFile("file"); err != nil {
log.Error(err)
w.Write([]byte(err.Error()))
return
}
fileInfo.Peers = []string{}
fileInfo.TimeStamp = time.Now().Unix()
if scene == "" {
scene = Config().DefaultScene
}
if output == "" {
output = "text"
}
if !this.util.Contains(output, []string{"json", "text"}) {
w.Write([]byte("output just support json or text"))
return
}
fileInfo.Scene = scene
if _, err = this.CheckScene(scene); err != nil {
w.Write([]byte(err.Error()))
return
}
if err != nil {
log.Error(err)
http.Redirect(w, r, "/", http.StatusMovedPermanently)
return
}
if _, err = this.SaveUploadFile(uploadFile, uploadHeader, &fileInfo, r); err != nil {
w.Write([]byte(err.Error()))
return
}
if Config().EnableDistinctFile {
if v, _ := this.GetFileInfoFromLevelDB(fileInfo.Md5); v != nil && v.Md5 != "" {
fileResult = this.BuildFileResult(v, r)
if Config().RenameFile {
os.Remove(DOCKER_DIR + fileInfo.Path + "/" + fileInfo.ReName)
} else {
os.Remove(DOCKER_DIR + fileInfo.Path + "/" + fileInfo.Name)
}
if output == "json" {
if data, err = json.Marshal(fileResult); err != nil {
log.Error(err)
w.Write([]byte(err.Error()))
}
w.Write(data)
} else {
w.Write([]byte(fileResult.Url))
}
return
}
}
if fileInfo.Md5 == "" {
log.Warn(" fileInfo.Md5 is null")
return
}
if md5sum != "" && fileInfo.Md5 != md5sum {
log.Warn(" fileInfo.Md5 and md5sum !=")
return
}
if !Config().EnableDistinctFile {
// bugfix filecount stat
fileInfo.Md5 = this.util.MD5(this.GetFilePathByInfo(&fileInfo, false))
}
if Config().EnableMergeSmallFile && fileInfo.Size < CONST_SMALL_FILE_SIZE {
if err = this.SaveSmallFile(&fileInfo); err != nil {
log.Error(err)
return
}
}
this.saveFileMd5Log(&fileInfo, CONST_FILE_Md5_FILE_NAME) //maybe slow
go this.postFileToPeer(&fileInfo)
if fileInfo.Size <= 0 {
log.Error("file size is zero")
return
}
fileResult = this.BuildFileResult(&fileInfo, r)
if output == "json" {
if data, err = json.Marshal(fileResult); err != nil {
log.Error(err)
w.Write([]byte(err.Error()))
}
w.Write(data)
} else {
w.Write([]byte(fileResult.Url))
}
return
} else {
md5sum = r.FormValue("md5")
output = r.FormValue("output")
if md5sum == "" {
w.Write([]byte("(error) if you want to upload fast md5 is require" +
",and if you want to upload file,you must use post method "))
return
}
if v, _ := this.GetFileInfoFromLevelDB(md5sum); v != nil && v.Md5 != "" {
fileResult = this.BuildFileResult(v, r)
}
if output == "json" {
if data, err = json.Marshal(fileResult); err != nil {
log.Error(err)
w.Write([]byte(err.Error()))
}
w.Write(data)
} else {
w.Write([]byte(fileResult.Url))
}
}
}
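// SaveSmallFile appends a small file into a shared "haystack" volume and records its offset, size and extension in fileInfo.ReName.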
func (this *Server) SaveSmallFile(fileInfo *FileInfo) error {
var (
err error
filename string
fpath string
srcFile *os.File
desFile *os.File
largeDir string
destPath string
reName string
fileExt string
)
filename = fileInfo.Name
fileExt = path.Ext(filename)
if fileInfo.ReName != "" {
filename = fileInfo.ReName
}
fpath = DOCKER_DIR + fileInfo.Path + "/" + filename
largeDir = LARGE_DIR + "/" + Config().PeerId
if !this.util.FileExists(largeDir) {
os.MkdirAll(largeDir, 0775)
}
reName = fmt.Sprintf("%d", this.util.RandInt(100, 300))
destPath = largeDir + "/" + reName
this.lockMap.LockKey(destPath)
defer this.lockMap.UnLockKey(destPath)
if this.util.FileExists(fpath) {
		srcFile, err = os.OpenFile(fpath, os.O_CREATE|os.O_RDONLY, 0666)
if err != nil {
return err
}
defer srcFile.Close()
		desFile, err = os.OpenFile(destPath, os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
return err
}
defer desFile.Close()
fileInfo.OffSet, err = desFile.Seek(0, 2)
if _, err = desFile.Write([]byte("1")); err != nil {
//first byte set 1
return err
}
fileInfo.OffSet, err = desFile.Seek(0, 2)
if err != nil {
return err
}
fileInfo.OffSet = fileInfo.OffSet - 1 //minus 1 byte
fileInfo.Size = fileInfo.Size + 1
fileInfo.ReName = fmt.Sprintf("%s,%d,%d,%s", reName, fileInfo.OffSet, fileInfo.Size, fileExt)
if _, err = io.Copy(desFile, srcFile); err != nil {
return err
}
srcFile.Close()
os.Remove(fpath)
fileInfo.Path = strings.Replace(largeDir, DOCKER_DIR, "", 1)
}
return nil
}
func (this *Server) SendToMail(to, subject, body, mailtype string) error {
host := Config().Mail.Host
user := Config().Mail.User
password := Config().Mail.Password
hp := strings.Split(host, ":")
auth := smtp.PlainAuth("", user, password, hp[0])
var contentType string
if mailtype == "html" {
contentType = "Content-Type: text/" + mailtype + "; charset=UTF-8"
} else {
contentType = "Content-Type: text/plain" + "; charset=UTF-8"
}
msg := []byte("To: " + to + "\r\nFrom: " + user + ">\r\nSubject: " + "\r\n" + contentType + "\r\n\r\n" + body)
sendTo := strings.Split(to, ";")
err := smtp.SendMail(host, auth, user, sendTo, msg)
return err
}
func (this *Server) BenchMark(w http.ResponseWriter, r *http.Request) {
t := time.Now()
batch := new(leveldb.Batch)
for i := 0; i < 100000000; i++ {
f := FileInfo{}
f.Peers = []string{"http://192.168.0.1", "http://192.168.2.5"}
f.Path = "20190201/19/02"
s := strconv.Itoa(i)
s = this.util.MD5(s)
f.Name = s
f.Md5 = s
if data, err := json.Marshal(&f); err == nil {
batch.Put([]byte(s), data)
}
if i%10000 == 0 {
if batch.Len() > 0 {
server.ldb.Write(batch, nil)
// batch = new(leveldb.Batch)
batch.Reset()
}
fmt.Println(i, time.Since(t).Seconds())
}
//fmt.Println(server.GetFileInfoFromLevelDB(s))
}
this.util.WriteFile("time.txt", time.Since(t).String())
fmt.Println(time.Since(t).String())
}
func (this *Server) RepairStatWeb(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
date string
inner string
)
if !this.IsPeer(r) {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
date = r.FormValue("date")
inner = r.FormValue("inner")
	if date == "" || len(date) != 8 {
		date = this.util.GetToDay()
	}
	if ok, err := regexp.MatchString("\\d{8}", date); err != nil || !ok {
		result.Message = "invalid date"
		w.Write([]byte(this.util.JsonEncodePretty(result)))
		return
	}
if inner != "1" {
for _, peer := range Config().Peers {
req := httplib.Post(peer + this.getRequestURI("repair_stat"))
req.Param("inner", "1")
req.Param("date", date)
if _, err := req.String(); err != nil {
log.Error(err)
}
}
}
result.Data = this.RepairStatByDate(date)
result.Status = "ok"
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) Stat(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
inner string
echart string
category []string
barCount []int64
barSize []int64
dataMap map[string]interface{}
)
if !this.IsPeer(r) {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
r.ParseForm()
inner = r.FormValue("inner")
echart = r.FormValue("echart")
data := this.GetStat()
result.Status = "ok"
result.Data = data
if echart == "1" {
dataMap = make(map[string]interface{}, 3)
for _, v := range data {
barCount = append(barCount, v.FileCount)
barSize = append(barSize, v.TotalSize)
category = append(category, v.Date)
}
dataMap["category"] = category
dataMap["barCount"] = barCount
dataMap["barSize"] = barSize
result.Data = dataMap
}
if inner == "1" {
w.Write([]byte(this.util.JsonEncodePretty(data)))
} else {
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
}
func (this *Server) GetStat() []StatDateFileInfo {
var (
min int64
max int64
err error
i int64
rows []StatDateFileInfo
total StatDateFileInfo
)
min = 20190101
max = 20190101
for k := range this.statMap.Get() {
ks := strings.Split(k, "_")
if len(ks) == 2 {
if i, err = strconv.ParseInt(ks[0], 10, 64); err != nil {
continue
}
if i >= max {
max = i
}
if i < min {
min = i
}
}
}
for i := min; i <= max; i++ {
s := fmt.Sprintf("%d", i)
if v, ok := this.statMap.GetValue(s + "_" + CONST_STAT_FILE_TOTAL_SIZE_KEY); ok {
var info StatDateFileInfo
info.Date = s
switch v.(type) {
case int64:
info.TotalSize = v.(int64)
total.TotalSize = total.TotalSize + v.(int64)
}
if v, ok := this.statMap.GetValue(s + "_" + CONST_STAT_FILE_COUNT_KEY); ok {
switch v.(type) {
case int64:
info.FileCount = v.(int64)
total.FileCount = total.FileCount + v.(int64)
}
}
rows = append(rows, info)
}
}
total.Date = "all"
rows = append(rows, total)
return rows
}
func (this *Server) RegisterExit() {
	c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
go func() {
for s := range c {
switch s {
case syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT:
this.ldb.Close()
log.Info("Exit", s)
os.Exit(1)
}
}
}()
}
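// AppendToQueue enqueues a file for upload to peers, sleeping while the queue is nearly full as a simple form of backpressure.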
func (this *Server) AppendToQueue(fileInfo *FileInfo) {
for (len(this.queueToPeers) + CONST_QUEUE_SIZE/10) > CONST_QUEUE_SIZE {
time.Sleep(time.Millisecond * 50)
}
this.queueToPeers <- *fileInfo
}
func (this *Server) AppendToDownloadQueue(fileInfo *FileInfo) {
for (len(this.queueFromPeers) + CONST_QUEUE_SIZE/10) > CONST_QUEUE_SIZE {
time.Sleep(time.Millisecond * 50)
}
this.queueFromPeers <- *fileInfo
}
func (this *Server) ConsumerDownLoad() {
ConsumerFunc := func() {
for {
fileInfo := <-this.queueFromPeers
if len(fileInfo.Peers) <= 0 {
log.Warn("Peer is null", fileInfo)
continue
}
for _, peer := range fileInfo.Peers {
if strings.Contains(peer, "127.0.0.1") {
log.Warn("sync error with 127.0.0.1", fileInfo)
continue
}
if peer != this.host {
this.DownloadFromPeer(peer, &fileInfo)
break
}
}
}
}
for i := 0; i < 200; i++ {
go ConsumerFunc()
}
}
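// RemoveDownloading periodically deletes partial downloads whose "downloading_" marker in leveldb is older than ten minutes.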
func (this *Server) RemoveDownloading() {
RemoveDownloadFunc := func() {
for {
iter := this.ldb.NewIterator(util.BytesPrefix([]byte("downloading_")), nil)
for iter.Next() {
key := iter.Key()
keys := strings.Split(string(key), "_")
if len(keys) == 3 {
if t, err := strconv.ParseInt(keys[1], 10, 64); err == nil && time.Now().Unix()-t > 60*10 {
os.Remove(DOCKER_DIR + keys[2])
}
}
}
iter.Release()
time.Sleep(time.Minute * 3)
}
}
go RemoveDownloadFunc()
}
func (this *Server) ConsumerLog() {
go func() {
var (
fileLog *FileLog
)
for {
fileLog = <-this.queueFileLog
this.saveFileMd5Log(fileLog.FileInfo, fileLog.FileName)
}
}()
}
func (this *Server) LoadSearchDict() {
go func() {
log.Info("Load search dict ....")
f, err := os.Open(CONST_SEARCH_FILE_NAME)
if err != nil {
log.Error(err)
return
}
defer f.Close()
r := bufio.NewReader(f)
		for {
			line, isPrefix, err := r.ReadLine()
			if err != nil {
				break
			}
			if isPrefix {
				// line longer than the reader buffer; skip the truncated entry
				continue
			}
			kvs := strings.Split(string(line), "\t")
			if len(kvs) == 2 {
				this.searchMap.Put(kvs[0], kvs[1])
			}
		}
log.Info("finish load search dict")
}()
}
func (this *Server) SaveSearchDict() {
var (
err error
fp *os.File
searchDict map[string]interface{}
k string
v interface{}
)
this.lockMap.LockKey(CONST_SEARCH_FILE_NAME)
defer this.lockMap.UnLockKey(CONST_SEARCH_FILE_NAME)
searchDict = this.searchMap.Get()
fp, err = os.OpenFile(CONST_SEARCH_FILE_NAME, os.O_RDWR, 0755)
if err != nil {
log.Error(err)
return
}
defer fp.Close()
for k, v = range searchDict {
		fp.WriteString(fmt.Sprintf("%s\t%s\n", k, v.(string)))
}
}
func (this *Server) ConsumerPostToPeer() {
ConsumerFunc := func() {
for {
fileInfo := <-this.queueToPeers
this.postFileToPeer(&fileInfo)
}
}
for i := 0; i < 200; i++ {
go ConsumerFunc()
}
}
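// AutoRepair compares per-date file counts with every peer and exchanges the missing md5 sums in both directions.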
func (this *Server) AutoRepair(forceRepair bool) {
if this.lockMap.IsLock("AutoRepair") {
log.Warn("Lock AutoRepair")
return
}
this.lockMap.LockKey("AutoRepair")
defer this.lockMap.UnLockKey("AutoRepair")
AutoRepairFunc := func(forceRepair bool) {
var (
dateStats []StatDateFileInfo
err error
countKey string
md5s string
localSet mapset.Set
remoteSet mapset.Set
allSet mapset.Set
tmpSet mapset.Set
fileInfo *FileInfo
)
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("AutoRepair")
log.Error(re)
log.Error(string(buffer))
}
}()
Update := func(peer string, dateStat StatDateFileInfo) {
			// pull the data over from the remote peer
req := httplib.Get(fmt.Sprintf("%s%s?date=%s&force=%s", peer, this.getRequestURI("sync"), dateStat.Date, "1"))
req.SetTimeout(time.Second*5, time.Second*5)
if _, err = req.String(); err != nil {
log.Error(err)
}
log.Info(fmt.Sprintf("syn file from %s date %s", peer, dateStat.Date))
}
for _, peer := range Config().Peers {
req := httplib.Post(fmt.Sprintf("%s%s", peer, this.getRequestURI("stat")))
req.Param("inner", "1")
req.SetTimeout(time.Second*5, time.Second*15)
if err = req.ToJSON(&dateStats); err != nil {
log.Error(err)
continue
}
for _, dateStat := range dateStats {
if dateStat.Date == "all" {
continue
}
countKey = dateStat.Date + "_" + CONST_STAT_FILE_COUNT_KEY
if v, ok := this.statMap.GetValue(countKey); ok {
switch v.(type) {
case int64:
if v.(int64) != dateStat.FileCount || forceRepair {
						// counts differ, find the difference
						//TODO
req := httplib.Post(fmt.Sprintf("%s%s", peer, this.getRequestURI("get_md5s_by_date")))
req.SetTimeout(time.Second*15, time.Second*60)
req.Param("date", dateStat.Date)
if md5s, err = req.String(); err != nil {
continue
}
if localSet, err = this.GetMd5sByDate(dateStat.Date, CONST_FILE_Md5_FILE_NAME); err != nil {
log.Error(err)
continue
}
remoteSet = this.util.StrToMapSet(md5s, ",")
allSet = localSet.Union(remoteSet)
md5s = this.util.MapSetToStr(allSet.Difference(localSet), ",")
req = httplib.Post(fmt.Sprintf("%s%s", peer, this.getRequestURI("receive_md5s")))
req.SetTimeout(time.Second*15, time.Second*60)
req.Param("md5s", md5s)
req.String()
tmpSet = allSet.Difference(remoteSet)
for v := range tmpSet.Iter() {
if v != nil {
if fileInfo, err = this.GetFileInfoFromLevelDB(v.(string)); err != nil {
log.Error(err)
continue
}
this.AppendToQueue(fileInfo)
}
}
//Update(peer,dateStat)
}
}
} else {
Update(peer, dateStat)
}
}
}
}
AutoRepairFunc(forceRepair)
}
func (this *Server) CleanLogLevelDBByDate(date string, filename string) {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("CleanLogLevelDBByDate")
log.Error(re)
log.Error(string(buffer))
}
}()
var (
err error
keyPrefix string
keys mapset.Set
)
keys = mapset.NewSet()
keyPrefix = "%s_%s_"
keyPrefix = fmt.Sprintf(keyPrefix, date, filename)
iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
for iter.Next() {
keys.Add(string(iter.Value()))
}
iter.Release()
for key := range keys.Iter() {
err = this.RemoveKeyFromLevelDB(key.(string), this.logDB)
if err != nil {
log.Error(err)
}
}
}
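// CleanAndBackUp checks every six hours whether the date has rolled over; if so, it clears yesterday's queue/error/remove log entries and backs up yesterday's metadata.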
func (this *Server) CleanAndBackUp() {
Clean := func() {
var (
filenames []string
yesterday string
)
if this.curDate != this.util.GetToDay() {
filenames = []string{CONST_Md5_QUEUE_FILE_NAME, CONST_Md5_ERROR_FILE_NAME, CONST_REMOME_Md5_FILE_NAME}
yesterday = this.util.GetDayFromTimeStamp(time.Now().AddDate(0, 0, -1).Unix())
for _, filename := range filenames {
this.CleanLogLevelDBByDate(yesterday, filename)
}
this.BackUpMetaDataByDate(yesterday)
this.curDate = this.util.GetToDay()
}
}
go func() {
for {
time.Sleep(time.Hour * 6)
Clean()
}
}()
}
func (this *Server) LoadFileInfoByDate(date string, filename string) (mapset.Set, error) {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("LoadFileInfoByDate")
log.Error(re)
log.Error(string(buffer))
}
}()
var (
err error
keyPrefix string
fileInfos mapset.Set
)
fileInfos = mapset.NewSet()
keyPrefix = "%s_%s_"
keyPrefix = fmt.Sprintf(keyPrefix, date, filename)
iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
for iter.Next() {
var fileInfo FileInfo
if err = json.Unmarshal(iter.Value(), &fileInfo); err != nil {
continue
}
fileInfos.Add(&fileInfo)
}
iter.Release()
return fileInfos, nil
}
func (this *Server) LoadQueueSendToPeer() {
if queue, err := this.LoadFileInfoByDate(this.util.GetToDay(), CONST_Md5_QUEUE_FILE_NAME); err != nil {
log.Error(err)
} else {
for fileInfo := range queue.Iter() {
//this.queueFromPeers <- *fileInfo.(*FileInfo)
this.AppendToDownloadQueue(fileInfo.(*FileInfo))
}
}
}
func (this *Server) CheckClusterStatus() {
check := func() {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("CheckClusterStatus")
log.Error(re)
log.Error(string(buffer))
}
}()
var (
status JsonResult
err error
subject string
body string
req *httplib.BeegoHTTPRequest
)
for _, peer := range Config().Peers {
req = httplib.Get(fmt.Sprintf("%s%s", peer, this.getRequestURI("status")))
req.SetTimeout(time.Second*5, time.Second*5)
err = req.ToJSON(&status)
if status.Status != "ok" {
for _, to := range Config().AlarmReceivers {
subject = "fastdfs server error"
if err != nil {
body = fmt.Sprintf("%s\nserver:%s\nerror:\n%s", subject, peer, err.Error())
} else {
body = fmt.Sprintf("%s\nserver:%s\n", subject, peer)
}
if err = this.SendToMail(to, subject, body, "text"); err != nil {
log.Error(err)
}
}
if Config().AlarmUrl != "" {
req = httplib.Post(Config().AlarmUrl)
req.SetTimeout(time.Second*10, time.Second*10)
req.Param("message", body)
req.Param("subject", subject)
if _, err = req.String(); err != nil {
log.Error(err)
}
}
}
}
}
go func() {
for {
time.Sleep(time.Minute * 10)
check()
}
}()
}
func (this *Server) RepairFileInfo(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
)
if !this.IsPeer(r) {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
if !Config().EnableMigrate {
w.Write([]byte("please set enable_migrate=true"))
return
}
result.Status = "ok"
result.Message = "repair job start,don't try again,very danger "
go this.RepairFileInfoFromFile()
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) Reload(w http.ResponseWriter, r *http.Request) {
var (
err error
data []byte
cfg GloablConfig
action string
cfgjson string
result JsonResult
)
result.Status = "fail"
r.ParseForm()
if !this.IsPeer(r) {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
cfgjson = r.FormValue("cfg")
action = r.FormValue("action")
_ = cfgjson
if action == "get" {
result.Data = Config()
result.Status = "ok"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if action == "set" {
if cfgjson == "" {
result.Message = "(error)parameter cfg(json) require"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if err = json.Unmarshal([]byte(cfgjson), &cfg); err != nil {
log.Error(err)
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
result.Status = "ok"
cfgjson = this.util.JsonEncodePretty(cfg)
this.util.WriteFile(CONST_CONF_FILE_NAME, cfgjson)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if action == "reload" {
if data, err = ioutil.ReadFile(CONST_CONF_FILE_NAME); err != nil {
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if err = json.Unmarshal(data, &cfg); err != nil {
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
ParseConfig(CONST_CONF_FILE_NAME)
this.initComponent(true)
result.Status = "ok"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if action == "" {
w.Write([]byte("(error)action support set(json) get reload"))
}
}
func (this *Server) RemoveEmptyDir(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
)
result.Status = "ok"
if this.IsPeer(r) {
go this.util.RemoveEmptyDir(DATA_DIR)
go this.util.RemoveEmptyDir(STORE_DIR)
result.Message = "clean job start ..,don't try again!!!"
w.Write([]byte(this.util.JsonEncodePretty(result)))
} else {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
}
func (this *Server) BackUp(w http.ResponseWriter, r *http.Request) {
var (
err error
date string
result JsonResult
inner string
url string
)
result.Status = "ok"
r.ParseForm()
date = r.FormValue("date")
inner = r.FormValue("inner")
if date == "" {
date = this.util.GetToDay()
}
if this.IsPeer(r) {
if inner != "1" {
for _, peer := range Config().Peers {
backUp := func(peer string, date string) {
url = fmt.Sprintf("%s%s", peer, this.getRequestURI("backup"))
req := httplib.Post(url)
req.Param("date", date)
req.Param("inner", "1")
req.SetTimeout(time.Second*5, time.Second*600)
if _, err = req.String(); err != nil {
log.Error(err)
}
}
go backUp(peer, date)
}
}
go this.BackUpMetaDataByDate(date)
result.Message = "back job start..."
w.Write([]byte(this.util.JsonEncodePretty(result)))
} else {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
}
// Notice: performance is poor; this scan is only suitable for small data sets, but it uses little memory. For higher performance, search via searchMap instead, at the cost of more memory.
func (this *Server) Search(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
err error
kw string
count int
fileInfos []FileInfo
md5s []string
)
kw = r.FormValue("kw")
if !this.IsPeer(r) {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
iter := this.ldb.NewIterator(nil, nil)
for iter.Next() {
var fileInfo FileInfo
value := iter.Value()
if err = json.Unmarshal(value, &fileInfo); err != nil {
log.Error(err)
continue
}
if strings.Contains(fileInfo.Name, kw) && !this.util.Contains(fileInfo.Md5, md5s) {
count = count + 1
fileInfos = append(fileInfos, fileInfo)
md5s = append(md5s, fileInfo.Md5)
}
if count >= 100 {
break
}
}
iter.Release()
err = iter.Error()
	if err != nil {
		log.Error(err)
	}
	//fileInfos=this.SearchDict(kw) // search files from the in-memory map for large data sets
result.Status = "ok"
result.Data = fileInfos
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) SearchDict(kw string) []FileInfo {
var (
fileInfos []FileInfo
fileInfo *FileInfo
)
for dict := range this.searchMap.Iter() {
if strings.Contains(dict.Val.(string), kw) {
if fileInfo, _ = this.GetFileInfoFromLevelDB(dict.Key); fileInfo != nil {
fileInfos = append(fileInfos, *fileInfo)
}
}
}
return fileInfos
}
func (this *Server) ListDir(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
dir string
filesInfo []os.FileInfo
err error
filesResult []FileInfoResult
tmpDir string
)
if !this.IsPeer(r) {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
dir = r.FormValue("dir")
//if dir == "" {
// result.Message = "dir can't null"
// w.Write([]byte(this.util.JsonEncodePretty(result)))
// return
//}
dir = strings.Replace(dir, ".", "", -1)
if tmpDir, err = os.Readlink(dir); err == nil {
dir = tmpDir
}
filesInfo, err = ioutil.ReadDir(DOCKER_DIR + STORE_DIR_NAME + "/" + dir)
if err != nil {
log.Error(err)
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
for _, f := range filesInfo {
fi := FileInfoResult{
Name: f.Name(),
Size: f.Size(),
IsDir: f.IsDir(),
ModTime: f.ModTime().Unix(),
Path: dir,
Md5: this.util.MD5(STORE_DIR_NAME + "/" + dir + "/" + f.Name()),
}
filesResult = append(filesResult, fi)
}
result.Status = "ok"
result.Data = filesResult
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
func (this *Server) VerifyGoogleCode(secret string, code string, discrepancy int64) bool {
var (
goauth *googleAuthenticator.GAuth
)
goauth = googleAuthenticator.NewGAuth()
if ok, err := goauth.VerifyCode(secret, code, discrepancy); ok {
return ok
} else {
log.Error(err)
return ok
}
}
func (this *Server) GenGoogleCode(w http.ResponseWriter, r *http.Request) {
var (
err error
result JsonResult
secret string
goauth *googleAuthenticator.GAuth
)
r.ParseForm()
goauth = googleAuthenticator.NewGAuth()
secret = r.FormValue("secret")
result.Status = "ok"
result.Message = "ok"
if !this.IsPeer(r) {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if result.Data, err = goauth.GetCode(secret); err != nil {
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) GenGoogleSecret(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
)
result.Status = "ok"
result.Message = "ok"
	if !this.IsPeer(r) {
		result.Message = this.GetClusterNotPermitMessage(r)
		w.Write([]byte(this.util.JsonEncodePretty(result)))
		return
	}
GetSeed := func(length int) string {
seeds := "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
s := ""
random.Seed(time.Now().UnixNano())
for i := 0; i < length; i++ {
s += string(seeds[random.Intn(32)])
}
return s
}
result.Data = GetSeed(16)
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) Report(w http.ResponseWriter, r *http.Request) {
var (
reportFileName string
result JsonResult
html string
)
result.Status = "ok"
r.ParseForm()
if this.IsPeer(r) {
reportFileName = STATIC_DIR + "/report.html"
if this.util.IsExist(reportFileName) {
if data, err := this.util.ReadBinFile(reportFileName); err != nil {
log.Error(err)
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
} else {
html = string(data)
if Config().SupportGroupManage {
html = strings.Replace(html, "{group}", "/"+Config().Group, 1)
} else {
html = strings.Replace(html, "{group}", "", 1)
}
w.Write([]byte(html))
return
}
} else {
w.Write([]byte(fmt.Sprintf("%s is not found", reportFileName)))
}
} else {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
}
}
func (this *Server) Repair(w http.ResponseWriter, r *http.Request) {
var (
force string
forceRepair bool
result JsonResult
)
result.Status = "ok"
r.ParseForm()
force = r.FormValue("force")
if force == "1" {
forceRepair = true
}
if this.IsPeer(r) {
go this.AutoRepair(forceRepair)
result.Message = "repair job start..."
w.Write([]byte(this.util.JsonEncodePretty(result)))
} else {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
}
func (this *Server) Status(w http.ResponseWriter, r *http.Request) {
var (
status JsonResult
sts map[string]interface{}
today string
sumset mapset.Set
ok bool
v interface{}
)
memStat := new(runtime.MemStats)
runtime.ReadMemStats(memStat)
today = this.util.GetToDay()
sts = make(map[string]interface{})
sts["Fs.QueueFromPeers"] = len(this.queueFromPeers)
sts["Fs.QueueToPeers"] = len(this.queueToPeers)
sts["Fs.QueueFileLog"] = len(this.queueFileLog)
for _, k := range []string{CONST_FILE_Md5_FILE_NAME, CONST_Md5_ERROR_FILE_NAME, CONST_Md5_QUEUE_FILE_NAME} {
k2 := fmt.Sprintf("%s_%s", today, k)
if v, ok = this.sumMap.GetValue(k2); ok {
sumset = v.(mapset.Set)
if k == CONST_Md5_QUEUE_FILE_NAME {
sts["Fs.QueueSetSize"] = sumset.Cardinality()
}
if k == CONST_Md5_ERROR_FILE_NAME {
sts["Fs.ErrorSetSize"] = sumset.Cardinality()
}
if k == CONST_FILE_Md5_FILE_NAME {
sts["Fs.FileSetSize"] = sumset.Cardinality()
}
}
}
sts["Fs.AutoRepair"] = Config().AutoRepair
sts["Fs.RefreshInterval"] = Config().RefreshInterval
sts["Fs.Peers"] = Config().Peers
sts["Fs.Local"] = this.host
sts["Fs.FileStats"] = this.GetStat()
sts["Fs.ShowDir"] = Config().ShowDir
sts["Sys.NumGoroutine"] = runtime.NumGoroutine()
sts["Sys.NumCpu"] = runtime.NumCPU()
sts["Sys.Alloc"] = memStat.Alloc
sts["Sys.TotalAlloc"] = memStat.TotalAlloc
sts["Sys.HeapAlloc"] = memStat.HeapAlloc
sts["Sys.Frees"] = memStat.Frees
sts["Sys.HeapObjects"] = memStat.HeapObjects
sts["Sys.NumGC"] = memStat.NumGC
sts["Sys.GCCPUFraction"] = memStat.GCCPUFraction
sts["Sys.GCSys"] = memStat.GCSys
//sts["Sys.MemInfo"] = memStat
status.Status = "ok"
status.Data = sts
w.Write([]byte(this.util.JsonEncodePretty(status)))
}
func (this *Server) HeartBeat(w http.ResponseWriter, r *http.Request) {
}
func (this *Server) Index(w http.ResponseWriter, r *http.Request) {
var (
uploadUrl string
uploadBigUrl string
uppy string
)
uploadUrl = "/upload"
uploadBigUrl = CONST_BIG_UPLOAD_PATH_SUFFIX
if Config().EnableWebUpload {
if Config().SupportGroupManage {
uploadUrl = fmt.Sprintf("/%s/upload", Config().Group)
uploadBigUrl = fmt.Sprintf("/%s%s", Config().Group, CONST_BIG_UPLOAD_PATH_SUFFIX)
}
uppy = `<html>
<head>
<meta charset="utf-8" />
<title>go-fastdfs</title>
<style>form { bargin } .form-line { display:block;height: 30px;margin:8px; } #stdUpload {background: #fafafa;border-radius: 10px;width: 745px; }</style>
<link href="https://transloadit.edgly.net/releases/uppy/v0.30.0/dist/uppy.min.css" rel="stylesheet"></head>
<body>
	<div>Standard upload (strongly recommended)</div>
<div id="stdUpload">
<form action="%s" method="post" enctype="multipart/form-data">
<span class="form-line">文件(file):
<input type="file" id="file" name="file" /></span>
<span class="form-line">场景(scene):
<input type="text" id="scene" name="scene" value="%s" /></span>
<span class="form-line">输出(output):
<input type="text" id="output" name="output" value="json" /></span>
<span class="form-line">自定义路径(path):
<input type="text" id="path" name="path" value="" /></span>
<span class="form-line">google认证码(code):
<input type="text" id="code" name="code" value="" /></span>
<span class="form-line">自定义认证(auth_token):
<input type="text" id="auth_token" name="auth_token" value="" /></span>
<input type="submit" name="submit" value="upload" />
</form>
</div>
	<div>Resumable upload (consider this for large files)</div>
<div>
<div id="drag-drop-area"></div>
<script src="https://transloadit.edgly.net/releases/uppy/v0.30.0/dist/uppy.min.js"></script>
<script>var uppy = Uppy.Core().use(Uppy.Dashboard, {
inline: true,
target: '#drag-drop-area'
}).use(Uppy.Tus, {
endpoint: '%s'
})
uppy.on('complete', (result) => {
// console.log(result) console.log('Upload complete! We’ve uploaded these files:', result.successful)
})
	uppy.setMeta({ auth_token: '9ee60e59-cb0f-4578-aaba-29b9fc2919ca',callback_url:'http://127.0.0.1/callback' })// pass the upload auth parameters here; in the callback_url request, id is the file ID and info is the file's basic information as JSON
</script>
</div>
</body>
</html>`
uppyFileName := STATIC_DIR + "/uppy.html"
if this.util.IsExist(uppyFileName) {
if data, err := this.util.ReadBinFile(uppyFileName); err != nil {
log.Error(err)
} else {
uppy = string(data)
}
} else {
this.util.WriteFile(uppyFileName, uppy)
}
fmt.Fprintf(w,
fmt.Sprintf(uppy, uploadUrl, Config().DefaultScene, uploadBigUrl))
} else {
w.Write([]byte("web upload deny"))
}
}
func init() {
flag.Parse()
if *v {
fmt.Printf("%s\n%s\n%s\n%s\n", VERSION, BUILD_TIME, GO_VERSION, GIT_VERSION)
os.Exit(0)
}
DOCKER_DIR = os.Getenv("GO_FASTDFS_DIR")
if DOCKER_DIR != "" {
if !strings.HasSuffix(DOCKER_DIR, "/") {
DOCKER_DIR = DOCKER_DIR + "/"
}
}
STORE_DIR = DOCKER_DIR + STORE_DIR_NAME
CONF_DIR = DOCKER_DIR + CONF_DIR_NAME
DATA_DIR = DOCKER_DIR + DATA_DIR_NAME
LOG_DIR = DOCKER_DIR + LOG_DIR_NAME
STATIC_DIR = DOCKER_DIR + STATIC_DIR_NAME
LARGE_DIR_NAME = "haystack"
LARGE_DIR = STORE_DIR + "/haystack"
CONST_LEVELDB_FILE_NAME = DATA_DIR + "/fileserver.db"
CONST_LOG_LEVELDB_FILE_NAME = DATA_DIR + "/log.db"
CONST_STAT_FILE_NAME = DATA_DIR + "/stat.json"
CONST_CONF_FILE_NAME = CONF_DIR + "/cfg.json"
CONST_SEARCH_FILE_NAME = DATA_DIR + "/search.txt"
FOLDERS = []string{DATA_DIR, STORE_DIR, CONF_DIR, STATIC_DIR}
logAccessConfigStr = strings.Replace(logAccessConfigStr, "{DOCKER_DIR}", DOCKER_DIR, -1)
logConfigStr = strings.Replace(logConfigStr, "{DOCKER_DIR}", DOCKER_DIR, -1)
for _, folder := range FOLDERS {
os.MkdirAll(folder, 0775)
}
server = NewServer()
peerId := fmt.Sprintf("%d", server.util.RandInt(0, 9))
if !server.util.FileExists(CONST_CONF_FILE_NAME) {
peer := "http://" + server.util.GetPulicIP() + ":8080"
cfg := fmt.Sprintf(cfgJson, peerId, peer, peer)
server.util.WriteFile(CONST_CONF_FILE_NAME, cfg)
}
if logger, err := log.LoggerFromConfigAsBytes([]byte(logConfigStr)); err != nil {
panic(err)
} else {
log.ReplaceLogger(logger)
}
if _logacc, err := log.LoggerFromConfigAsBytes([]byte(logAccessConfigStr)); err == nil {
logacc = _logacc
log.Info("succes init log access")
} else {
log.Error(err.Error())
}
ParseConfig(CONST_CONF_FILE_NAME)
if Config().QueueSize == 0 {
Config().QueueSize = CONST_QUEUE_SIZE
}
if Config().PeerId == "" {
Config().PeerId = peerId
}
staticHandler = http.StripPrefix("/"+Config().Group+"/", http.FileServer(http.Dir(STORE_DIR)))
server.initComponent(false)
}
func (this *Server) test() {
testLock := func() {
wg := sync.WaitGroup{}
tt := func(i int, wg *sync.WaitGroup) {
//if server.lockMap.IsLock("xx") {
// return
//}
//fmt.Println("timeer len",len(server.lockMap.Get()))
//time.Sleep(time.Nanosecond*10)
server.lockMap.LockKey("xx")
defer server.lockMap.UnLockKey("xx")
//time.Sleep(time.Nanosecond*1)
//fmt.Println("xx", i)
wg.Done()
}
go func() {
for {
time.Sleep(time.Second * 1)
fmt.Println("timeer len", len(server.lockMap.Get()), server.lockMap.Get())
}
}()
fmt.Println(len(server.lockMap.Get()))
for i := 0; i < 10000; i++ {
wg.Add(1)
go tt(i, &wg)
}
fmt.Println(len(server.lockMap.Get()))
fmt.Println(len(server.lockMap.Get()))
server.lockMap.LockKey("abc")
fmt.Println("lock")
time.Sleep(time.Second * 5)
server.lockMap.UnLockKey("abc")
server.lockMap.LockKey("abc")
server.lockMap.UnLockKey("abc")
}
_ = testLock
testFile := func() {
var (
err error
f *os.File
)
f, err = os.OpenFile("tt", os.O_CREATE|os.O_RDWR, 0777)
if err != nil {
fmt.Println(err)
}
f.WriteAt([]byte("1"), 100)
f.Seek(0, 2)
f.Write([]byte("2"))
//fmt.Println(f.Seek(0, 2))
//fmt.Println(f.Seek(3, 2))
//fmt.Println(f.Seek(3, 0))
//fmt.Println(f.Seek(3, 1))
//fmt.Println(f.Seek(3, 0))
//f.Write([]byte("1"))
}
_ = testFile
//testFile()
//testLock()
}
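// hookDataStore wraps the tusd DataStore so that an auth_token check runs before a new upload is created.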
type hookDataStore struct {
tusd.DataStore
}
type httpError struct {
error
statusCode int
}
func (err httpError) StatusCode() int {
return err.statusCode
}
func (err httpError) Body() []byte {
return []byte(err.Error())
}
func (store hookDataStore) NewUpload(info tusd.FileInfo) (id string, err error) {
var (
jsonResult JsonResult
)
if Config().AuthUrl != "" {
if auth_token, ok := info.MetaData["auth_token"]; !ok {
msg := "token auth fail,auth_token is not in http header Upload-Metadata," +
"in uppy uppy.setMeta({ auth_token: '9ee60e59-cb0f-4578-aaba-29b9fc2919ca' })"
log.Error(msg, fmt.Sprintf("current header:%v", info.MetaData))
return "", httpError{error: errors.New(msg), statusCode: 401}
} else {
req := httplib.Post(Config().AuthUrl)
req.Param("auth_token", auth_token)
req.SetTimeout(time.Second*5, time.Second*10)
content, err := req.String()
content = strings.TrimSpace(content)
if strings.HasPrefix(content, "{") && strings.HasSuffix(content, "}") {
if err = json.Unmarshal([]byte(content), &jsonResult); err != nil {
log.Error(err)
return "", httpError{error: errors.New(err.Error() + content), statusCode: 401}
}
if jsonResult.Data != "ok" {
return "", httpError{error: errors.New(content), statusCode: 401}
}
} else {
if err != nil {
log.Error(err)
return "", err
}
if strings.TrimSpace(content) != "ok" {
return "", httpError{error: errors.New(content), statusCode: 401}
}
}
}
}
return store.DataStore.NewUpload(info)
}
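// initTus wires up the tusd resumable-upload handler for big files, rotates its log file, and moves completed uploads into the regular store.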
func (this *Server) initTus() {
var (
err error
fileLog *os.File
bigDir string
)
BIG_DIR := STORE_DIR + "/_big/" + Config().PeerId
os.MkdirAll(BIG_DIR, 0775)
os.MkdirAll(LOG_DIR, 0775)
store := filestore.FileStore{
Path: BIG_DIR,
}
if fileLog, err = os.OpenFile(LOG_DIR+"/tusd.log", os.O_CREATE|os.O_RDWR, 0666); err != nil {
log.Error(err)
panic("initTus")
}
go func() {
for {
if fi, err := fileLog.Stat(); err != nil {
log.Error(err)
} else {
if fi.Size() > 1024*1024*500 {
//500M
this.util.CopyFile(LOG_DIR+"/tusd.log", LOG_DIR+"/tusd.log.2")
fileLog.Seek(0, 0)
fileLog.Truncate(0)
fileLog.Seek(0, 2)
}
}
time.Sleep(time.Second * 30)
}
}()
l := slog.New(fileLog, "[tusd] ", slog.LstdFlags)
bigDir = CONST_BIG_UPLOAD_PATH_SUFFIX
if Config().SupportGroupManage {
bigDir = fmt.Sprintf("/%s%s", Config().Group, CONST_BIG_UPLOAD_PATH_SUFFIX)
}
composer := tusd.NewStoreComposer()
// support raw tus upload and download
store.GetReaderExt = func(id string) (io.Reader, error) {
var (
offset int64
err error
length int
buffer []byte
fi *FileInfo
)
if fi, err = this.GetFileInfoFromLevelDB(id); err != nil {
log.Error(err)
return nil, err
} else {
fp := DOCKER_DIR + fi.Path + "/" + fi.ReName
if this.util.FileExists(fp) {
log.Info(fmt.Sprintf("download:%s", fp))
return os.Open(fp)
}
ps := strings.Split(fp, ",")
if len(ps) > 2 && this.util.FileExists(ps[0]) {
if length, err = strconv.Atoi(ps[2]); err != nil {
return nil, err
}
if offset, err = strconv.ParseInt(ps[1], 10, 64); err != nil {
return nil, err
}
if buffer, err = this.util.ReadFileByOffSet(ps[0], offset, length); err != nil {
return nil, err
}
if buffer[0] == '1' {
bufferReader := bytes.NewBuffer(buffer[1:])
return bufferReader, nil
} else {
msg := "data no sync"
log.Error(msg)
return nil, errors.New(msg)
}
}
return nil, errors.New(fmt.Sprintf("%s not found", fp))
}
}
store.UseIn(composer)
SetupPreHooks := func(composer *tusd.StoreComposer) {
composer.UseCore(hookDataStore{
DataStore: composer.Core,
})
}
SetupPreHooks(composer)
handler, err := tusd.NewHandler(tusd.Config{
Logger: l,
BasePath: bigDir,
StoreComposer: composer,
NotifyCompleteUploads: true,
RespectForwardedHeaders: true,
})
notify := func(handler *tusd.Handler) {
for {
select {
case info := <-handler.CompleteUploads:
log.Info("CompleteUploads", info)
name := ""
if v, ok := info.MetaData["filename"]; ok {
name = v
}
var err error
md5sum := ""
oldFullPath := BIG_DIR + "/" + info.ID + ".bin"
infoFullPath := BIG_DIR + "/" + info.ID + ".info"
if md5sum, err = this.util.GetFileSumByName(oldFullPath, Config().FileSumArithmetic); err != nil {
log.Error(err)
continue
}
ext := path.Ext(name)
filename := md5sum + ext
timeStamp := time.Now().Unix()
fpath := time.Now().Format("/20060102/15/04/")
newFullPath := STORE_DIR + "/" + Config().DefaultScene + fpath + Config().PeerId + "/" + filename
if fi, err := this.GetFileInfoFromLevelDB(md5sum); err != nil {
log.Error(err)
} else {
if fi.Md5 != "" {
if _, err := this.SaveFileInfoToLevelDB(info.ID, fi, this.ldb); err != nil {
log.Error(err)
}
log.Info(fmt.Sprintf("file is found md5:%s", fi.Md5))
log.Info("remove file:", oldFullPath)
log.Info("remove file:", infoFullPath)
os.Remove(oldFullPath)
os.Remove(infoFullPath)
continue
}
}
fpath = STORE_DIR_NAME + "/" + Config().DefaultScene + fpath + Config().PeerId
os.MkdirAll(DOCKER_DIR+fpath, 0775)
fileInfo := &FileInfo{
Name: name,
Path: fpath,
ReName: filename,
Size: info.Size,
TimeStamp: timeStamp,
Md5: md5sum,
Peers: []string{this.host},
OffSet: -1,
}
if err = os.Rename(oldFullPath, newFullPath); err != nil {
log.Error(err)
continue
}
log.Info(fileInfo)
os.Remove(infoFullPath)
if _, err = this.SaveFileInfoToLevelDB(info.ID, fileInfo, this.ldb); err != nil {
					// associate the tus upload id with the stored file info
log.Error(err)
}
this.SaveFileMd5Log(fileInfo, CONST_FILE_Md5_FILE_NAME)
go this.postFileToPeer(fileInfo)
callBack := func(info tusd.FileInfo, fileInfo *FileInfo) {
if callback_url, ok := info.MetaData["callback_url"]; ok {
req := httplib.Post(callback_url)
req.SetTimeout(time.Second*10, time.Second*10)
req.Param("info", server.util.JsonEncodePretty(fileInfo))
req.Param("id", info.ID)
if _, err := req.String(); err != nil {
log.Error(err)
}
}
}
go callBack(info, fileInfo)
}
}
}
go notify(handler)
if err != nil {
log.Error(err)
}
http.Handle(bigDir, http.StripPrefix(bigDir, handler))
}
func (this *Server) FormatStatInfo() {
var (
data []byte
err error
count int64
stat map[string]interface{}
)
if this.util.FileExists(CONST_STAT_FILE_NAME) {
if data, err = this.util.ReadBinFile(CONST_STAT_FILE_NAME); err != nil {
log.Error(err)
} else {
if err = json.Unmarshal(data, &stat); err != nil {
log.Error(err)
} else {
for k, v := range stat {
switch v.(type) {
case float64:
vv := strings.Split(fmt.Sprintf("%f", v), ".")[0]
if count, err = strconv.ParseInt(vv, 10, 64); err != nil {
log.Error(err)
} else {
this.statMap.Put(k, count)
}
default:
this.statMap.Put(k, v)
}
}
}
}
} else {
this.RepairStatByDate(this.util.GetToDay())
}
}
func (this *Server) initComponent(isReload bool) {
var (
ip string
)
ip = this.util.GetPulicIP()
if Config().Host == "" {
if len(strings.Split(Config().Addr, ":")) == 2 {
server.host = fmt.Sprintf("http://%s:%s", ip, strings.Split(Config().Addr, ":")[1])
Config().Host = server.host
}
} else {
if strings.HasPrefix(Config().Host, "http") {
server.host = Config().Host
} else {
server.host = "http://" + Config().Host
}
}
ex, _ := regexp.Compile("\\d+\\.\\d+\\.\\d+\\.\\d+")
var peers []string
for _, peer := range Config().Peers {
if this.util.Contains(ip, ex.FindAllString(peer, -1)) ||
this.util.Contains("127.0.0.1", ex.FindAllString(peer, -1)) {
continue
}
if strings.HasPrefix(peer, "http") {
peers = append(peers, peer)
} else {
peers = append(peers, "http://"+peer)
}
}
Config().Peers = peers
if !isReload {
this.FormatStatInfo()
if Config().EnableTus {
this.initTus()
}
}
for _, s := range Config().Scenes {
kv := strings.Split(s, ":")
if len(kv) == 2 {
this.sceneMap.Put(kv[0], kv[1])
}
}
}
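// HttpHandler wraps the default mux with access logging, panic recovery and optional CORS headers.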
type HttpHandler struct {
}
func (HttpHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) {
status_code := "200"
defer func(t time.Time) {
logStr := fmt.Sprintf("[Access] %s | %v | %s | %s | %s | %s |%s",
time.Now().Format("2006/01/02 - 15:04:05"),
res.Header(),
time.Since(t).String(),
server.util.GetClientIp(req),
req.Method,
status_code,
req.RequestURI,
)
logacc.Info(logStr)
}(time.Now())
defer func() {
if err := recover(); err != nil {
status_code = "500"
res.WriteHeader(500)
print(err)
buff := debug.Stack()
log.Error(err)
log.Error(string(buff))
}
}()
if Config().EnableCrossOrigin {
server.CrossOrigin(res, req)
}
http.DefaultServeMux.ServeHTTP(res, req)
}
func (this *Server) Main() {
go func() {
for {
this.CheckFileAndSendToPeer(this.util.GetToDay(), CONST_Md5_ERROR_FILE_NAME, false)
//fmt.Println("CheckFileAndSendToPeer")
time.Sleep(time.Second * time.Duration(Config().RefreshInterval))
//this.util.RemoveEmptyDir(STORE_DIR)
}
}()
go this.CleanAndBackUp()
go this.CheckClusterStatus()
go this.LoadQueueSendToPeer()
go this.ConsumerPostToPeer()
go this.ConsumerLog()
go this.ConsumerDownLoad()
go this.RemoveDownloading()
//go this.LoadSearchDict()
if Config().EnableMigrate {
go this.RepairFileInfoFromFile()
}
if Config().AutoRepair {
go func() {
for {
time.Sleep(time.Minute * 3)
this.AutoRepair(false)
time.Sleep(time.Minute * 60)
}
}()
}
groupRoute := ""
if Config().SupportGroupManage {
groupRoute = "/" + Config().Group
}
uploadPage := "upload.html"
if groupRoute == "" {
http.HandleFunc(fmt.Sprintf("%s", "/"), this.Index)
http.HandleFunc(fmt.Sprintf("/%s", uploadPage), this.Index)
} else {
http.HandleFunc(fmt.Sprintf("%s", groupRoute), this.Index)
http.HandleFunc(fmt.Sprintf("%s/%s", groupRoute, uploadPage), this.Index)
}
http.HandleFunc(fmt.Sprintf("%s/check_files_exist", groupRoute), this.CheckFilesExist)
http.HandleFunc(fmt.Sprintf("%s/check_file_exist", groupRoute), this.CheckFileExist)
http.HandleFunc(fmt.Sprintf("%s/upload", groupRoute), this.Upload)
http.HandleFunc(fmt.Sprintf("%s/delete", groupRoute), this.RemoveFile)
http.HandleFunc(fmt.Sprintf("%s/get_file_info", groupRoute), this.GetFileInfo)
http.HandleFunc(fmt.Sprintf("%s/sync", groupRoute), this.Sync)
http.HandleFunc(fmt.Sprintf("%s/stat", groupRoute), this.Stat)
http.HandleFunc(fmt.Sprintf("%s/repair_stat", groupRoute), this.RepairStatWeb)
http.HandleFunc(fmt.Sprintf("%s/status", groupRoute), this.Status)
http.HandleFunc(fmt.Sprintf("%s/repair", groupRoute), this.Repair)
http.HandleFunc(fmt.Sprintf("%s/report", groupRoute), this.Report)
http.HandleFunc(fmt.Sprintf("%s/backup", groupRoute), this.BackUp)
http.HandleFunc(fmt.Sprintf("%s/search", groupRoute), this.Search)
http.HandleFunc(fmt.Sprintf("%s/list_dir", groupRoute), this.ListDir)
http.HandleFunc(fmt.Sprintf("%s/remove_empty_dir", groupRoute), this.RemoveEmptyDir)
http.HandleFunc(fmt.Sprintf("%s/repair_fileinfo", groupRoute), this.RepairFileInfo)
http.HandleFunc(fmt.Sprintf("%s/reload", groupRoute), this.Reload)
http.HandleFunc(fmt.Sprintf("%s/syncfile_info", groupRoute), this.SyncFileInfo)
http.HandleFunc(fmt.Sprintf("%s/get_md5s_by_date", groupRoute), this.GetMd5sForWeb)
http.HandleFunc(fmt.Sprintf("%s/receive_md5s", groupRoute), this.ReceiveMd5s)
http.HandleFunc(fmt.Sprintf("%s/gen_google_secret", groupRoute), this.GenGoogleSecret)
http.HandleFunc(fmt.Sprintf("%s/gen_google_code", groupRoute), this.GenGoogleCode)
http.HandleFunc("/"+Config().Group+"/", this.Download)
fmt.Println("Listen on " + Config().Addr)
err := http.ListenAndServe(Config().Addr, new(HttpHandler))
log.Error(err)
fmt.Println(err)
}
func main() {
server.Main()
}
|
[
"\"GO_FASTDFS_DIR\""
] |
[] |
[
"GO_FASTDFS_DIR"
] |
[]
|
["GO_FASTDFS_DIR"]
|
go
| 1 | 0 | |
crm/settings.py
|
import os
from celery.schedules import crontab
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
# Set in local_settings.py
SECRET_KEY = 'SECRET_SECRET_SECRET'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv('DEBUG_STATUS', 'True') == 'True'
ALLOWED_HOSTS = ['*']
# Application definition
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login/'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'simple_pagination',
'compressor',
# 'haystack',
'common',
'accounts',
'cases',
'contacts',
'emails',
'leads',
'opportunity',
'planner',
'sorl.thumbnail',
'phonenumber_field',
'storages',
'marketing',
'tasks',
'invoices',
'events',
'teams',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'crm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates"), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'crm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'dj_crm',
'USER': 'postgres',
'PASSWORD': 'root',
'HOST': os.getenv('DB_HOST', '127.0.0.1'),
'PORT': os.getenv('DB_PORT', '5432')
}
}
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static"), ]
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, STATIC_URL)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# EMAIL_HOST = 'localhost'
# EMAIL_PORT = 25
# AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend', )
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = os.getenv('SG_USER', '')
EMAIL_HOST_PASSWORD = os.getenv('SG_PWD', '')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
AUTH_USER_MODEL = 'common.User'
STORAGE_TYPE = os.getenv('STORAGE_TYPE', 'normal')
if STORAGE_TYPE == 'normal':
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATICFILES_DIRS = (BASE_DIR + '/static',)
COMPRESS_ROOT = BASE_DIR + '/static/'
elif STORAGE_TYPE == 's3-storage':
AWS_STORAGE_BUCKET_NAME = AWS_BUCKET_NAME = os.getenv('AWSBUCKETNAME', '')
AM_ACCESS_KEY = AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID', '')
AM_PASS_KEY = AWS_SECRET_ACCESS_KEY = os.getenv(
'AWS_SECRET_ACCESS_KEY', '')
S3_DOMAIN = AWS_S3_CUSTOM_DOMAIN = str(
AWS_BUCKET_NAME) + '.s3.amazonaws.com'
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': 'max-age=86400',
}
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
DEFAULT_S3_PATH = "media"
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATIC_S3_PATH = "static"
COMPRESS_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter', 'compressor.filters.cssmin.CSSMinFilter']
COMPRESS_JS_FILTERS = ['compressor.filters.jsmin.JSMinFilter']
COMPRESS_REBUILD_TIMEOUT = 5400
MEDIA_ROOT = '/%s/' % DEFAULT_S3_PATH
MEDIA_URL = '//%s/%s/' % (S3_DOMAIN, DEFAULT_S3_PATH)
STATIC_ROOT = "/%s/" % STATIC_S3_PATH
STATIC_URL = 'https://%s/' % (S3_DOMAIN)
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
CORS_ORIGIN_ALLOW_ALL = True
AWS_IS_GZIPPED = True
AWS_ENABLED = True
AWS_S3_SECURE_URLS = True
COMPRESS_ROOT = BASE_DIR + '/static/'
COMPRESS_ENABLED = True
COMPRESS_OFFLINE_CONTEXT = {
'STATIC_URL': STATIC_URL,
}
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter', 'compressor.filters.cssmin.CSSMinFilter']
COMPRESS_REBUILD_TIMEOUT = 5400
COMPRESS_OUTPUT_DIR = 'CACHE'
COMPRESS_URL = STATIC_URL
COMPRESS_PRECOMPILERS = (
('text/less', 'lessc {infile} {outfile}'),
('text/x-sass', 'sass {infile} {outfile}'),
('text/x-scss', 'sass {infile} {outfile}'),
)
COMPRESS_OFFLINE_CONTEXT = {
'STATIC_URL': STATIC_URL,
}
DEFAULT_FROM_EMAIL = '[email protected]'
# celery Tasks
CELERY_BROKER_URL = 'redis://localhost:6379'
CELERY_RESULT_BACKEND = 'redis://localhost:6379'
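# Note: crontab(minute=30, hour='*') below fires once per hour at minute 30, not every thirty minutes as the schedule name suggests; crontab(minute='*/5') fires every five minutes.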
CELERY_BEAT_SCHEDULE = {
"runs-campaign-for-every-thiry-minutes": {
"task": "marketing.tasks.run_all_campaigns",
"schedule": crontab(minute=30, hour='*')
},
"runs-campaign-for-every-five-minutes": {
"task": "marketing.tasks.list_all_bounces_unsubscribes",
"schedule": crontab(minute='*/5')
},
"runs-scheduled-campaigns-for-every-one-hour": {
"task": "marketing.tasks.send_scheduled_campaigns",
"schedule": crontab(hour='*/1')
},
"runs-scheduled-emails-for-accounts-every-one-minute": {
"task": "accounts.tasks.send_scheduled_emails",
"schedule": crontab(minute='*/1')
}
}
MAIL_SENDER = 'AMAZON'
INACTIVE_MAIL_SENDER = 'MANDRILL'
AM_ACCESS_KEY = os.getenv('AM_ACCESS_KEY', '')
AM_PASS_KEY = os.getenv('AM_PASS_KEY', '')
AWS_REGION = os.getenv('AWS_REGION', '')
MGUN_API_URL = os.getenv('MGUN_API_URL', '')
MGUN_API_KEY = os.getenv('MGUN_API_KEY', '')
SG_USER = os.getenv('SG_USER', '')
SG_PWD = os.getenv('SG_PWD', '')
MANDRILL_API_KEY = os.getenv('MANDRILL_API_KEY', '')
ADMIN_EMAIL = "[email protected]"
URL_FOR_LINKS = "http://demo.django-crm.io"
try:
from .dev_settings import *
except ImportError:
pass
GP_CLIENT_ID = os.getenv('GP_CLIENT_ID', False)
GP_CLIENT_SECRET = os.getenv('GP_CLIENT_SECRET', False)
ENABLE_GOOGLE_LOGIN = os.getenv('ENABLE_GOOGLE_LOGIN', False)
MARKETING_REPLY_EMAIL = '[email protected]'
PASSWORD_RESET_TIMEOUT_DAYS = 3
SENTRY_ENABLED = os.getenv('SENTRY_ENABLED', False)
if SENTRY_ENABLED and not DEBUG:
if os.getenv('SENTRYDSN') is not None:
RAVEN_CONFIG = {
'dsn': os.getenv('SENTRYDSN', ''),
}
INSTALLED_APPS = INSTALLED_APPS + [
'raven.contrib.django.raven_compat',
]
MIDDLEWARE = [
'raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',
'raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware',
] + MIDDLEWARE
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
},
}
# HAYSTACK_CONNECTIONS = {
# 'default': {
# # 'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
# 'ENGINE': 'marketing.search_backends.CustomElasticsearchSearchEngine',
# 'URL': 'http://127.0.0.1:9200/',
# 'INDEX_NAME': 'haystack',
# },
# }
# HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
# HAYSTACK_SEARCH_RESULTS_PER_PAGE = 10
# ELASTICSEARCH_INDEX_SETTINGS = {
# "settings": {
# "analysis": {
# "analyzer": {
# "ngram_analyzer": {
# "type": "custom",
# "tokenizer": "custom_ngram_tokenizer",
# "filter": ["asciifolding", "lowercase"]
# },
# "edgengram_analyzer": {
# "type": "custom",
# "tokenizer": "custom_edgengram_tokenizer",
# "filter": ["asciifolding", "lowercase"]
# }
# },
# "tokenizer": {
# "custom_ngram_tokenizer": {
# "type": "nGram",
# "min_gram": 3,
# "max_gram": 12,
# "token_chars": ["letter", "digit"]
# },
# "custom_edgengram_tokenizer": {
# "type": "edgeNGram",
# "min_gram": 2,
# "max_gram": 12,
# "token_chars": ["letter", "digit"]
# }
# }
# }
# }
# }
# HAYSTACK_DEFAULT_OPERATOR = 'AND'
# Load the local settings file if it exists
if os.path.isfile('crm/local_settings.py'):
from .local_settings import *
else:
print("No local settings file found")
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
PASSWORD_RESET_MAIL_FROM_USER = os.getenv(
'PASSWORD_RESET_MAIL_FROM_USER', '[email protected]')
|
[] |
[] |
[
"SENTRYDSN",
"DB_HOST",
"DB_PORT",
"GP_CLIENT_ID",
"MANDRILL_API_KEY",
"SG_USER",
"ENABLE_GOOGLE_LOGIN",
"AWS_REGION",
"AM_PASS_KEY",
"MGUN_API_KEY",
"STORAGE_TYPE",
"AWSBUCKETNAME",
"GP_CLIENT_SECRET",
"SENTRY_ENABLED",
"DEBUG_STATUS",
"PASSWORD_RESET_MAIL_FROM_USER",
"MGUN_API_URL",
"AWS_SECRET_ACCESS_KEY",
"SG_PWD",
"AWS_ACCESS_KEY_ID",
"AM_ACCESS_KEY"
] |
[]
|
["SENTRYDSN", "DB_HOST", "DB_PORT", "GP_CLIENT_ID", "MANDRILL_API_KEY", "SG_USER", "ENABLE_GOOGLE_LOGIN", "AWS_REGION", "AM_PASS_KEY", "MGUN_API_KEY", "STORAGE_TYPE", "AWSBUCKETNAME", "GP_CLIENT_SECRET", "SENTRY_ENABLED", "DEBUG_STATUS", "PASSWORD_RESET_MAIL_FROM_USER", "MGUN_API_URL", "AWS_SECRET_ACCESS_KEY", "SG_PWD", "AWS_ACCESS_KEY_ID", "AM_ACCESS_KEY"]
|
python
| 21 | 0 | |
internal/pkg/runtime/engines/singularity/process_linux.go
|
// Copyright (c) 2018-2019, Sylabs Inc. All rights reserved.
// This software is licensed under a 3-clause BSD license. Please consult the
// LICENSE.md file distributed with the sources of this project regarding your
// rights to use or distribute this software.
package singularity
import (
"encoding/json"
"fmt"
"net"
"os"
"os/exec"
"os/signal"
"reflect"
"strings"
"syscall"
"unsafe"
"github.com/sylabs/singularity/internal/pkg/security"
"github.com/sylabs/singularity/internal/pkg/util/user"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sylabs/singularity/internal/pkg/instance"
"github.com/sylabs/singularity/internal/pkg/sylog"
"golang.org/x/crypto/ssh/terminal"
)
func (engine *EngineOperations) checkExec() error {
shell := engine.EngineConfig.GetShell()
if shell == "" {
shell = "/bin/sh"
}
args := engine.EngineConfig.OciConfig.Process.Args
env := engine.EngineConfig.OciConfig.Process.Env
// match old behavior of searching path
oldpath := os.Getenv("PATH")
defer func() {
os.Setenv("PATH", oldpath)
engine.EngineConfig.OciConfig.Process.Args = args
engine.EngineConfig.OciConfig.Process.Env = env
}()
for _, keyval := range env {
if strings.HasPrefix(keyval, "PATH=") {
os.Setenv("PATH", keyval[5:])
break
}
}
// If args[0] is an absolute path, exec.LookPath() looks for
// this file directly instead of within PATH
if _, err := exec.LookPath(args[0]); err == nil {
return nil
}
// If args[0] isn't executable (either via PATH or absolute path),
// look for alternative approaches to handling it
switch args[0] {
case "/.singularity.d/actions/exec":
if p, err := exec.LookPath("/.exec"); err == nil {
args[0] = p
return nil
}
if p, err := exec.LookPath(args[1]); err == nil {
sylog.Warningf("container does not have %s, calling %s directly", args[0], args[1])
args[1] = p
args = args[1:]
return nil
}
return fmt.Errorf("no executable %s found", args[1])
case "/.singularity.d/actions/shell":
if p, err := exec.LookPath("/.shell"); err == nil {
args[0] = p
return nil
}
if p, err := exec.LookPath(shell); err == nil {
sylog.Warningf("container does not have %s, calling %s directly", args[0], shell)
args[0] = p
return nil
}
return fmt.Errorf("no %s found inside container", shell)
case "/.singularity.d/actions/run":
if p, err := exec.LookPath("/.run"); err == nil {
args[0] = p
return nil
}
if p, err := exec.LookPath("/singularity"); err == nil {
args[0] = p
return nil
}
return fmt.Errorf("no run driver found inside container")
case "/.singularity.d/actions/start":
if _, err := exec.LookPath(shell); err != nil {
return fmt.Errorf("no %s found inside container, can't run instance", shell)
}
args = []string{shell, "-c", `echo "instance start script not found"`}
return nil
case "/.singularity.d/actions/test":
if p, err := exec.LookPath("/.test"); err == nil {
args[0] = p
return nil
}
return fmt.Errorf("no test driver found inside container")
}
return fmt.Errorf("no %s found inside container", args[0])
}
// StartProcess starts the process
func (engine *EngineOperations) StartProcess(masterConn net.Conn) error {
isInstance := engine.EngineConfig.GetInstance()
bootInstance := isInstance && engine.EngineConfig.GetBootInstance()
shimProcess := false
if err := os.Chdir(engine.EngineConfig.OciConfig.Process.Cwd); err != nil {
if err := os.Chdir(engine.EngineConfig.GetHomeDest()); err != nil {
os.Chdir("/")
}
}
if err := engine.checkExec(); err != nil {
return err
}
if engine.EngineConfig.File.MountDev == "minimal" || engine.EngineConfig.GetContain() {
// If on a terminal, reopen /dev/console so /proc/self/fd/[0-2]
// will point to /dev/console. This is needed so that tty and
// ttyname() on el6 will return the correct answer. Newer
// ttyname() functions might work because they will search
// /dev if the value of /proc/self/fd/X doesn't exist, but
// they won't work if another /dev/pts/X is allocated in its
// place. Also, programs that don't use ttyname() and instead
// directly do readlink() on /proc/self/fd/X need this.
for fd := 0; fd <= 2; fd++ {
if !terminal.IsTerminal(fd) {
continue
}
consfile, err := os.OpenFile("/dev/console", os.O_RDWR, 0600)
if err != nil {
sylog.Debugf("Could not open minimal /dev/console, skipping replacing tty descriptors")
break
}
sylog.Debugf("Replacing tty descriptors with /dev/console")
consfd := int(consfile.Fd())
for ; fd <= 2; fd++ {
if !terminal.IsTerminal(fd) {
continue
}
syscall.Close(fd)
syscall.Dup3(consfd, fd, 0)
}
consfile.Close()
break
}
}
args := engine.EngineConfig.OciConfig.Process.Args
env := engine.EngineConfig.OciConfig.Process.Env
if engine.EngineConfig.OciConfig.Linux != nil {
namespaces := engine.EngineConfig.OciConfig.Linux.Namespaces
for _, ns := range namespaces {
if ns.Type == specs.PIDNamespace {
if !engine.EngineConfig.GetNoInit() {
shimProcess = true
}
break
}
}
}
for _, img := range engine.EngineConfig.GetImageList() {
if err := syscall.Close(int(img.Fd)); err != nil {
return fmt.Errorf("failed to close file descriptor for %s", img.Path)
}
}
for _, fd := range engine.EngineConfig.GetOpenFd() {
if err := syscall.Close(fd); err != nil {
return fmt.Errorf("aborting failed to close file descriptor: %s", err)
}
}
if err := security.Configure(&engine.EngineConfig.OciConfig.Spec); err != nil {
return fmt.Errorf("failed to apply security configuration: %s", err)
}
if (!isInstance && !shimProcess) || bootInstance || engine.EngineConfig.GetInstanceJoin() {
err := syscall.Exec(args[0], args, env)
return fmt.Errorf("exec %s failed: %s", args[0], err)
}
// Spawn and wait container process, signal handler
cmd := exec.Command(args[0], args[1:]...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Stdin = os.Stdin
cmd.Env = env
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: isInstance,
}
errChan := make(chan error, 1)
statusChan := make(chan syscall.WaitStatus, 1)
signals := make(chan os.Signal, 1)
if err := cmd.Start(); err != nil {
return fmt.Errorf("exec %s failed: %s", args[0], err)
}
go func() {
errChan <- cmd.Wait()
}()
// Modify argv argument and program name shown in /proc/self/comm
name := "sinit"
argv0str := (*reflect.StringHeader)(unsafe.Pointer(&os.Args[0]))
argv0 := (*[1 << 30]byte)(unsafe.Pointer(argv0str.Data))[:argv0str.Len]
progname := make([]byte, argv0str.Len)
if len(name) > argv0str.Len {
return fmt.Errorf("program name too short")
}
copy(progname, name)
copy(argv0, progname)
ptr := unsafe.Pointer(&progname[0])
if _, _, err := syscall.Syscall(syscall.SYS_PRCTL, syscall.PR_SET_NAME, uintptr(ptr), 0); err != 0 {
return syscall.Errno(err)
}
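// From here on the shim acts as a minimal init: it reaps children on SIGCHLD and, for instances, forwards other signals to the container process group.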
// Manage all signals
signal.Notify(signals)
masterConn.Close()
for {
select {
case s := <-signals:
sylog.Debugf("Received signal %s", s.String())
switch s {
case syscall.SIGCHLD:
for {
var status syscall.WaitStatus
wpid, err := syscall.Wait4(-1, &status, syscall.WNOHANG, nil)
if wpid <= 0 || err != nil {
break
}
if wpid == cmd.Process.Pid && err == nil {
statusChan <- status
}
}
default:
signal := s.(syscall.Signal)
if isInstance {
if err := syscall.Kill(-cmd.Process.Pid, signal); err == syscall.ESRCH {
sylog.Debugf("No child process, exiting ...")
os.Exit(128 + int(signal))
}
}
}
case err := <-errChan:
if e, ok := err.(*exec.ExitError); ok {
status, ok := e.Sys().(syscall.WaitStatus)
if !ok {
return fmt.Errorf("command exit with error: %s", err)
}
statusChan <- status
} else if e, ok := err.(*os.SyscallError); ok {
// handle possible race with Wait4 call above by ignoring ECHILD
// error because the child process was already reaped
if e.Err.(syscall.Errno) != syscall.ECHILD {
sylog.Fatalf("error while waiting container process: %s", e.Error())
}
}
if !isInstance {
if len(statusChan) > 0 {
status := <-statusChan
if status.Signaled() {
os.Exit(128 + int(status.Signal()))
}
os.Exit(status.ExitStatus())
} else if err == nil {
os.Exit(0)
}
sylog.Fatalf("command exited with unknown error: %s", err)
}
}
}
}
// PostStartProcess will execute code in master context after execution of container
// process, typically to write instance state/config files or execute post start OCI hook
func (engine *EngineOperations) PostStartProcess(pid int) error {
sylog.Debugf("Post start process")
if engine.EngineConfig.GetInstance() {
uid := os.Getuid()
name := engine.CommonConfig.ContainerID
if err := os.Chdir("/"); err != nil {
return fmt.Errorf("failed to change directory to /: %s", err)
}
file, err := instance.Add(name, instance.SingSubDir)
if err != nil {
return err
}
file.Config, err = json.Marshal(engine.CommonConfig)
if err != nil {
return err
}
pw, err := user.GetPwUID(uint32(uid))
if err != nil {
return err
}
file.User = pw.Name
file.Pid = pid
file.PPid = os.Getpid()
file.Image = engine.EngineConfig.GetImage()
for _, ns := range engine.EngineConfig.OciConfig.Linux.Namespaces {
if ns.Type == specs.UserNamespace {
file.UserNs = true
break
}
}
return file.Update()
}
return nil
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
069-social-network-built-in-go-and-gopherjs/gopherface.go
|
package main
import (
"log"
"os"
"github.com/aditya43/golang/69-social-network-built-in-go-and-gopherjs/common"
"github.com/aditya43/golang/69-social-network-built-in-go-and-gopherjs/common/datastore"
"github.com/aditya43/golang/69-social-network-built-in-go-and-gopherjs/endpoints"
"github.com/aditya43/golang/69-social-network-built-in-go-and-gopherjs/handlers"
"github.com/aditya43/golang/69-social-network-built-in-go-and-gopherjs/middleware"
"go.isomorphicgo.org/go/isokit"
"net/http"
ghandlers "github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/justinas/alice"
)
const (
// WEBSERVERPORT = ":8443"
WEBSERVERPORT = ":8080"
)
var WebAppRoot = os.Getenv("GOPHERFACE_APP_ROOT")
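// GOPHERFACE_APP_ROOT must point at the application root; templates/, static/ and the GopherJS client artifacts are resolved relative to it below.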
func main() {
db, err := datastore.NewDatastore(datastore.MYSQL, "gopherface:gopherface@tcp(database:3306)/gopherfacedb")
if err != nil {
log.Print(err)
}
defer db.Close()
env := common.Env{}
isokit.TemplateFilesPath = WebAppRoot + "/templates"
isokit.TemplateFileExtension = ".html"
ts := isokit.NewTemplateSet()
ts.GatherTemplates()
env.TemplateSet = ts
env.DB = db
r := mux.NewRouter()
r.HandleFunc("/", handlers.HomeHandler)
r.Handle("/signup", handlers.SignUpHandler(&env)).Methods("GET", "POST")
r.Handle("/login", handlers.LoginHandler(&env)).Methods("GET", "POST")
r.HandleFunc("/logout", handlers.LogoutHandler).Methods("GET", "POST")
r.Handle("/feed", middleware.GatedContentHandler(handlers.FeedHandler(&env))).Methods("GET")
r.Handle("/friends", middleware.GatedContentHandler(handlers.FriendsHandler(&env))).Methods("GET")
r.Handle("/myprofile", middleware.GatedContentHandler(handlers.MyProfileHandler(&env))).Methods("GET")
r.Handle("/profile/{username}", middleware.GatedContentHandler(handlers.ProfileHandler(&env))).Methods("GET")
// Register REST API Endpoints
r.Handle("/restapi/get-user-profile", middleware.GatedContentHandler(endpoints.GetUserProfileEndpoint(&env))).Methods("GET", "POST")
r.Handle("/restapi/save-user-profile", middleware.GatedContentHandler(endpoints.SaveUserProfileEndpoint(&env))).Methods("POST")
r.Handle("/restapi/save-user-profile-image", middleware.GatedContentHandler(endpoints.SaveUserProfileImageEndpoint(&env))).Methods("POST")
r.Handle("/restapi/find-gophers", middleware.GatedContentHandler(endpoints.FindGophersEndpoint(&env))).Methods("GET", "POST")
r.Handle("/restapi/follow-gopher", middleware.GatedContentHandler(endpoints.FollowGopherEndpoint(&env))).Methods("GET", "POST")
r.Handle("/restapi/unfollow-gopher", middleware.GatedContentHandler(endpoints.UnfollowGopherEndpoint(&env))).Methods("GET", "POST")
r.Handle("/restapi/get-friends-list", middleware.GatedContentHandler(endpoints.FriendsListEndpoint(&env))).Methods("GET", "POST")
r.Handle("/restapi/save-post", middleware.GatedContentHandler(endpoints.SavePostEndpoint(&env))).Methods("GET", "POST")
r.Handle("/restapi/fetch-posts", middleware.GatedContentHandler(endpoints.FetchPostsEndpoint(&env))).Methods("GET", "POST")
r.Handle("/restapi/get-gopher-profile", middleware.GatedContentHandler(endpoints.GetGopherProfileEndpoint(&env))).Methods("GET", "POST")
r.Handle("/js/client.js", isokit.GopherjsScriptHandler(WebAppRoot))
r.Handle("/js/client.js.map", isokit.GopherjsScriptMapHandler(WebAppRoot))
r.Handle("/template-bundle", handlers.TemplateBundleHandler(&env))
r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(WebAppRoot+"/static"))))
loggedRouter := ghandlers.LoggingHandler(os.Stdout, r)
stdChain := alice.New(middleware.PanicRecoveryHandler)
http.Handle("/", stdChain.Then(loggedRouter))
// err = http.ListenAndServeTLS(WEBSERVERPORT, WebAppRoot+"/certs/gopherfacecert.pem", WebAppRoot+"/certs/gopherfacekey.pem", nil)
// http.Handle("/", r)
err = http.ListenAndServe(WEBSERVERPORT, nil)
if err != nil {
log.Fatal("ListenAndServe: ", err)
}
}
|
[
"\"GOPHERFACE_APP_ROOT\""
] |
[] |
[
"GOPHERFACE_APP_ROOT"
] |
[]
|
["GOPHERFACE_APP_ROOT"]
|
go
| 1 | 0 | |
zk/test_util.go
|
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zk
import (
"bufio"
"errors"
"log"
"os"
"os/exec"
"path"
"sync"
"time"
"github.com/samuel/go-zookeeper/zk"
"github.com/stretchr/testify/suite"
"github.com/uber-go/tally"
"go.uber.org/zap"
)
// EmbeddedZkServer is the connect string for embedded ZK server
const EmbeddedZkServer = "localhost:2181"
var mu = sync.Mutex{}
// BaseZkTestSuite provides utility to test Zookeeper functions without Helix admin
type BaseZkTestSuite struct {
suite.Suite
EmbeddedZkPath string
ZkConnectString string
}
// SetupSuite ensures ZK server is up
func (s *BaseZkTestSuite) SetupSuite() {
s.ZkConnectString = EmbeddedZkServer
if s.EmbeddedZkPath == "" {
s.EmbeddedZkPath = path.Join(os.Getenv("APP_ROOT"), "zk/embedded")
}
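// APP_ROOT is used to locate the embedded ZK start/stop scripts when no explicit path is configured.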
err := EnsureZookeeperUp(s.EmbeddedZkPath)
s.NoError(err)
}
// CreateAndConnectClient creates ZK client and connects to ZK server
func (s *BaseZkTestSuite) CreateAndConnectClient() *Client {
zkClient := NewClient(
zap.NewNop(), tally.NoopScope, WithZkSvr(s.ZkConnectString), WithSessionTimeout(time.Second))
err := zkClient.Connect()
s.NoError(err)
return zkClient
}
// EnsureZookeeperUp starts the embedded (test) Zookeeper if not running.
func EnsureZookeeperUp(scriptRelativeDirPath string) error {
mu.Lock()
defer mu.Unlock()
if isEmbeddedZookeeperStarted(3 * time.Second) {
return nil
}
err := startEmbeddedZookeeper(scriptRelativeDirPath)
if err != nil {
log.Println("Unable to start Zookeeper server: ", err)
return err
}
log.Println("Zookeeper server is up.")
return nil
}
// StopZookeeper stops the embedded (test) Zookeeper if running.
func StopZookeeper(scriptRelativeDirPath string) error {
mu.Lock()
defer mu.Unlock()
_, _, err := zk.Connect([]string{EmbeddedZkServer}, time.Second)
if err != nil {
return nil
}
err = stopEmbeddedZookeeper(scriptRelativeDirPath)
if err != nil {
log.Println("Unable to stop Zookeeper server: ", err)
return err
}
log.Println("Zookeeper server is stopped.")
return nil
}
func isEmbeddedZookeeperStarted(timeout time.Duration) bool {
zkConn, _, err := zk.Connect([]string{EmbeddedZkServer}, time.Second)
if err == nil && zkConn != nil {
defer zkConn.Close()
done := time.After(timeout)
loop:
for { // zk.Connect is async
if zkConn.State() == zk.StateHasSession {
return true
}
select {
case <-done:
break loop
default:
}
time.Sleep(100 * time.Millisecond)
}
}
log.Printf("Unable to connect Zookeeper %s: %v\n", EmbeddedZkServer, err)
return false
}
func startEmbeddedZookeeper(scriptRelativeDirPath string) error {
err := runCmd(scriptRelativeDirPath, "start.sh")
if err != nil {
return err
} else if !isEmbeddedZookeeperStarted(5 * time.Second) {
return errors.New("embedded zk is not started")
}
return nil
}
func stopEmbeddedZookeeper(scriptRelativeDirPath string) error {
return runCmd(scriptRelativeDirPath, "stop.sh")
}
func runCmd(scriptRelativeDirPath, scriptFileName string) error {
cmd := exec.Cmd{
Path: "/bin/bash",
Args: []string{"/bin/bash", scriptFileName},
Dir: scriptRelativeDirPath,
}
cmdReader, err := cmd.StdoutPipe()
if err != nil {
log.Println("Error creating StdoutPipe: ", err)
return err
}
err = cmd.Start()
if err != nil {
return err
}
in := bufio.NewScanner(cmdReader)
go func() {
for in.Scan() {
log.Println(in.Text())
}
}()
time.Sleep(5 * time.Second) // wait some time as cmd.Start doesn't wait
return nil
}
|
[
"\"APP_ROOT\""
] |
[] |
[
"APP_ROOT"
] |
[]
|
["APP_ROOT"]
|
go
| 1 | 0 | |
credentials/credentials.go
|
package credentials
import (
"conn-script/types"
"encoding/json"
"github.com/joho/godotenv"
"github.com/urfave/cli/v2"
"io/ioutil"
"log"
"os"
"path/filepath"
)
func GetCredentials(hostname string) (*types.Credential, error) {
var credentials types.Hostname
filename, err := GetCredentialsFile()
if err != nil {
log.Fatal(err)
}
jsonFile, err := os.Open(filename)
if err != nil {
log.Fatal(err)
}
defer jsonFile.Close()
byteValue, _ := ioutil.ReadAll(jsonFile)
err = json.Unmarshal(byteValue, &credentials.Credentials)
if err != nil {
log.Fatal(err)
}
res := credentials.Credentials[hostname]
return res, nil
}
func GetCredentialsTim(hostname string) (*types.CredentialTim, error) {
var credentials types.HostnameTim
filename, err := GetCredentialsFile()
if err != nil {
log.Fatal(err)
}
jsonFile, err := os.Open(filename)
if err != nil {
log.Fatal(err)
}
defer jsonFile.Close()
byteValue, _ := ioutil.ReadAll(jsonFile)
err = json.Unmarshal(byteValue, &credentials.Credentials)
if err != nil {
log.Fatal(err)
}
res := credentials.Credentials[hostname]
return res, nil
}
func ChangeCredentials(credentials types.Credential, name string) error {
var hostname map[string]types.Credential
filename, err := GetCredentialsFile()
if err != nil {
log.Fatal(err)
}
jsonFile, _ := os.Open(filename)
file, _ := ioutil.ReadAll(jsonFile)
errMarshal := json.Unmarshal(file, &hostname)
if errMarshal != nil {
log.Fatal(errMarshal)
}
credentialsChecked, err := CheckEmptyField(credentials, name)
if err != nil {
log.Fatal(err)
}
hostname[name] = credentialsChecked
jsonString, err := json.MarshalIndent(hostname, "", " ")
if err != nil {
log.Fatal(err)
}
err = ioutil.WriteFile("credentials.json", jsonString, 0644)
if err != nil {
log.Fatal(err)
}
return nil
}
func CreateCredentialVar(c *cli.Context) types.Credential {
var credentials = types.Credential{
User: c.String("u"),
Password: c.String("p"),
Description: c.String("d"),
EnvType: c.String("e"),
}
return credentials
}
func CheckEmptyField(credentials types.Credential, name string) (types.Credential, error) {
var hostname map[string]types.Credential
filename, err := GetCredentialsFile()
if err != nil {
log.Fatal(err)
}
jsonFile, _ := os.Open(filename)
file, _ := ioutil.ReadAll(jsonFile)
errMarshal := json.Unmarshal(file, &hostname)
if errMarshal != nil {
log.Fatal(errMarshal)
}
if credentials.EnvType == "" {
credentials.EnvType = hostname[name].EnvType
}
if credentials.Description == "" {
credentials.Description = hostname[name].Description
}
if credentials.User == "" {
credentials.User = hostname[name].User
}
if credentials.Password == "" {
credentials.Password = hostname[name].Password
}
return credentials, nil
}
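// CheckBalabit maps a balabit tier flag ("1", "2", "3", "3_vpn") to the host stored in the matching BT* variable from .env; an empty flag falls back to BT2.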
func CheckBalabit(bt string) string {
var res string
err := godotenv.Load(".env")
if err != nil {
log.Fatal(err)
}
if bt == "1" {
res = os.Getenv("BT1")
} else if bt == "2" {
res = os.Getenv("BT2")
} else if bt == "3" {
res = os.Getenv("BT3")
} else if bt == "3_vpn" {
res = os.Getenv("BT3_VPN")
} else if bt == "" {
res = os.Getenv("BT2")
}
return res
}
func GetCredentialsFile() (string, error) {
var path, err = filepath.Abs("credentials.json")
if err != nil {
log.Fatal(err)
}
return path, nil
}
|
[
"\"BT1\"",
"\"BT2\"",
"\"BT3\"",
"\"BT3_VPN\"",
"\"BT2\""
] |
[] |
[
"BT3_VPN",
"BT2",
"BT1",
"BT3"
] |
[]
|
["BT3_VPN", "BT2", "BT1", "BT3"]
|
go
| 4 | 0 | |
tests/conftest.py
|
import aiohttp
import pytest
import os
from fpl import FPL
from fpl.models import Fixture, H2HLeague, User, ClassicLeague, Team, Gameweek
from tests.test_classic_league import classic_league_data
from tests.test_fixture import fixture_data
from tests.test_h2h_league import h2h_league_data
from tests.test_team import team_data
from tests.test_user import user_data
from tests.test_gameweek import gameweek_data
try:
from .temp_env_var import TEMP_ENV_VARS, ENV_VARS_TO_SUSPEND
except ImportError:
TEMP_ENV_VARS = {}
ENV_VARS_TO_SUSPEND = []
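# temp_env_var.py, when present, supplies TEMP_ENV_VARS (a dict of variables to set) and ENV_VARS_TO_SUSPEND (names to unset) for the duration of the test session.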
@pytest.fixture(scope="session", autouse=True)
def tests_setup_and_teardown():
# Will be executed before the first test
old_environ = dict(os.environ)
os.environ.update(TEMP_ENV_VARS)
for env_var in ENV_VARS_TO_SUSPEND:
os.environ.pop(env_var, default=None)
yield
# Will be executed after the last test
os.environ.clear()
os.environ.update(old_environ)
@pytest.fixture()
async def fpl():
session = aiohttp.ClientSession()
fpl = FPL(session)
yield fpl
await session.close()
@pytest.fixture()
async def classic_league():
session = aiohttp.ClientSession()
yield ClassicLeague(classic_league_data, session)
await session.close()
@pytest.fixture()
async def gameweek():
return Gameweek(gameweek_data)
@pytest.fixture()
async def player(fpl):
yield await fpl.get_player(345, include_summary=True)
@pytest.fixture()
async def settings(fpl):
yield await fpl.game_settings()
@pytest.fixture()
async def team():
session = aiohttp.ClientSession()
yield Team(team_data, session)
await session.close()
@pytest.fixture()
def fixture():
return Fixture(fixture_data)
@pytest.fixture()
async def h2h_league():
session = aiohttp.ClientSession()
yield H2HLeague(h2h_league_data, session)
await session.close()
@pytest.fixture()
async def user():
session = aiohttp.ClientSession()
yield User(user_data, session)
await session.close()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
lambda/main.go
|
package main
import (
"database/sql"
"log"
"os"
"entgo.io/ent/dialect"
entsql "entgo.io/ent/dialect/sql"
"github.com/aws/aws-lambda-go/lambda"
_ "github.com/jackc/pgx/v4/stdlib"
"entgo-aws-appsync/ent"
"entgo-aws-appsync/internal/handler"
)
func main() {
// open the database connection using the pgx driver
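// DATABASE_URL is expected to hold a Postgres connection string or URL (e.g. postgres://user:pass@host:5432/dbname) understood by the pgx stdlib driver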
db, err := sql.Open("pgx", os.Getenv("DATABASE_URL"))
if err != nil {
log.Fatalf("failed opening database connection: %v", err)
}
// initiate the ent database client for the Postgres database
client := ent.NewClient(ent.Driver(entsql.OpenDB(dialect.Postgres, db)))
defer client.Close()
// register our event handler to listen for Lambda events
lambda.Start(handler.New(client).Handle)
}
|
[
"\"DATABASE_URL\""
] |
[] |
[
"DATABASE_URL"
] |
[]
|
["DATABASE_URL"]
|
go
| 1 | 0 | |
ingest/ingest_pipeline.py
|
"""Ingest Pipeline for ingesting expression, metadata and cluster
files into MongoDB.
DESCRIPTION
This CLI extracts and transforms different file types then writes them into
a remote MongoDB instance.
PREREQUISITES
See https://github.com/broadinstitute/scp-ingest-pipeline#prerequisites
DEVELOPER SETUP (see README.md#Install and ../scripts/setup_mongo_dev.sh)
EXAMPLES
# Takes expression file and stores it into MongoDB
# Ingest cluster file
python ingest_pipeline.py --study-id 5d276a50421aa9117c982845 --study-file-id 5dd5ae25421aa910a723a337 ingest_cluster --cluster-file ../tests/data/test_1k_cluster_Data.csv --ingest-cluster --name cluster1 --domain-ranges "{'x':[-1, 1], 'y':[-1, 1], 'z':[-1, 1]}"
# Ingest Cell Metadata file
python ingest_pipeline.py --study-id 5d276a50421aa9117c982845 --study-file-id 5dd5ae25421aa910a723a337 ingest_cell_metadata --cell-metadata-file ../tests/data/valid_no_array_v2.0.0.txt --study-accession SCP123 --ingest-cell-metadata
# Ingest Cell Metadata file against convention
!! Please note that you must have a pre-configured BigQuery table available
python ingest_pipeline.py --study-id 5d276a50421aa9117c982845 --study-file-id 5dd5ae25421aa910a723a337 ingest_cell_metadata --cell-metadata-file ../tests/data/valid_no_array_v2.0.0.txt --study-accession SCP123 --ingest-cell-metadata --validate-convention --bq-dataset cell_metadata --bq-table alexandria_convention
# Ingest dense file
python ingest_pipeline.py --study-id 5d276a50421aa9117c982845 --study-file-id 5dd5ae25421aa910a723a337 ingest_expression --taxon-name 'Homo sapiens' --taxon-common-name human --ncbi-taxid 9606 --matrix-file ../tests/data/dense_matrix_19_genes_1000_cells.txt --matrix-file-type dense
# Ingest loom file
python ingest_pipeline.py --study-id 5d276a50421aa9117c982845 --study-file-id 5dd5ae25421aa910a723a337 ingest_expression --matrix-file ../tests/data/test_loom.loom --matrix-file-type loom --taxon-name 'Homo Sapiens' --taxon-common-name human
# Subsample cluster and metadata file
python ingest_pipeline.py --study-id 5d276a50421aa9117c982845 --study-file-id 5dd5ae25421aa910a723a337 ingest_subsample --cluster-file ../tests/data/test_1k_cluster_Data.csv --name custer1 --cell-metadata-file ../tests/data/test_1k_metadata_Data.csv --subsample
# Ingest mtx files
python ingest_pipeline.py --study-id 5d276a50421aa9117c982845 --study-file-id 5dd5ae25421aa910a723a337 ingest_expression --taxon-name 'Homo sapiens' --taxon-common-name human --matrix-file ../tests/data/mtx/matrix.mtx --matrix-file-type mtx --gene-file ../tests/data/genes.tsv --barcode-file ../tests/data/barcodes.tsv
# Differential Expression analysis (dense matrix)
python ingest_pipeline.py --study-id addedfeed000000000000000 --study-file-id dec0dedfeed1111111111111 differential_expression --annotation-name cell_type__ontology_label --annotation-type group --annotation-scope study --matrix-file-path ../tests/data/differential_expression/de_integration.tsv --matrix-file-type dense --annotation-file ../tests/data/differential_expression/de_integration_unordered_metadata.tsv --cluster-file ../tests/data/differential_expression/de_integration_cluster.tsv --cluster-name de_integration --study-accession SCPdev --differential-expression
# Differential Expression analysis (sparse matrix)
python ingest_pipeline.py --study-id addedfeed000000000000000 --study-file-id dec0dedfeed1111111111111 differential_expression --annotation-name cell_type__ontology_label --annotation-type group --annotation-scope study --matrix-file-path ../tests/data/differential_expression/sparse/sparsemini_matrix.mtx --gene-file ../tests/data/differential_expression/sparse/sparsemini_features.tsv --barcode-file ../tests/data/differential_expression/sparse/sparsemini_barcodes.tsv --matrix-file-type mtx --cell-metadata-file ../tests/data/differential_expression/sparse/sparsemini_metadata.txt --cluster-file ../tests/data/differential_expression/sparse/sparsemini_cluster.txt --cluster-name de_sparse_integration --study-accession SCPsparsemini --differential-expression
"""
import json
import logging
import os
import re
import sys
from contextlib import nullcontext
from typing import Dict, Generator, List, Tuple, Union
from wsgiref.simple_server import WSGIRequestHandler # noqa: F401
from bson.objectid import ObjectId
# from google.cloud.logging.resource import Resource
try:
# Used when importing internally and in tests
from ingest_files import IngestFiles
# For Mixpanel logging
import config
from monitoring.mixpanel_log import custom_metric
from monitoring.metrics_service import MetricsService
# For tracing
from opencensus.ext.stackdriver.trace_exporter import StackdriverExporter
from opencensus.trace.samplers import AlwaysOnSampler
from opencensus.trace.tracer import Tracer
from pymongo import MongoClient
from subsample import SubSample
from validation.validate_metadata import (
report_issues,
validate_input_metadata,
write_metadata_to_bq,
)
from cell_metadata import CellMetadata
from cli_parser import create_parser, validate_arguments
from clusters import Clusters
from expression_files.mtx import MTXIngestor
from expression_files.dense_ingestor import DenseIngestor
from monitor import setup_logger, log_exception
from de import DifferentialExpression
except ImportError:
# Used when importing as external package, e.g. imports in single_cell_portal code
from .ingest_files import IngestFiles
from . import config
from .monitoring.metrics_service import MetricsService
from .subsample import SubSample
from .monitoring.mixpanel_log import custom_metric
from .validation.validate_metadata import (
validate_input_metadata,
report_issues,
write_metadata_to_bq,
)
from .monitor import setup_logger, log_exception
from .cell_metadata import CellMetadata
from .clusters import Clusters
from .expression_files.dense_ingestor import DenseIngestor
from .expression_files.mtx import MTXIngestor
from .cli_parser import create_parser, validate_arguments
from .de import DifferentialExpression
class IngestPipeline:
# File location for metadata json convention
JSON_CONVENTION = (
"../schema/alexandria_convention/alexandria_convention_schema.json"
)
# Logger provides more details for trouble shooting
dev_logger = setup_logger(__name__, "log.txt", format="support_configs")
user_logger = setup_logger(__name__ + ".usr", "user_log.txt", level=logging.ERROR)
def __init__(
self,
study_id: str,
study_file_id: str,
matrix_file: str = None,
matrix_file_path: str = None,
matrix_file_type: str = None,
cell_metadata_file: str = None,
cluster_file: str = None,
subsample=False,
ingest_cell_metadata=False,
ingest_cluster=False,
differential_expression=False,
**kwargs,
):
"""Initializes variables in Ingest Pipeline"""
self.study_id = study_id
self.study_file_id = study_file_id
self.matrix_file = matrix_file
self.matrix_file_path = matrix_file_path
self.matrix_file_type = matrix_file_type
if os.environ.get("DATABASE_HOST") is not None:
# Needed to run tests in CircleCI.
# TODO (SCP-2000): Integrate MongoDB emulator to test_ingest.py, then remove this
self.db = self.get_mongo_db()
else:
self.db = None
self.cluster_file = cluster_file
self.kwargs = kwargs
self.cell_metadata_file = cell_metadata_file
self.props = {}
if "GOOGLE_CLOUD_PROJECT" in os.environ:
# instantiate trace exporter
exporter = StackdriverExporter(
project_id=os.environ["GOOGLE_CLOUD_PROJECT"]
)
self.tracer = Tracer(exporter=exporter, sampler=AlwaysOnSampler())
else:
self.tracer = nullcontext()
if ingest_cell_metadata or differential_expression:
self.cell_metadata = self.initialize_file_connection(
"cell_metadata", cell_metadata_file
)
if ingest_cluster or differential_expression:
self.cluster = self.initialize_file_connection("cluster", cluster_file)
if subsample:
self.cluster_file = cluster_file
self.cell_metadata_file = cell_metadata_file
# Will be replaced by MongoConnection as defined in SCP-2629
def get_mongo_db(self):
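# Requires DATABASE_HOST, MONGODB_USERNAME, MONGODB_PASSWORD and DATABASE_NAME; authenticates against the study database using SCRAM-SHA-1.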
host = os.environ["DATABASE_HOST"]
user = os.environ["MONGODB_USERNAME"]
password = os.environ["MONGODB_PASSWORD"]
db_name = os.environ["DATABASE_NAME"]
client = MongoClient(
host,
username=user,
password=password,
authSource=db_name,
authMechanism="SCRAM-SHA-1",
)
return client[db_name]
def initialize_file_connection(self, file_type, file_path):
"""Initializes connection to file.
Returns:
File object.
"""
file_connections = {"cell_metadata": CellMetadata, "cluster": Clusters}
try:
return file_connections.get(file_type)(
file_path,
self.study_id,
self.study_file_id,
tracer=self.tracer,
**self.kwargs,
)
except ValueError as v:
# Caution: recording errorTypes in this manner can clobber other collected errors.
# ValueErrors during file connection indicate file cannot be processed
# this logging approach should not lose collected file validation information
if str(v).startswith("could not convert"):
config.get_metric_properties().update(
{"errorTypes": ["content:type:not-numeric"]}
)
elif str(v).startswith("Unable to parse"):
config.get_metric_properties().update(
{"errorTypes": ["format:cap:unique"]}
)
else:
config.get_metric_properties().update(
{"errorTypes": ["parse:unhandled"]}
)
self.report_validation("failure")
raise ValueError(v)
def insert_many(self, collection_name, documents):
if not config.bypass_mongo_writes():
self.db[collection_name].insert_many(documents)
def insert_one(self, collection_name, model):
if not config.bypass_mongo_writes():
linear_id = self.db[collection_name].insert_one(model).inserted_id
return linear_id
# @profile
def load(
self,
collection_name,
model,
set_data_array_fn,
*set_data_array_fn_args,
**set_data_array_fn_kwargs,
):
documents = []
try:
# hack to avoid inserting invalid CellMetadata object from first column
# TODO: implement method similar to kwargs solution in ingest_expression
if (
collection_name == "cell_metadata"
and model["name"] == "NAME"
and model["annotation_type"] == "TYPE"
):
linear_id = ObjectId(self.study_id)
else:
linear_id = self.insert_one(collection_name, model)
for data_array_model in set_data_array_fn(
linear_id, *set_data_array_fn_args, **set_data_array_fn_kwargs
):
documents.append(data_array_model)
# only insert documents if present
if len(documents) > 0:
self.insert_many("data_arrays", documents)
except Exception as e:
log_exception(IngestPipeline.dev_logger, IngestPipeline.user_logger, e)
return 1
return 0
def load_subsample(
self, parent_collection_name, subsampled_data, set_data_array_fn, scope
):
"""Loads subsampled data into MongoDB"""
documents = []
try:
for key_value in subsampled_data[0].items():
annot_name = subsampled_data[1][0]
annot_type = subsampled_data[1][1]
sample_size = subsampled_data[2]
query = {
"study_id": ObjectId(self.study_id),
"study_file_id": ObjectId(self.study_file_id),
}
# Query mongo for linear_id and 'name' of parent
# Then return 'name' and 'id' fields from query results
parent_data = self.db[parent_collection_name].find_one(
query, {"name": 1}
)
for model in set_data_array_fn(
(
key_value[0], # NAMES, x, y, or z
# Cluster name provided from parent
parent_data["name"],
key_value[1], # Subsampled data/values
ObjectId(self.study_file_id),
ObjectId(self.study_id),
parent_data["_id"],
),
{
"subsample_annotation": f"{annot_name}--{annot_type}--{scope}",
"subsample_threshold": sample_size,
},
):
documents.append(model)
self.db["data_arrays"].insert_many(documents)
except Exception as e:
log_exception(IngestPipeline.dev_logger, IngestPipeline.user_logger, e)
return 1
return 0
def upload_metadata_to_bq(self):
"""Uploads metadata to BigQuery"""
if self.kwargs["validate_convention"] is not None:
if (
self.kwargs["validate_convention"]
and self.kwargs["bq_dataset"]
and self.kwargs["bq_table"]
):
write_status = write_metadata_to_bq(
self.cell_metadata,
self.kwargs["bq_dataset"],
self.kwargs["bq_table"],
)
return write_status
else:
IngestPipeline.dev_logger.error(
"Erroneous call to upload_metadata_to_bq"
)
return 1
return 0
@custom_metric(config.get_metric_properties)
def ingest_expression(self) -> int:
"""
Ingests expression files.
"""
self.expression_ingestor = None
try:
if MTXIngestor.matches_file_type(self.matrix_file_type):
self.expression_ingestor = MTXIngestor(
self.matrix_file, self.study_id, self.study_file_id, **self.kwargs
)
if DenseIngestor.matches_file_type(self.matrix_file_type):
self.expression_ingestor = DenseIngestor(
self.matrix_file,
self.study_id,
self.study_file_id,
tracer=self.tracer,
**self.kwargs,
)
self.expression_ingestor.execute_ingest()
except Exception as e:
self.report_validation("failure")
log_exception(IngestPipeline.dev_logger, IngestPipeline.user_logger, e)
return 1
self.report_validation("success")
return 0
# More work needs to be done to fully remove ingest from IngestPipeline
# Tracked in SCP-3023
@custom_metric(config.get_metric_properties)
def ingest_cell_metadata(self):
"""Ingests cell metadata files into Firestore."""
validate_against_convention = False
if self.kwargs["validate_convention"] is not None:
if self.kwargs["validate_convention"]:
validate_against_convention = True
self.cell_metadata.preprocess(validate_against_convention)
if self.cell_metadata.validate(validate_against_convention):
IngestPipeline.dev_logger.info("Cell metadata file format valid")
# Check file against metadata convention
if validate_against_convention:
if self.cell_metadata.conforms_to_metadata_convention():
IngestPipeline.dev_logger.info(
"Cell metadata file conforms to metadata convention"
)
else:
config.get_metric_properties().update(self.cell_metadata.props)
self.report_validation("failure")
return 1
self.report_validation("success")
for metadata_model in self.cell_metadata.execute_ingest():
IngestPipeline.dev_logger.info(
f"Attempting to load cell metadata header : {metadata_model.annot_header}"
)
status = self.load(
self.cell_metadata.COLLECTION_NAME,
metadata_model.model,
self.cell_metadata.set_data_array,
metadata_model.annot_header,
)
if status != 0:
IngestPipeline.user_logger.error(
f"Loading cell metadata header : {metadata_model.annot_header} failed. Exiting program"
)
return status
return status if status is not None else 1
else:
report_issues(self.cell_metadata)
config.get_metric_properties().update(self.cell_metadata.props)
self.report_validation("failure")
IngestPipeline.user_logger.error("Cell metadata file format invalid")
return 1
@custom_metric(config.get_metric_properties)
def ingest_cluster(self):
"""Ingests cluster files."""
if self.cluster.validate():
self.report_validation("success")
annotation_model = self.cluster.transform()
status = self.load(
self.cluster.COLLECTION_NAME,
annotation_model,
self.cluster.get_data_array_annot,
)
if status != 0:
return status
# Incorrect file format
else:
report_issues(self.cluster)
config.get_metric_properties().update(self.cluster.props)
self.report_validation("failure")
IngestPipeline.user_logger.error("Cluster file format invalid")
return 1
return status
@custom_metric(config.get_metric_properties)
def subsample(self):
"""Method for subsampling cluster and metadata files"""
subsample = SubSample(
cluster_file=self.cluster_file, cell_metadata_file=self.cell_metadata_file
)
for data in subsample.subsample("cluster"):
load_status = self.load_subsample(
Clusters.COLLECTION_NAME, data, subsample.set_data_array, "cluster"
)
if load_status != 0:
return load_status
if self.cell_metadata_file is not None:
try:
subsample.prepare_cell_metadata()
# Get cell names from cluster and metadata files
# strip of whitespace that pandas might add
cluster_cell_names = map(
lambda s: s.strip(), SubSample.get_cell_names(subsample.file)
)
metadata_cell_names = map(
lambda s: s.strip(),
SubSample.get_cell_names(subsample.cell_metadata.file),
)
# Check that cell names in cluster file exist in cell metadata file
if SubSample.has_cells_in_metadata_file(
metadata_cell_names, cluster_cell_names
):
for data in subsample.subsample("study"):
load_status = self.load_subsample(
Clusters.COLLECTION_NAME,
data,
subsample.set_data_array,
"study",
)
if load_status != 0:
return load_status
else:
# Caution: recording errorTypes in this manner can clobber other collected errors.
# In subsampling, known failure modes are ValueErrors which stop processing so
# this logging approach should not lose file validation information
config.get_metric_properties().update(
{"errorTypes": ["content:missing:values-across-files"]}
)
self.report_validation("failure")
raise ValueError(
"Cluster file has cell names that are not present in cell metadata file."
)
except Exception as e:
# ToDo ingest.props["errorType"] = "subsample:"
log_exception(IngestPipeline.dev_logger, IngestPipeline.user_logger, e)
return 1
return 0
def calculate_de(self):
""" Run differential expression analysis """
try:
de = DifferentialExpression(
cluster=self.cluster,
cell_metadata=self.cell_metadata,
matrix_file_path=self.matrix_file_path,
matrix_file_type=self.matrix_file_type,
**self.kwargs,
)
de.execute_de()
except Exception as e:
log_exception(IngestPipeline.dev_logger, IngestPipeline.user_logger, e)
return 1
# ToDo: surface failed DE for analytics (SCP-4206)
return 0
def report_validation(self, status):
self.props["status"] = status
config.get_metric_properties().update(self.props)
MetricsService.log("file-validation", config.get_metric_properties())
def run_ingest(ingest, arguments, parsed_args):
"""Runs Ingest Pipeline as indicated by CLI or importing (test) module
"""
status = []
status_cell_metadata = None
# TODO: Add validation for gene file types
if "matrix_file" in arguments:
config.set_parent_event_name("ingest-pipeline:expression:ingest")
status.append(ingest.ingest_expression())
elif "ingest_cell_metadata" in arguments:
if arguments["ingest_cell_metadata"]:
config.set_parent_event_name("ingest-pipeline:cell_metadata:ingest")
status_cell_metadata = ingest.ingest_cell_metadata()
status.append(status_cell_metadata)
if parsed_args.bq_table is not None and status_cell_metadata == 0:
status_metadata_bq = ingest.upload_metadata_to_bq()
status.append(status_metadata_bq)
elif "ingest_cluster" in arguments:
if arguments["ingest_cluster"]:
config.set_parent_event_name("ingest-pipeline:cluster:ingest")
status.append(ingest.ingest_cluster())
elif "subsample" in arguments:
if arguments["subsample"]:
config.set_parent_event_name("ingest-pipeline:subsample:ingest")
status_subsample = ingest.subsample()
status.append(status_subsample)
elif "differential_expression" in arguments:
config.set_parent_event_name("ingest-pipeline:differential-expression")
status_de = ingest.calculate_de()
status.append(status_de)
print(f'STATUS post-DE {status}')
return status, status_cell_metadata
def get_delocalization_info(arguments):
""" extract info on study file for delocalization decision-making
"""
for argument in list(arguments.keys()):
captured_argument = re.match(r"(\w*file)$", argument)
if captured_argument is not None:
study_file_id = arguments["study_file_id"]
matched_argument = captured_argument.groups()[0]
file_path = arguments[matched_argument]
# Need 1 argument that has a path to identify google bucket
# Break after first argument
break
return file_path, study_file_id
def exit_pipeline(ingest, status, status_cell_metadata, arguments):
"""Logs any errors, then exits Ingest Pipeline with standard OS code
"""
if len(status) > 0:
# for successful DE jobs, need to delocalize results
if "differential_expression" in arguments and all(i < 1 for i in status):
file_path, study_file_id = get_delocalization_info(arguments)
# append status?
if IngestFiles.is_remote_file(file_path):
files_to_match = DifferentialExpression.string_for_output_match(
arguments
)
DifferentialExpression.delocalize_de_files(
file_path, study_file_id, files_to_match
)
# all non-DE ingest jobs can exit on success
elif all(i < 1 for i in status):
sys.exit(os.EX_OK)
else:
file_path, study_file_id = get_delocalization_info(arguments)
if IngestFiles.is_remote_file(file_path):
if "differential_expression" in arguments:
log_path = (
f"parse_logs/differential_expression/{study_file_id}/log.txt"
)
else:
log_path = f"parse_logs/{study_file_id}/log.txt"
# Delocalize support log
IngestFiles.delocalize_file(
study_file_id, arguments["study_id"], file_path, "log.txt", log_path
)
# Delocalize user log
IngestFiles.delocalize_file(
study_file_id,
arguments["study_id"],
file_path,
"user_log.txt",
log_path,
)
if status_cell_metadata is not None:
if status_cell_metadata > 0 and ingest.cell_metadata.is_remote_file:
# PAPI jobs failing metadata validation against convention report
# will have "unexpected exit status 65 was not ignored"
# EX_DATAERR (65) The input data was incorrect in some way.
# note that failure to load to MongoDB also triggers this error
sys.exit(os.EX_DATAERR)
sys.exit(1)
def main() -> None:
"""Enables running Ingest Pipeline via CLI
Args:
None
Returns:
None
"""
parsed_args = create_parser().parse_args()
validate_arguments(parsed_args)
arguments = vars(parsed_args)
if "differential_expression" in arguments:
# DE may use metadata or cluster file for annots BUT
# IngestPipeline initialization assumes a "cell_metadata_file"
arguments["cell_metadata_file"] = arguments["annotation_file"]
# IngestPipeline initialization expects "name" and not "cluster_name"
arguments["name"] = arguments["cluster_name"]
# Initialize global variables for current ingest job
config.init(
arguments["study_id"],
arguments["study_file_id"],
arguments["user_metrics_uuid"],
)
ingest = IngestPipeline(**arguments)
status, status_cell_metadata = run_ingest(ingest, arguments, parsed_args)
# Print metrics properties
metrics_dump = config.get_metric_properties().get_properties()
for key in metrics_dump.keys():
print(f'{key}: {metrics_dump[key]}')
# Log Mixpanel events
MetricsService.log(config.get_parent_event_name(), config.get_metric_properties())
# Exit pipeline
exit_pipeline(ingest, status, status_cell_metadata, arguments)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"MONGODB_PASSWORD",
"DATABASE_NAME",
"DATABASE_HOST",
"MONGODB_USERNAME",
"GOOGLE_CLOUD_PROJECT"
] |
[]
|
["MONGODB_PASSWORD", "DATABASE_NAME", "DATABASE_HOST", "MONGODB_USERNAME", "GOOGLE_CLOUD_PROJECT"]
|
python
| 5 | 0 | |
api4/apitestlib.go
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package api4
import (
"context"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"github.com/mattermost/mattermost-server/v5/app"
"github.com/mattermost/mattermost-server/v5/config"
"github.com/mattermost/mattermost-server/v5/mlog"
"github.com/mattermost/mattermost-server/v5/model"
"github.com/mattermost/mattermost-server/v5/services/searchengine"
"github.com/mattermost/mattermost-server/v5/store"
"github.com/mattermost/mattermost-server/v5/store/localcachelayer"
"github.com/mattermost/mattermost-server/v5/store/storetest/mocks"
"github.com/mattermost/mattermost-server/v5/testlib"
"github.com/mattermost/mattermost-server/v5/utils"
"github.com/mattermost/mattermost-server/v5/web"
"github.com/mattermost/mattermost-server/v5/wsapi"
s3 "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/stretchr/testify/require"
)
type TestHelper struct {
App *app.App
Server *app.Server
ConfigStore *config.Store
Client *model.Client4
BasicUser *model.User
BasicUser2 *model.User
TeamAdminUser *model.User
BasicTeam *model.Team
BasicChannel *model.Channel
BasicPrivateChannel *model.Channel
BasicPrivateChannel2 *model.Channel
BasicDeletedChannel *model.Channel
BasicChannel2 *model.Channel
BasicPost *model.Post
Group *model.Group
SystemAdminClient *model.Client4
SystemAdminUser *model.User
tempWorkspace string
LocalClient *model.Client4
IncludeCacheLayer bool
}
var mainHelper *testlib.MainHelper
func SetMainHelper(mh *testlib.MainHelper) {
mainHelper = mh
}
func setupTestHelper(dbStore store.Store, searchEngine *searchengine.Broker, enterprise bool, includeCache bool, updateConfig func(*model.Config)) *TestHelper {
tempWorkspace, err := ioutil.TempDir("", "apptest")
if err != nil {
panic(err)
}
memoryStore, err := config.NewMemoryStoreWithOptions(&config.MemoryStoreOptions{IgnoreEnvironmentOverrides: true})
if err != nil {
panic("failed to initialize memory store: " + err.Error())
}
memoryConfig := &model.Config{}
memoryConfig.SetDefaults()
*memoryConfig.PluginSettings.Directory = filepath.Join(tempWorkspace, "plugins")
*memoryConfig.PluginSettings.ClientDirectory = filepath.Join(tempWorkspace, "webapp")
memoryConfig.ServiceSettings.EnableLocalMode = model.NewBool(true)
*memoryConfig.ServiceSettings.LocalModeSocketLocation = filepath.Join(tempWorkspace, "mattermost_local.sock")
*memoryConfig.AnnouncementSettings.AdminNoticesEnabled = false
*memoryConfig.AnnouncementSettings.UserNoticesEnabled = false
if updateConfig != nil {
updateConfig(memoryConfig)
}
memoryStore.Set(memoryConfig)
configStore, err := config.NewStoreFromBacking(memoryStore)
if err != nil {
panic(err)
}
var options []app.Option
options = append(options, app.ConfigStore(configStore))
options = append(options, app.StoreOverride(dbStore))
s, err := app.NewServer(options...)
if err != nil {
panic(err)
}
if includeCache {
// Adds the cache layer to the test store
s.Store = localcachelayer.NewLocalCacheLayer(s.Store, s.Metrics, s.Cluster, s.CacheProvider)
}
th := &TestHelper{
App: app.New(app.ServerConnector(s)),
Server: s,
ConfigStore: configStore,
IncludeCacheLayer: includeCache,
}
if searchEngine != nil {
th.App.SetSearchEngine(searchEngine)
}
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.TeamSettings.MaxUsersPerTeam = 50
*cfg.RateLimitSettings.Enable = false
*cfg.EmailSettings.SendEmailNotifications = true
*cfg.ServiceSettings.SiteURL = ""
// Disable sniffing, otherwise elastic client fails to connect to docker node
// More details: https://github.com/olivere/elastic/wiki/Sniffing
*cfg.ElasticsearchSettings.Sniff = false
})
prevListenAddress := *th.App.Config().ServiceSettings.ListenAddress
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.ListenAddress = ":0" })
if err := th.Server.Start(); err != nil {
panic(err)
}
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.ListenAddress = prevListenAddress })
Init(th.Server, th.Server.AppOptions, th.App.Srv().Router)
InitLocal(th.Server, th.Server.AppOptions, th.App.Srv().LocalRouter)
web.New(th.Server, th.Server.AppOptions, th.App.Srv().Router)
wsapi.Init(th.App.Srv())
th.App.DoAppMigrations()
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.TeamSettings.EnableOpenServer = true })
// Disable strict password requirements for test
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.PasswordSettings.MinimumLength = 5
*cfg.PasswordSettings.Lowercase = false
*cfg.PasswordSettings.Uppercase = false
*cfg.PasswordSettings.Symbol = false
*cfg.PasswordSettings.Number = false
})
if enterprise {
th.App.Srv().SetLicense(model.NewTestLicense())
} else {
th.App.Srv().SetLicense(nil)
}
th.Client = th.CreateClient()
th.SystemAdminClient = th.CreateClient()
// Verify handling of the supported true/false values by randomizing on each run.
rand.Seed(time.Now().UTC().UnixNano())
trueValues := []string{"1", "t", "T", "TRUE", "true", "True"}
falseValues := []string{"0", "f", "F", "FALSE", "false", "False"}
trueString := trueValues[rand.Intn(len(trueValues))]
falseString := falseValues[rand.Intn(len(falseValues))]
mlog.Debug("Configured Client4 bool string values", mlog.String("true", trueString), mlog.String("false", falseString))
th.Client.SetBoolString(true, trueString)
th.Client.SetBoolString(false, falseString)
th.LocalClient = th.CreateLocalClient(*memoryConfig.ServiceSettings.LocalModeSocketLocation)
if th.tempWorkspace == "" {
th.tempWorkspace = tempWorkspace
}
th.App.InitServer()
return th
}
func SetupEnterprise(tb testing.TB) *TestHelper {
if testing.Short() {
tb.SkipNow()
}
if mainHelper == nil {
tb.SkipNow()
}
dbStore := mainHelper.GetStore()
dbStore.DropAllTables()
dbStore.MarkSystemRanUnitTests()
searchEngine := mainHelper.GetSearchEngine()
th := setupTestHelper(dbStore, searchEngine, true, true, nil)
th.InitLogin()
return th
}
func Setup(tb testing.TB) *TestHelper {
if testing.Short() {
tb.SkipNow()
}
if mainHelper == nil {
tb.SkipNow()
}
dbStore := mainHelper.GetStore()
dbStore.DropAllTables()
dbStore.MarkSystemRanUnitTests()
searchEngine := mainHelper.GetSearchEngine()
th := setupTestHelper(dbStore, searchEngine, false, true, nil)
th.InitLogin()
return th
}
func SetupConfig(tb testing.TB, updateConfig func(cfg *model.Config)) *TestHelper {
if testing.Short() {
tb.SkipNow()
}
if mainHelper == nil {
tb.SkipNow()
}
dbStore := mainHelper.GetStore()
dbStore.DropAllTables()
dbStore.MarkSystemRanUnitTests()
searchEngine := mainHelper.GetSearchEngine()
th := setupTestHelper(dbStore, searchEngine, false, true, updateConfig)
th.InitLogin()
return th
}
func SetupConfigWithStoreMock(tb testing.TB, updateConfig func(cfg *model.Config)) *TestHelper {
th := setupTestHelper(testlib.GetMockStoreForSetupFunctions(), nil, false, false, updateConfig)
emptyMockStore := mocks.Store{}
emptyMockStore.On("Close").Return(nil)
th.App.Srv().Store = &emptyMockStore
return th
}
func SetupWithStoreMock(tb testing.TB) *TestHelper {
th := setupTestHelper(testlib.GetMockStoreForSetupFunctions(), nil, false, false, nil)
emptyMockStore := mocks.Store{}
emptyMockStore.On("Close").Return(nil)
th.App.Srv().Store = &emptyMockStore
return th
}
func SetupEnterpriseWithStoreMock(tb testing.TB) *TestHelper {
th := setupTestHelper(testlib.GetMockStoreForSetupFunctions(), nil, true, false, nil)
emptyMockStore := mocks.Store{}
emptyMockStore.On("Close").Return(nil)
th.App.Srv().Store = &emptyMockStore
return th
}
func (me *TestHelper) ShutdownApp() {
done := make(chan bool)
go func() {
me.Server.Shutdown()
close(done)
}()
select {
case <-done:
case <-time.After(30 * time.Second):
// panic instead of fatal to terminate all tests in this package, otherwise the
// still running App could spuriously fail subsequent tests.
panic("failed to shutdown App within 30 seconds")
}
}
func (me *TestHelper) TearDown() {
utils.DisableDebugLogForTest()
if me.IncludeCacheLayer {
// Clean all the caches
me.App.Srv().InvalidateAllCaches()
}
me.ShutdownApp()
utils.EnableDebugLogForTest()
}
var initBasicOnce sync.Once
var userCache struct {
SystemAdminUser *model.User
TeamAdminUser *model.User
BasicUser *model.User
BasicUser2 *model.User
}
func (me *TestHelper) InitLogin() *TestHelper {
me.waitForConnectivity()
// create users once and cache them because password hashing is slow
initBasicOnce.Do(func() {
me.SystemAdminUser = me.CreateUser()
me.App.UpdateUserRoles(me.SystemAdminUser.Id, model.SYSTEM_USER_ROLE_ID+" "+model.SYSTEM_ADMIN_ROLE_ID, false)
me.SystemAdminUser, _ = me.App.GetUser(me.SystemAdminUser.Id)
userCache.SystemAdminUser = me.SystemAdminUser.DeepCopy()
me.TeamAdminUser = me.CreateUser()
me.App.UpdateUserRoles(me.TeamAdminUser.Id, model.SYSTEM_USER_ROLE_ID, false)
me.TeamAdminUser, _ = me.App.GetUser(me.TeamAdminUser.Id)
userCache.TeamAdminUser = me.TeamAdminUser.DeepCopy()
me.BasicUser = me.CreateUser()
me.BasicUser, _ = me.App.GetUser(me.BasicUser.Id)
userCache.BasicUser = me.BasicUser.DeepCopy()
me.BasicUser2 = me.CreateUser()
me.BasicUser2, _ = me.App.GetUser(me.BasicUser2.Id)
userCache.BasicUser2 = me.BasicUser2.DeepCopy()
})
// restore cached users
me.SystemAdminUser = userCache.SystemAdminUser.DeepCopy()
me.TeamAdminUser = userCache.TeamAdminUser.DeepCopy()
me.BasicUser = userCache.BasicUser.DeepCopy()
me.BasicUser2 = userCache.BasicUser2.DeepCopy()
mainHelper.GetSQLSupplier().GetMaster().Insert(me.SystemAdminUser, me.TeamAdminUser, me.BasicUser, me.BasicUser2)
// restore non hashed password for login
me.SystemAdminUser.Password = "Pa$$word11"
me.TeamAdminUser.Password = "Pa$$word11"
me.BasicUser.Password = "Pa$$word11"
me.BasicUser2.Password = "Pa$$word11"
var wg sync.WaitGroup
wg.Add(2)
go func() {
me.LoginSystemAdmin()
wg.Done()
}()
go func() {
me.LoginTeamAdmin()
wg.Done()
}()
wg.Wait()
return me
}
func (me *TestHelper) InitBasic() *TestHelper {
me.BasicTeam = me.CreateTeam()
me.BasicChannel = me.CreatePublicChannel()
me.BasicPrivateChannel = me.CreatePrivateChannel()
me.BasicPrivateChannel2 = me.CreatePrivateChannel()
me.BasicDeletedChannel = me.CreatePublicChannel()
me.BasicChannel2 = me.CreatePublicChannel()
me.BasicPost = me.CreatePost()
me.LinkUserToTeam(me.BasicUser, me.BasicTeam)
me.LinkUserToTeam(me.BasicUser2, me.BasicTeam)
me.App.AddUserToChannel(me.BasicUser, me.BasicChannel)
me.App.AddUserToChannel(me.BasicUser2, me.BasicChannel)
me.App.AddUserToChannel(me.BasicUser, me.BasicChannel2)
me.App.AddUserToChannel(me.BasicUser2, me.BasicChannel2)
me.App.AddUserToChannel(me.BasicUser, me.BasicPrivateChannel)
me.App.AddUserToChannel(me.BasicUser2, me.BasicPrivateChannel)
me.App.AddUserToChannel(me.BasicUser, me.BasicDeletedChannel)
me.App.AddUserToChannel(me.BasicUser2, me.BasicDeletedChannel)
me.App.UpdateUserRoles(me.BasicUser.Id, model.SYSTEM_USER_ROLE_ID, false)
me.Client.DeleteChannel(me.BasicDeletedChannel.Id)
me.LoginBasic()
me.Group = me.CreateGroup()
return me
}
func (me *TestHelper) waitForConnectivity() {
for i := 0; i < 1000; i++ {
conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", me.App.Srv().ListenAddr.Port))
if err == nil {
conn.Close()
return
}
time.Sleep(time.Millisecond * 20)
}
panic("unable to connect")
}
func (me *TestHelper) CreateClient() *model.Client4 {
return model.NewAPIv4Client(fmt.Sprintf("http://localhost:%v", me.App.Srv().ListenAddr.Port))
}
// ToDo: maybe move this to NewAPIv4SocketClient and reuse it in mmctl
func (me *TestHelper) CreateLocalClient(socketPath string) *model.Client4 {
httpClient := &http.Client{
Transport: &http.Transport{
Dial: func(network, addr string) (net.Conn, error) {
return net.Dial("unix", socketPath)
},
},
}
return &model.Client4{
ApiUrl: "http://_" + model.API_URL_SUFFIX,
HttpClient: httpClient,
}
}
func (me *TestHelper) CreateWebSocketClient() (*model.WebSocketClient, *model.AppError) {
return model.NewWebSocketClient4(fmt.Sprintf("ws://localhost:%v", me.App.Srv().ListenAddr.Port), me.Client.AuthToken)
}
func (me *TestHelper) CreateWebSocketSystemAdminClient() (*model.WebSocketClient, *model.AppError) {
return model.NewWebSocketClient4(fmt.Sprintf("ws://localhost:%v", me.App.Srv().ListenAddr.Port), me.SystemAdminClient.AuthToken)
}
func (me *TestHelper) CreateWebSocketClientWithClient(client *model.Client4) (*model.WebSocketClient, *model.AppError) {
return model.NewWebSocketClient4(fmt.Sprintf("ws://localhost:%v", me.App.Srv().ListenAddr.Port), client.AuthToken)
}
func (me *TestHelper) CreateBotWithSystemAdminClient() *model.Bot {
return me.CreateBotWithClient(me.SystemAdminClient)
}
func (me *TestHelper) CreateBotWithClient(client *model.Client4) *model.Bot {
bot := &model.Bot{
Username: GenerateTestUsername(),
DisplayName: "a bot",
Description: "bot",
}
utils.DisableDebugLogForTest()
rbot, resp := client.CreateBot(bot)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
return rbot
}
func (me *TestHelper) CreateUser() *model.User {
return me.CreateUserWithClient(me.Client)
}
func (me *TestHelper) CreateTeam() *model.Team {
return me.CreateTeamWithClient(me.Client)
}
func (me *TestHelper) CreateTeamWithClient(client *model.Client4) *model.Team {
id := model.NewId()
team := &model.Team{
DisplayName: "dn_" + id,
Name: GenerateTestTeamName(),
Email: me.GenerateTestEmail(),
Type: model.TEAM_OPEN,
}
utils.DisableDebugLogForTest()
rteam, resp := client.CreateTeam(team)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
return rteam
}
func (me *TestHelper) CreateUserWithClient(client *model.Client4) *model.User {
id := model.NewId()
user := &model.User{
Email: me.GenerateTestEmail(),
Username: GenerateTestUsername(),
Nickname: "nn_" + id,
FirstName: "f_" + id,
LastName: "l_" + id,
Password: "Pa$$word11",
}
utils.DisableDebugLogForTest()
ruser, response := client.CreateUser(user)
if response.Error != nil {
panic(response.Error)
}
ruser.Password = "Pa$$word11"
_, err := me.App.Srv().Store.User().VerifyEmail(ruser.Id, ruser.Email)
if err != nil {
return nil
}
utils.EnableDebugLogForTest()
return ruser
}
func (me *TestHelper) CreatePublicChannel() *model.Channel {
return me.CreateChannelWithClient(me.Client, model.CHANNEL_OPEN)
}
func (me *TestHelper) CreatePrivateChannel() *model.Channel {
return me.CreateChannelWithClient(me.Client, model.CHANNEL_PRIVATE)
}
func (me *TestHelper) CreateChannelWithClient(client *model.Client4, channelType string) *model.Channel {
return me.CreateChannelWithClientAndTeam(client, channelType, me.BasicTeam.Id)
}
func (me *TestHelper) CreateChannelWithClientAndTeam(client *model.Client4, channelType string, teamId string) *model.Channel {
id := model.NewId()
channel := &model.Channel{
DisplayName: "dn_" + id,
Name: GenerateTestChannelName(),
Type: channelType,
TeamId: teamId,
}
utils.DisableDebugLogForTest()
rchannel, resp := client.CreateChannel(channel)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
return rchannel
}
func (me *TestHelper) CreatePost() *model.Post {
return me.CreatePostWithClient(me.Client, me.BasicChannel)
}
func (me *TestHelper) CreatePinnedPost() *model.Post {
return me.CreatePinnedPostWithClient(me.Client, me.BasicChannel)
}
func (me *TestHelper) CreateMessagePost(message string) *model.Post {
return me.CreateMessagePostWithClient(me.Client, me.BasicChannel, message)
}
func (me *TestHelper) CreatePostWithClient(client *model.Client4, channel *model.Channel) *model.Post {
id := model.NewId()
post := &model.Post{
ChannelId: channel.Id,
Message: "message_" + id,
}
utils.DisableDebugLogForTest()
rpost, resp := client.CreatePost(post)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
return rpost
}
func (me *TestHelper) CreatePinnedPostWithClient(client *model.Client4, channel *model.Channel) *model.Post {
id := model.NewId()
post := &model.Post{
ChannelId: channel.Id,
Message: "message_" + id,
IsPinned: true,
}
utils.DisableDebugLogForTest()
rpost, resp := client.CreatePost(post)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
return rpost
}
func (me *TestHelper) CreateMessagePostWithClient(client *model.Client4, channel *model.Channel, message string) *model.Post {
post := &model.Post{
ChannelId: channel.Id,
Message: message,
}
utils.DisableDebugLogForTest()
rpost, resp := client.CreatePost(post)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
return rpost
}
func (me *TestHelper) CreateMessagePostNoClient(channel *model.Channel, message string, createAtTime int64) *model.Post {
post, err := me.App.Srv().Store.Post().Save(&model.Post{
UserId: me.BasicUser.Id,
ChannelId: channel.Id,
Message: message,
CreateAt: createAtTime,
})
if err != nil {
panic(err)
}
return post
}
func (me *TestHelper) CreateDmChannel(user *model.User) *model.Channel {
utils.DisableDebugLogForTest()
var err *model.AppError
var channel *model.Channel
if channel, err = me.App.GetOrCreateDirectChannel(me.BasicUser.Id, user.Id); err != nil {
mlog.Error(err.Error())
time.Sleep(time.Second)
panic(err)
}
utils.EnableDebugLogForTest()
return channel
}
func (me *TestHelper) LoginBasic() {
me.LoginBasicWithClient(me.Client)
}
func (me *TestHelper) LoginBasic2() {
me.LoginBasic2WithClient(me.Client)
}
func (me *TestHelper) LoginTeamAdmin() {
me.LoginTeamAdminWithClient(me.Client)
}
func (me *TestHelper) LoginSystemAdmin() {
me.LoginSystemAdminWithClient(me.SystemAdminClient)
}
func (me *TestHelper) LoginBasicWithClient(client *model.Client4) {
utils.DisableDebugLogForTest()
_, resp := client.Login(me.BasicUser.Email, me.BasicUser.Password)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) LoginBasic2WithClient(client *model.Client4) {
utils.DisableDebugLogForTest()
_, resp := client.Login(me.BasicUser2.Email, me.BasicUser2.Password)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) LoginTeamAdminWithClient(client *model.Client4) {
utils.DisableDebugLogForTest()
_, resp := client.Login(me.TeamAdminUser.Email, me.TeamAdminUser.Password)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) LoginSystemAdminWithClient(client *model.Client4) {
utils.DisableDebugLogForTest()
_, resp := client.Login(me.SystemAdminUser.Email, me.SystemAdminUser.Password)
if resp.Error != nil {
panic(resp.Error)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) UpdateActiveUser(user *model.User, active bool) {
utils.DisableDebugLogForTest()
_, err := me.App.UpdateActive(user, active)
if err != nil {
mlog.Error(err.Error())
time.Sleep(time.Second)
panic(err)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) LinkUserToTeam(user *model.User, team *model.Team) {
utils.DisableDebugLogForTest()
err := me.App.JoinUserToTeam(team, user, "")
if err != nil {
mlog.Error(err.Error())
time.Sleep(time.Second)
panic(err)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) AddUserToChannel(user *model.User, channel *model.Channel) *model.ChannelMember {
utils.DisableDebugLogForTest()
member, err := me.App.AddUserToChannel(user, channel)
if err != nil {
mlog.Error(err.Error())
time.Sleep(time.Second)
panic(err)
}
utils.EnableDebugLogForTest()
return member
}
func (me *TestHelper) GenerateTestEmail() string {
if *me.App.Config().EmailSettings.SMTPServer != "localhost" && os.Getenv("CI_INBUCKET_PORT") == "" {
return strings.ToLower("success+" + model.NewId() + "@simulator.amazonses.com")
}
return strings.ToLower(model.NewId() + "@localhost")
}
func (me *TestHelper) CreateGroup() *model.Group {
id := model.NewId()
group := &model.Group{
Name: model.NewString("n-" + id),
DisplayName: "dn_" + id,
Source: model.GroupSourceLdap,
RemoteId: "ri_" + id,
}
utils.DisableDebugLogForTest()
group, err := me.App.CreateGroup(group)
if err != nil {
panic(err)
}
utils.EnableDebugLogForTest()
return group
}
// TestForSystemAdminAndLocal runs a test function for both
// SystemAdmin and Local clients. Several endpoints work in the same
// way when used by a fully privileged user and through the local
// mode, so this helper facilitates checking both.
func (me *TestHelper) TestForSystemAdminAndLocal(t *testing.T, f func(*testing.T, *model.Client4), name ...string) {
var testName string
if len(name) > 0 {
testName = name[0] + "/"
}
t.Run(testName+"SystemAdminClient", func(t *testing.T) {
f(t, me.SystemAdminClient)
})
t.Run(testName+"LocalClient", func(t *testing.T) {
f(t, me.LocalClient)
})
}
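// Illustrative usage (not part of the original file): a test that should behave
// identically for a system admin and for local mode could be written like the
// sketch below; GetPing stands in for the endpoint under test.
//
//	func TestExampleEndpoint(t *testing.T) {
//		th := Setup(t)
//		defer th.TearDown()
//		th.TestForSystemAdminAndLocal(t, func(t *testing.T, client *model.Client4) {
//			_, resp := client.GetPing()
//			CheckNoError(t, resp)
//		}, "ExampleEndpoint")
//	}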
// TestForAllClients runs a test function for all the clients
// registered in the TestHelper.
func (me *TestHelper) TestForAllClients(t *testing.T, f func(*testing.T, *model.Client4), name ...string) {
var testName string
if len(name) > 0 {
testName = name[0] + "/"
}
t.Run(testName+"Client", func(t *testing.T) {
f(t, me.Client)
})
t.Run(testName+"SystemAdminClient", func(t *testing.T) {
f(t, me.SystemAdminClient)
})
t.Run(testName+"LocalClient", func(t *testing.T) {
f(t, me.LocalClient)
})
}
func GenerateTestUsername() string {
return "fakeuser" + model.NewRandomString(10)
}
func GenerateTestTeamName() string {
return "faketeam" + model.NewRandomString(6)
}
func GenerateTestChannelName() string {
return "fakechannel" + model.NewRandomString(10)
}
func GenerateTestAppName() string {
return "fakeoauthapp" + model.NewRandomString(10)
}
func GenerateTestId() string {
return model.NewId()
}
func CheckUserSanitization(t *testing.T, user *model.User) {
t.Helper()
require.Equal(t, "", user.Password, "password wasn't blank")
require.Empty(t, user.AuthData, "auth data wasn't blank")
require.Equal(t, "", user.MfaSecret, "mfa secret wasn't blank")
}
func CheckEtag(t *testing.T, data interface{}, resp *model.Response) {
t.Helper()
require.Empty(t, data)
require.Equal(t, resp.StatusCode, http.StatusNotModified, "wrong status code for etag")
}
func CheckNoError(t *testing.T, resp *model.Response) {
t.Helper()
require.Nil(t, resp.Error, "expected no error")
}
func checkHTTPStatus(t *testing.T, resp *model.Response, expectedStatus int, expectError bool) {
t.Helper()
require.NotNilf(t, resp, "Unexpected nil response, expected http:%v, expectError:%v", expectedStatus, expectError)
if expectError {
require.NotNil(t, resp.Error, "Expected a non-nil error and http status:%v, got nil, %v", expectedStatus, resp.StatusCode)
} else {
require.Nil(t, resp.Error, "Expected no error and http status:%v, got %q, http:%v", expectedStatus, resp.Error, resp.StatusCode)
}
require.Equalf(t, expectedStatus, resp.StatusCode, "Expected http status:%v, got %v (err: %q)", expectedStatus, resp.StatusCode, resp.Error)
}
func CheckOKStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusOK, false)
}
func CheckCreatedStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusCreated, false)
}
func CheckForbiddenStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusForbidden, true)
}
func CheckUnauthorizedStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusUnauthorized, true)
}
func CheckNotFoundStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusNotFound, true)
}
func CheckBadRequestStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusBadRequest, true)
}
func CheckNotImplementedStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusNotImplemented, true)
}
func CheckRequestEntityTooLargeStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusRequestEntityTooLarge, true)
}
func CheckInternalErrorStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusInternalServerError, true)
}
func CheckServiceUnavailableStatus(t *testing.T, resp *model.Response) {
t.Helper()
checkHTTPStatus(t, resp, http.StatusServiceUnavailable, true)
}
func CheckErrorMessage(t *testing.T, resp *model.Response, errorId string) {
t.Helper()
require.NotNilf(t, resp.Error, "should have errored with message: %s", errorId)
require.Equalf(t, errorId, resp.Error.Id, "incorrect error message, actual: %s, expected: %s", resp.Error.Id, errorId)
}
func CheckStartsWith(t *testing.T, value, prefix, message string) {
require.True(t, strings.HasPrefix(value, prefix), message, value)
}
// s3New is similar to s3.New() but allows initializing the client with either
// signature v2 or signature v4 credentials. If signV2 is false, the function
// always returns a signature v4 client.
//
// Additionally, this function takes a user-defined region; when set, it
// disables automatic region lookup.
func s3New(endpoint, accessKey, secretKey string, secure bool, signV2 bool, region string) (*s3.Client, error) {
var creds *credentials.Credentials
if signV2 {
creds = credentials.NewStatic(accessKey, secretKey, "", credentials.SignatureV2)
} else {
creds = credentials.NewStatic(accessKey, secretKey, "", credentials.SignatureV4)
}
opts := s3.Options{
Creds: creds,
Secure: secure,
Region: region,
}
return s3.New(endpoint, &opts)
}
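// Illustrative usage (not part of the original file): a signature v4 client
// against a hypothetical local MinIO endpoint could be constructed as:
//
//	s3Clnt, err := s3New("localhost:9000", "minioaccess", "miniosecret", false, false, "us-east-1")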
func (me *TestHelper) cleanupTestFile(info *model.FileInfo) error {
cfg := me.App.Config()
if *cfg.FileSettings.DriverName == model.IMAGE_DRIVER_S3 {
endpoint := *cfg.FileSettings.AmazonS3Endpoint
accessKey := *cfg.FileSettings.AmazonS3AccessKeyId
secretKey := *cfg.FileSettings.AmazonS3SecretAccessKey
secure := *cfg.FileSettings.AmazonS3SSL
signV2 := *cfg.FileSettings.AmazonS3SignV2
region := *cfg.FileSettings.AmazonS3Region
s3Clnt, err := s3New(endpoint, accessKey, secretKey, secure, signV2, region)
if err != nil {
return err
}
bucket := *cfg.FileSettings.AmazonS3Bucket
if err := s3Clnt.RemoveObject(context.Background(), bucket, info.Path, s3.RemoveObjectOptions{}); err != nil {
return err
}
if info.ThumbnailPath != "" {
if err := s3Clnt.RemoveObject(context.Background(), bucket, info.ThumbnailPath, s3.RemoveObjectOptions{}); err != nil {
return err
}
}
if info.PreviewPath != "" {
if err := s3Clnt.RemoveObject(context.Background(), bucket, info.PreviewPath, s3.RemoveObjectOptions{}); err != nil {
return err
}
}
} else if *cfg.FileSettings.DriverName == model.IMAGE_DRIVER_LOCAL {
if err := os.Remove(*cfg.FileSettings.Directory + info.Path); err != nil {
return err
}
if info.ThumbnailPath != "" {
if err := os.Remove(*cfg.FileSettings.Directory + info.ThumbnailPath); err != nil {
return err
}
}
if info.PreviewPath != "" {
if err := os.Remove(*cfg.FileSettings.Directory + info.PreviewPath); err != nil {
return err
}
}
}
return nil
}
func (me *TestHelper) MakeUserChannelAdmin(user *model.User, channel *model.Channel) {
utils.DisableDebugLogForTest()
if cm, err := me.App.Srv().Store.Channel().GetMember(channel.Id, user.Id); err == nil {
cm.SchemeAdmin = true
if _, err = me.App.Srv().Store.Channel().UpdateMember(cm); err != nil {
utils.EnableDebugLogForTest()
panic(err)
}
} else {
utils.EnableDebugLogForTest()
panic(err)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) UpdateUserToTeamAdmin(user *model.User, team *model.Team) {
utils.DisableDebugLogForTest()
if tm, err := me.App.Srv().Store.Team().GetMember(team.Id, user.Id); err == nil {
tm.SchemeAdmin = true
if _, err = me.App.Srv().Store.Team().UpdateMember(tm); err != nil {
utils.EnableDebugLogForTest()
panic(err)
}
} else {
utils.EnableDebugLogForTest()
mlog.Error(err.Error())
time.Sleep(time.Second)
panic(err)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) UpdateUserToNonTeamAdmin(user *model.User, team *model.Team) {
utils.DisableDebugLogForTest()
if tm, err := me.App.Srv().Store.Team().GetMember(team.Id, user.Id); err == nil {
tm.SchemeAdmin = false
if _, err = me.App.Srv().Store.Team().UpdateMember(tm); err != nil {
utils.EnableDebugLogForTest()
panic(err)
}
} else {
utils.EnableDebugLogForTest()
mlog.Error(err.Error())
time.Sleep(time.Second)
panic(err)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) SaveDefaultRolePermissions() map[string][]string {
utils.DisableDebugLogForTest()
results := make(map[string][]string)
for _, roleName := range []string{
"system_user",
"system_admin",
"team_user",
"team_admin",
"channel_user",
"channel_admin",
} {
role, err1 := me.App.GetRoleByName(roleName)
if err1 != nil {
utils.EnableDebugLogForTest()
panic(err1)
}
results[roleName] = role.Permissions
}
utils.EnableDebugLogForTest()
return results
}
func (me *TestHelper) RestoreDefaultRolePermissions(data map[string][]string) {
utils.DisableDebugLogForTest()
for roleName, permissions := range data {
role, err1 := me.App.GetRoleByName(roleName)
if err1 != nil {
utils.EnableDebugLogForTest()
panic(err1)
}
if strings.Join(role.Permissions, " ") == strings.Join(permissions, " ") {
continue
}
role.Permissions = permissions
_, err2 := me.App.UpdateRole(role)
if err2 != nil {
utils.EnableDebugLogForTest()
panic(err2)
}
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) RemovePermissionFromRole(permission string, roleName string) {
utils.DisableDebugLogForTest()
role, err1 := me.App.GetRoleByName(roleName)
if err1 != nil {
utils.EnableDebugLogForTest()
panic(err1)
}
var newPermissions []string
for _, p := range role.Permissions {
if p != permission {
newPermissions = append(newPermissions, p)
}
}
if strings.Join(role.Permissions, " ") == strings.Join(newPermissions, " ") {
utils.EnableDebugLogForTest()
return
}
role.Permissions = newPermissions
_, err2 := me.App.UpdateRole(role)
if err2 != nil {
utils.EnableDebugLogForTest()
panic(err2)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) AddPermissionToRole(permission string, roleName string) {
utils.DisableDebugLogForTest()
role, err1 := me.App.GetRoleByName(roleName)
if err1 != nil {
utils.EnableDebugLogForTest()
panic(err1)
}
for _, existingPermission := range role.Permissions {
if existingPermission == permission {
utils.EnableDebugLogForTest()
return
}
}
role.Permissions = append(role.Permissions, permission)
_, err2 := me.App.UpdateRole(role)
if err2 != nil {
utils.EnableDebugLogForTest()
panic(err2)
}
utils.EnableDebugLogForTest()
}
func (me *TestHelper) SetupTeamScheme() *model.Scheme {
return me.SetupScheme(model.SCHEME_SCOPE_TEAM)
}
func (me *TestHelper) SetupChannelScheme() *model.Scheme {
return me.SetupScheme(model.SCHEME_SCOPE_CHANNEL)
}
func (me *TestHelper) SetupScheme(scope string) *model.Scheme {
scheme := model.Scheme{
Name: model.NewId(),
DisplayName: model.NewId(),
Scope: scope,
}
if scheme, err := me.App.CreateScheme(&scheme); err == nil {
return scheme
} else {
panic(err)
}
}
|
[
"\"CI_INBUCKET_PORT\""
] |
[] |
[
"CI_INBUCKET_PORT"
] |
[]
|
["CI_INBUCKET_PORT"]
|
go
| 1 | 0 | |
webhook.py
|
import datetime
import time
import traceback
import pytz
from flask import request
from werkzeug.exceptions import HTTPException
from setup import *
# ------------- server boot time -------------
boot_time = time.time()
boot_date = datetime.datetime.now(tz=pytz.timezone("Europe/Moscow"))
# -------------- Exception handler --------------
@app.errorhandler(Exception)
def handle_exception(e):
if isinstance(e, HTTPException):
return e
logger.error(traceback.format_exc())
return "Oops", 500
# -------------- status webpage --------------
@app.route('/')
def status():
password = request.args.get("password")
if password != ADMIN_PASSWORD:
logger.info('Status page loaded without password')
return "<h1>Access denied!<h1>", 403
return f'<p>Server uptime: {datetime.timedelta(seconds=time.time() - boot_time)}</p>' \
f'<p>Server last boot at {boot_date}</p>'
# ------------- webhook ----------------
@app.route('/' + WEBHOOK_TOKEN, methods=['POST'])
def getMessage():
# temp = request.stream.read().decode("utf-8")
temp = request.get_data().decode("utf-8")
temp = telebot.types.Update.de_json(temp)
logger.debug('New message received. raw: %s', temp)
bot.process_new_updates([temp])
return "!", 200
@app.route("/set_webhook")
def webhook_on():
password = request.args.get("password")
if password != ADMIN_PASSWORD:
logger.info('Set_webhook page loaded without password')
return "<h1>Access denied!<h1>", 403
bot.remove_webhook()
url = 'https://' + os.environ.get('HOST') + '/' + WEBHOOK_TOKEN
bot.set_webhook(url=url)
logger.info('Webhook is ON! Url: %s', url)
return "<h1>WebHook is ON!</h1>", 200
@app.route("/remove_webhook")
def webhook_off():
password = request.args.get("password")
if password != ADMIN_PASSWORD:
logger.info('Remove_webhook page loaded without password')
return "<h1>Access denied!<h1>", 403
bot.remove_webhook()
logger.info('WebHook is OFF!')
return "<h1>WebHook is OFF!</h1>", 200
|
[] |
[] |
[
"HOST"
] |
[]
|
["HOST"]
|
python
| 1 | 0 | |
elasticsearch/docker/tests/conftest.py
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Description:
# Configuration used for testing open distro for elasticsearch in docker
from subprocess import run
import os
import pytest
def pytest_addoption(parser):
"""Customize testinfra with config options via cli args"""
# By default run tests in clustered mode, but allow dev mode with --single-node"""
parser.addoption('--single-node', action='store_true',
help='non-clustered version')
# Bind-mount a user specified dir for the data dir
parser.addoption('--mount-datavolume1', action='store',
help='The host dir to be bind-mounted on data dir for the first node')
# Bind-mount a user specified dir for the data dir
parser.addoption('--mount-datavolume2', action='store',
help='The host dir to be bind-mounted on data dir for the second node')
# Let us override the Dockerfile's USER; akin to specifying `--user` in the docker run.
parser.addoption('--process-uid', action='store',
help='Used to override the Dockerfile\'s USER')
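# Illustrative invocation (not part of the original file): the options above can
# be combined on the pytest command line, for example:
#   pytest --single-node --mount-datavolume1 /tmp/esdata1 --process-uid 1000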
def pytest_configure(config):
# Named volumes used by default for persistence of each container
(datavolume1, datavolume2) = ("esdata1", "esdata2")
# Our default is not to override uid; empty strings for --user are ignored by Docker.
process_uid = ''
compose_flags = ('-f docker-compose.yml -f tests/docker-compose.yml up -d').split(' ')
if config.getoption('--single-node'):
compose_flags.append('elasticsearch1')
# Use a host dir for the data volume of Elasticsearch, if specified
if config.getoption('--mount-datavolume1'):
datavolume1 = config.getoption('--mount-datavolume1')
if config.getoption('--mount-datavolume2'):
datavolume2 = config.getoption('--mount-datavolume2')
if config.getoption('--process-uid'):
process_uid = config.getoption('--process-uid')
env_vars = os.environ
env_vars['DATA_VOLUME1'] = datavolume1
env_vars['DATA_VOLUME2'] = datavolume2
env_vars['PROCESS_UID'] = process_uid
run(['docker-compose'] + compose_flags, env=env_vars)
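# Note (assumption, not verified here): DATA_VOLUME1, DATA_VOLUME2 and PROCESS_UID
# are presumably interpolated by the compose files, e.g. as "${DATA_VOLUME1}" in a
# volume mapping and "${PROCESS_UID}" in a "user:" entry.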
def pytest_unconfigure(config):
run(['docker-compose', '-f', 'docker-compose.yml', 'down', '-v'])
run(['docker-compose', '-f', 'docker-compose.yml', 'rm', '-f', '-v'])
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
python/setup.py
|
# type: ignore
# ^ otherwise `pytest --mypy` complains; see “No stub for setuptools?” at
# https://github.com/python/typeshed/issues/2171
# setup.py for hfst_optimized_lookup
#
# The intent here is:
# - When building from a git checkout, run Cython on the `.pyx` file to
# get a `.cpp` file to go to pypi
# - When installing from pypi, ready-to-compile C++ files are included,
# with no need to run Cython or even have it installed
#
# See also:
# - https://martinsosic.com/development/2016/02/08/wrapping-c-library-as-python-module.html
# - https://discuss.python.org/t/building-extension-modules-the-2020-way/5950/25
# - https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html#basic-setup-py
# - https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html#distributing-cython-modules
import os
import platform
import sys
from distutils.version import LooseVersion
from pathlib import Path
from setuptools import setup, Extension
from sysconfig import get_config_vars
def file1_is_newer_than_file2(file1, file2):
"""Like `file1 -nt file2` in bash; returns true if file2 doesn’t exist.
Good for file-is-out-of-date checks, as logic reflects make’s default.
"""
path1 = Path(file1)
path2 = Path(file2)
if not path2.exists():
return True
if not path1.exists():
return False
return path1.stat().st_mtime > path2.stat().st_mtime
# The importable name of the python package, not the PyPI package name
# which has hyphens instead of underscores and goes in `setup(name=)`.
packages = ["hfst_optimized_lookup"]
cython_source_stem = "hfst_optimized_lookup/_hfst_optimized_lookup"
use_cython = file1_is_newer_than_file2(
f"{cython_source_stem}.pyx", f"{cython_source_stem}.cpp"
)
ext = ".pyx" if use_cython else ".cpp"
sources = [
f"{cython_source_stem}{ext}",
"hfst_optimized_lookup/hfst-optimized-lookup.cc",
]
extensions = [
Extension(
"hfst_optimized_lookup._hfst_optimized_lookup", sources=sources, language="c++"
)
]
if use_cython:
from Cython.Build import cythonize
extensions = cythonize(extensions, language_level=3)
# The Python that runs setup.py might have been compiled to target old
# versions of macOS on which the C++ library had a different name.
#
# Normally when building an extension you’d want to use the same compiler
# options as were used to build the current Python, but not if those
# compiler options mean that no C++ header files will be found, resulting
# in the error:
#
# clang: warning: include path for libstdc++ headers not found; pass '-stdlib=libc++' on the command line to use the libc++ standard library instead [-Wstdlibcxx-not-found]
# hfst_optimized_lookup/_hfst_optimized_lookup.cpp:644:10: fatal error: 'ios' file not found
# #include "ios"
# ^~~~~
# 1 error generated.
#
# Setting MACOSX_DEPLOYMENT_TARGET overrides this; according to the
# clang(1) man page, “If -mmacosx-version-min is unspecified, the default
# deployment target is read from this environment variable.”
#
# You can check what version of macOS your Python targets by running:
#
# import sysconfig
# print(sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET'))
#
# This next conditional is largely borrowed from
# https://github.com/pandas-dev/pandas/blob/a27244dc1993/setup.py#L427-L447
# (BSD-3-Clause)
#
# Also see
# https://github.com/Homebrew/brew/blob/master/docs/C%2B%2B-Standard-Libraries.md
if sys.platform == "darwin" and "MACOSX_DEPLOYMENT_TARGET" not in os.environ:
current_system = platform.mac_ver()[0]
python_target = get_config_vars().get("MACOSX_DEPLOYMENT_TARGET", current_system)
if (
LooseVersion(str(python_target)) < "10.9"
and LooseVersion(current_system) >= "10.9"
):
os.environ["MACOSX_DEPLOYMENT_TARGET"] = "10.9"
setup(
ext_modules=extensions,
packages=packages,
# `include_package_data` is one way of triggering an install of the
# `py.typed` marker file
include_package_data=True,
url="https://github.com/UAlbertaALTLab/hfst-optimized-lookup",
author="Andrew Neitsch",
author_email="[email protected]",
description="A pip-installable library version of hfst-optimized-lookup from https://hfst.github.io/",
# https://packaging.python.org/guides/making-a-pypi-friendly-readme/#including-your-readme-in-your-package-s-metadata
long_description=(Path(__file__).parent / "README.md").read_text(),
long_description_content_type="text/markdown",
license="Apache-2.0",
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Topic :: Text Processing :: Linguistic",
],
)
|
[] |
[] |
[
"MACOSX_DEPLOYMENT_TARGET"
] |
[]
|
["MACOSX_DEPLOYMENT_TARGET"]
|
python
| 1 | 0 | |
electrum/gui/qt/util.py
|
import asyncio
import os.path
import time
import sys
import platform
import queue
import traceback
import os
import webbrowser
from decimal import Decimal
from functools import partial, lru_cache
from typing import (NamedTuple, Callable, Optional, TYPE_CHECKING, Union, List, Dict, Any,
Sequence, Iterable)
from PyQt5.QtGui import (QFont, QColor, QCursor, QPixmap, QStandardItem, QImage,
QPalette, QIcon, QFontMetrics, QShowEvent, QPainter, QHelpEvent)
from PyQt5.QtCore import (Qt, QPersistentModelIndex, QModelIndex, pyqtSignal,
QCoreApplication, QItemSelectionModel, QThread,
QSortFilterProxyModel, QSize, QLocale, QAbstractItemModel,
QEvent, QRect, QPoint, QObject)
from PyQt5.QtWidgets import (QPushButton, QLabel, QMessageBox, QHBoxLayout,
QAbstractItemView, QVBoxLayout, QLineEdit,
QStyle, QDialog, QGroupBox, QButtonGroup, QRadioButton,
QFileDialog, QWidget, QToolButton, QTreeView, QPlainTextEdit,
QHeaderView, QApplication, QToolTip, QTreeWidget, QStyledItemDelegate,
QMenu, QStyleOptionViewItem, QLayout, QLayoutItem,
QGraphicsEffect, QGraphicsScene, QGraphicsPixmapItem)
from electrum.i18n import _, languages
from electrum.util import FileImportFailed, FileExportFailed, make_aiohttp_session, resource_path
from electrum.invoices import PR_UNPAID, PR_PAID, PR_EXPIRED, PR_INFLIGHT, PR_UNKNOWN, PR_FAILED, PR_ROUTING, PR_UNCONFIRMED
if TYPE_CHECKING:
from .main_window import ElectrumWindow
from .installwizard import InstallWizard
from electrum.simple_config import SimpleConfig
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
dialogs = []
pr_icons = {
PR_UNKNOWN:"warning.png",
PR_UNPAID:"unpaid.png",
PR_PAID:"confirmed.png",
PR_EXPIRED:"expired.png",
PR_INFLIGHT:"unconfirmed.png",
PR_FAILED:"warning.png",
PR_ROUTING:"unconfirmed.png",
PR_UNCONFIRMED:"unconfirmed.png",
}
# filter tx files in QFileDialog:
TRANSACTION_FILE_EXTENSION_FILTER_ANY = "Transaction (*.txn *.psbt);;All files (*)"
TRANSACTION_FILE_EXTENSION_FILTER_ONLY_PARTIAL_TX = "Partial Transaction (*.psbt)"
TRANSACTION_FILE_EXTENSION_FILTER_ONLY_COMPLETE_TX = "Complete Transaction (*.txn)"
TRANSACTION_FILE_EXTENSION_FILTER_SEPARATE = (f"{TRANSACTION_FILE_EXTENSION_FILTER_ONLY_PARTIAL_TX};;"
f"{TRANSACTION_FILE_EXTENSION_FILTER_ONLY_COMPLETE_TX};;"
f"All files (*)")
class EnterButton(QPushButton):
def __init__(self, text, func):
QPushButton.__init__(self, text)
self.func = func
self.clicked.connect(func)
def keyPressEvent(self, e):
if e.key() in [Qt.Key_Return, Qt.Key_Enter]:
self.func()
class ThreadedButton(QPushButton):
def __init__(self, text, task, on_success=None, on_error=None):
QPushButton.__init__(self, text)
self.task = task
self.on_success = on_success
self.on_error = on_error
self.clicked.connect(self.run_task)
def run_task(self):
self.setEnabled(False)
self.thread = TaskThread(self)
self.thread.add(self.task, self.on_success, self.done, self.on_error)
def done(self):
self.setEnabled(True)
self.thread.stop()
class WWLabel(QLabel):
def __init__ (self, text="", parent=None):
QLabel.__init__(self, text, parent)
self.setWordWrap(True)
self.setTextInteractionFlags(Qt.TextSelectableByMouse)
class HelpLabel(QLabel):
def __init__(self, text, help_text):
QLabel.__init__(self, text)
self.help_text = help_text
self.app = QCoreApplication.instance()
self.font = QFont()
def mouseReleaseEvent(self, x):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Help'),
text=self.help_text)
def enterEvent(self, event):
self.font.setUnderline(True)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
return QLabel.enterEvent(self, event)
def leaveEvent(self, event):
self.font.setUnderline(False)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
return QLabel.leaveEvent(self, event)
class HelpButton(QToolButton):
def __init__(self, text):
QToolButton.__init__(self)
self.setText('?')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.clicked.connect(self.onclick)
def onclick(self):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Help'),
text=self.help_text,
rich_text=True)
class InfoButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, 'Info')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(6 * char_width_in_lineedit())
self.clicked.connect(self.onclick)
def onclick(self):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Info'),
text=self.help_text,
rich_text=True)
class Buttons(QHBoxLayout):
def __init__(self, *buttons):
QHBoxLayout.__init__(self)
self.addStretch(1)
for b in buttons:
if b is None:
continue
self.addWidget(b)
class CloseButton(QPushButton):
def __init__(self, dialog):
QPushButton.__init__(self, _("Close"))
self.clicked.connect(dialog.close)
self.setDefault(True)
class CopyButton(QPushButton):
def __init__(self, text_getter, app):
QPushButton.__init__(self, _("Copy"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
class CopyCloseButton(QPushButton):
def __init__(self, text_getter, app, dialog):
QPushButton.__init__(self, _("Copy and Close"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
self.clicked.connect(dialog.close)
self.setDefault(True)
class OkButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("OK"))
self.clicked.connect(dialog.accept)
self.setDefault(True)
class CancelButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("Cancel"))
self.clicked.connect(dialog.reject)
class MessageBoxMixin(object):
def top_level_window_recurse(self, window=None, test_func=None):
window = window or self
classes = (WindowModalDialog, QMessageBox)
if test_func is None:
test_func = lambda x: True
for n, child in enumerate(window.children()):
# Test for visibility as old closed dialogs may not be GC-ed.
# Only accept children that conform to test_func.
if isinstance(child, classes) and child.isVisible() \
and test_func(child):
return self.top_level_window_recurse(child, test_func=test_func)
return window
def top_level_window(self, test_func=None):
return self.top_level_window_recurse(test_func)
def question(self, msg, parent=None, title=None, icon=None, **kwargs) -> bool:
Yes, No = QMessageBox.Yes, QMessageBox.No
return Yes == self.msg_box(icon=icon or QMessageBox.Question,
parent=parent,
title=title or '',
text=msg,
buttons=Yes|No,
defaultButton=No,
**kwargs)
def show_warning(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Warning, parent,
title or _('Warning'), msg, **kwargs)
def show_error(self, msg, parent=None, **kwargs):
return self.msg_box(QMessageBox.Warning, parent,
_('Error'), msg, **kwargs)
def show_critical(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Critical, parent,
title or _('Critical Error'), msg, **kwargs)
def show_message(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Information, parent,
title or _('Information'), msg, **kwargs)
def msg_box(self, icon, parent, title, text, *, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton, rich_text=False,
checkbox=None):
parent = parent or self.top_level_window()
return custom_message_box(icon=icon,
parent=parent,
title=title,
text=text,
buttons=buttons,
defaultButton=defaultButton,
rich_text=rich_text,
checkbox=checkbox)
def custom_message_box(*, icon, parent, title, text, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton, rich_text=False,
checkbox=None):
if type(icon) is QPixmap:
d = QMessageBox(QMessageBox.Information, title, str(text), buttons, parent)
d.setIconPixmap(icon)
else:
d = QMessageBox(icon, title, str(text), buttons, parent)
d.setWindowModality(Qt.WindowModal)
d.setDefaultButton(defaultButton)
if rich_text:
d.setTextInteractionFlags(Qt.TextSelectableByMouse | Qt.LinksAccessibleByMouse)
# set AutoText instead of RichText
# AutoText lets Qt figure out whether to render as rich text.
# e.g. if text is actually plain text and uses "\n" newlines;
# and we set RichText here, newlines would be swallowed
d.setTextFormat(Qt.AutoText)
else:
d.setTextInteractionFlags(Qt.TextSelectableByMouse)
d.setTextFormat(Qt.PlainText)
if checkbox is not None:
d.setCheckBox(checkbox)
return d.exec_()
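# Illustrative usage (not part of the original file):
#   custom_message_box(icon=QMessageBox.Warning, parent=None,
#                      title=_('Warning'), text='Something went wrong')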
class WindowModalDialog(QDialog, MessageBoxMixin):
'''Handy wrapper; window modal dialogs are better for our multi-window
daemon model as other wallet windows can still be accessed.'''
def __init__(self, parent, title=None):
QDialog.__init__(self, parent)
self.setWindowModality(Qt.WindowModal)
if title:
self.setWindowTitle(title)
class WaitingDialog(WindowModalDialog):
'''Shows a please wait dialog whilst running a task. It is not
necessary to maintain a reference to this dialog.'''
def __init__(self, parent: QWidget, message: str, task, on_success=None, on_error=None):
assert parent
if isinstance(parent, MessageBoxMixin):
parent = parent.top_level_window()
WindowModalDialog.__init__(self, parent, _("Please wait"))
self.message_label = QLabel(message)
vbox = QVBoxLayout(self)
vbox.addWidget(self.message_label)
self.accepted.connect(self.on_accepted)
self.show()
self.thread = TaskThread(self)
self.thread.finished.connect(self.deleteLater) # see #3956
self.thread.add(task, on_success, self.accept, on_error)
def wait(self):
self.thread.wait()
def on_accepted(self):
self.thread.stop()
def update(self, msg):
print(msg)
self.message_label.setText(msg)
class BlockingWaitingDialog(WindowModalDialog):
"""Shows a waiting dialog whilst running a task.
Should be called from the GUI thread. The GUI thread will be blocked while
the task is running; the point of the dialog is to provide feedback
to the user regarding what is going on.
"""
def __init__(self, parent: QWidget, message: str, task: Callable[[], Any]):
assert parent
if isinstance(parent, MessageBoxMixin):
parent = parent.top_level_window()
WindowModalDialog.__init__(self, parent, _("Please wait"))
self.message_label = QLabel(message)
vbox = QVBoxLayout(self)
vbox.addWidget(self.message_label)
# show popup
self.show()
# refresh GUI; needed for popup to appear and for message_label to get drawn
QCoreApplication.processEvents()
QCoreApplication.processEvents()
# block and run given task
task()
# close popup
self.accept()
def line_dialog(parent, title, label, ok_label, default=None):
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(500)
l = QVBoxLayout()
dialog.setLayout(l)
l.addWidget(QLabel(label))
txt = QLineEdit()
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.text()
def text_dialog(
*,
parent,
title,
header_layout,
ok_label,
default=None,
allow_multi=False,
config: 'SimpleConfig',
):
from .qrtextedit import ScanQRTextEdit
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(600)
l = QVBoxLayout()
dialog.setLayout(l)
if isinstance(header_layout, str):
l.addWidget(QLabel(header_layout))
else:
l.addLayout(header_layout)
txt = ScanQRTextEdit(allow_multi=allow_multi, config=config)
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.toPlainText()
class ChoicesLayout(object):
def __init__(self, msg, choices, on_clicked=None, checked_index=0):
vbox = QVBoxLayout()
if len(msg) > 50:
vbox.addWidget(WWLabel(msg))
msg = ""
gb2 = QGroupBox(msg)
vbox.addWidget(gb2)
vbox2 = QVBoxLayout()
gb2.setLayout(vbox2)
self.group = group = QButtonGroup()
for i,c in enumerate(choices):
button = QRadioButton(gb2)
button.setText(c)
vbox2.addWidget(button)
group.addButton(button)
group.setId(button, i)
if i==checked_index:
button.setChecked(True)
if on_clicked:
group.buttonClicked.connect(partial(on_clicked, self))
self.vbox = vbox
def layout(self):
return self.vbox
def selected_index(self):
return self.group.checkedId()
def address_field(addresses):
hbox = QHBoxLayout()
address_e = QLineEdit()
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
else:
addresses = []
def func():
try:
i = addresses.index(str(address_e.text())) + 1
i = i % len(addresses)
address_e.setText(addresses[i])
except ValueError:
# the user might have changed address_e to an
# address not in the wallet (or to something that isn't an address)
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
button = QPushButton(_('Address'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(address_e)
return hbox, address_e
def filename_field(parent, config, defaultname, select_msg):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Format")))
gb = QGroupBox("format", parent)
b1 = QRadioButton(gb)
b1.setText(_("CSV"))
b1.setChecked(True)
b2 = QRadioButton(gb)
b2.setText(_("json"))
vbox.addWidget(b1)
vbox.addWidget(b2)
hbox = QHBoxLayout()
directory = config.get('io_dir', os.path.expanduser('~'))
path = os.path.join(directory, defaultname)
filename_e = QLineEdit()
filename_e.setText(path)
def func():
text = filename_e.text()
_filter = "*.csv" if defaultname.endswith(".csv") else "*.json" if defaultname.endswith(".json") else None
p = getSaveFileName(
parent=None,
title=select_msg,
filename=text,
filter=_filter,
config=config,
)
if p:
filename_e.setText(p)
button = QPushButton(_('File'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(filename_e)
vbox.addLayout(hbox)
def set_csv(v):
text = filename_e.text()
text = text.replace(".json",".csv") if v else text.replace(".csv",".json")
filename_e.setText(text)
b1.clicked.connect(lambda: set_csv(True))
b2.clicked.connect(lambda: set_csv(False))
return vbox, filename_e, b1
class ElectrumItemDelegate(QStyledItemDelegate):
def __init__(self, tv: 'MyTreeView'):
super().__init__(tv)
self.tv = tv
self.opened = None
def on_closeEditor(editor: QLineEdit, hint):
self.opened = None
self.tv.is_editor_open = False
if self.tv._pending_update:
self.tv.update()
def on_commitData(editor: QLineEdit):
new_text = editor.text()
idx = QModelIndex(self.opened)
row, col = idx.row(), idx.column()
edit_key = self.tv.get_edit_key_from_coordinate(row, col)
assert edit_key is not None, (idx.row(), idx.column())
self.tv.on_edited(idx, edit_key=edit_key, text=new_text)
self.closeEditor.connect(on_closeEditor)
self.commitData.connect(on_commitData)
def createEditor(self, parent, option, idx):
self.opened = QPersistentModelIndex(idx)
self.tv.is_editor_open = True
return super().createEditor(parent, option, idx)
def paint(self, painter: QPainter, option: QStyleOptionViewItem, idx: QModelIndex) -> None:
custom_data = idx.data(MyTreeView.ROLE_CUSTOM_PAINT)
if custom_data is None:
return super().paint(painter, option, idx)
else:
# let's call the default paint method first; to paint the background (e.g. selection)
super().paint(painter, option, idx)
# and now paint on top of that
custom_data.paint(painter, option.rect)
def helpEvent(self, evt: QHelpEvent, view: QAbstractItemView, option: QStyleOptionViewItem, idx: QModelIndex) -> bool:
custom_data = idx.data(MyTreeView.ROLE_CUSTOM_PAINT)
if custom_data is None:
return super().helpEvent(evt, view, option, idx)
else:
if evt.type() == QEvent.ToolTip:
if custom_data.show_tooltip(evt):
return True
return super().helpEvent(evt, view, option, idx)
def sizeHint(self, option: QStyleOptionViewItem, idx: QModelIndex) -> QSize:
custom_data = idx.data(MyTreeView.ROLE_CUSTOM_PAINT)
if custom_data is None:
return super().sizeHint(option, idx)
else:
default_size = super().sizeHint(option, idx)
return custom_data.sizeHint(default_size)
class MyTreeView(QTreeView):
ROLE_CLIPBOARD_DATA = Qt.UserRole + 100
ROLE_CUSTOM_PAINT = Qt.UserRole + 101
ROLE_EDIT_KEY = Qt.UserRole + 102
ROLE_FILTER_DATA = Qt.UserRole + 103
filter_columns: Iterable[int]
def __init__(self, parent: 'ElectrumWindow', create_menu, *,
stretch_column=None, editable_columns=None):
super().__init__(parent)
self.parent = parent
self.config = self.parent.config
self.stretch_column = stretch_column
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(create_menu)
self.setUniformRowHeights(True)
# Control which columns are editable
if editable_columns is None:
editable_columns = []
self.editable_columns = set(editable_columns)
self.setItemDelegate(ElectrumItemDelegate(self))
self.current_filter = ""
self.is_editor_open = False
self.setRootIsDecorated(False) # remove left margin
self.toolbar_shown = False
# When figuring out the size of columns, Qt by default looks at
# the first 1000 rows (at least if resize mode is QHeaderView.ResizeToContents).
# This would be REALLY SLOW, and it's not perfect anyway.
# So to speed the UI up considerably, set it to
# only look at as many rows as currently visible.
self.header().setResizeContentsPrecision(0)
self._pending_update = False
self._forced_update = False
def set_editability(self, items):
for idx, i in enumerate(items):
i.setEditable(idx in self.editable_columns)
def selected_in_column(self, column: int):
items = self.selectionModel().selectedIndexes()
return list(x for x in items if x.column() == column)
def get_role_data_for_current_item(self, *, col, role) -> Any:
idx = self.selectionModel().currentIndex()
idx = idx.sibling(idx.row(), col)
item = self.item_from_index(idx)
if item:
return item.data(role)
def item_from_index(self, idx: QModelIndex) -> Optional[QStandardItem]:
model = self.model()
if isinstance(model, QSortFilterProxyModel):
idx = model.mapToSource(idx)
return model.sourceModel().itemFromIndex(idx)
else:
return model.itemFromIndex(idx)
def original_model(self) -> QAbstractItemModel:
model = self.model()
if isinstance(model, QSortFilterProxyModel):
return model.sourceModel()
else:
return model
def set_current_idx(self, set_current: QPersistentModelIndex):
if set_current:
assert isinstance(set_current, QPersistentModelIndex)
assert set_current.isValid()
self.selectionModel().select(QModelIndex(set_current), QItemSelectionModel.SelectCurrent)
def update_headers(self, headers: Union[List[str], Dict[int, str]]):
# headers is either a list of column names, or a dict: (col_idx->col_name)
if not isinstance(headers, dict): # convert to dict
headers = dict(enumerate(headers))
col_names = [headers[col_idx] for col_idx in sorted(headers.keys())]
self.original_model().setHorizontalHeaderLabels(col_names)
self.header().setStretchLastSection(False)
for col_idx in headers:
sm = QHeaderView.Stretch if col_idx == self.stretch_column else QHeaderView.ResizeToContents
self.header().setSectionResizeMode(col_idx, sm)
def keyPressEvent(self, event):
if self.itemDelegate().opened:
return
if event.key() in [Qt.Key_F2, Qt.Key_Return, Qt.Key_Enter]:
self.on_activated(self.selectionModel().currentIndex())
return
super().keyPressEvent(event)
def on_activated(self, idx):
# on 'enter' we show the menu
pt = self.visualRect(idx).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def edit(self, idx, trigger=QAbstractItemView.AllEditTriggers, event=None):
"""
this is to prevent:
edit: editing failed
from inside qt
"""
return super().edit(idx, trigger, event)
def on_edited(self, idx: QModelIndex, edit_key, *, text: str) -> None:
raise NotImplementedError()
def should_hide(self, row):
"""
row_num is for self.model(). So if there is a proxy, it is the row number
in that!
"""
return False
def get_text_from_coordinate(self, row, col) -> str:
idx = self.model().index(row, col)
item = self.item_from_index(idx)
return item.text()
def get_role_data_from_coordinate(self, row, col, *, role) -> Any:
idx = self.model().index(row, col)
item = self.item_from_index(idx)
role_data = item.data(role)
return role_data
def get_edit_key_from_coordinate(self, row, col) -> Any:
# overriding this might allow avoiding storing duplicate data
return self.get_role_data_from_coordinate(row, col, role=self.ROLE_EDIT_KEY)
def get_filter_data_from_coordinate(self, row, col) -> str:
filter_data = self.get_role_data_from_coordinate(row, col, role=self.ROLE_FILTER_DATA)
if filter_data:
return filter_data
txt = self.get_text_from_coordinate(row, col)
txt = txt.lower()
return txt
def hide_row(self, row_num):
"""
row_num is for self.model(). So if there is a proxy, it is the row number
in that!
"""
should_hide = self.should_hide(row_num)
if not self.current_filter and should_hide is None:
# no filters at all, neither date nor search
self.setRowHidden(row_num, QModelIndex(), False)
return
for column in self.filter_columns:
filter_data = self.get_filter_data_from_coordinate(row_num, column)
if self.current_filter in filter_data:
# the filter matched, but the date filter might apply
self.setRowHidden(row_num, QModelIndex(), bool(should_hide))
break
else:
# we did not find the filter in any columns, hide the item
self.setRowHidden(row_num, QModelIndex(), True)
def filter(self, p=None):
if p is not None:
p = p.lower()
self.current_filter = p
self.hide_rows()
def hide_rows(self):
for row in range(self.model().rowCount()):
self.hide_row(row)
def create_toolbar(self, config=None):
hbox = QHBoxLayout()
buttons = self.get_toolbar_buttons()
for b in buttons:
b.setVisible(False)
hbox.addWidget(b)
hide_button = QPushButton('x')
hide_button.setVisible(False)
hide_button.pressed.connect(lambda: self.show_toolbar(False, config))
self.toolbar_buttons = buttons + (hide_button,)
hbox.addStretch()
hbox.addWidget(hide_button)
return hbox
def save_toolbar_state(self, state, config):
pass # implemented in subclasses
def show_toolbar(self, state, config=None):
if state == self.toolbar_shown:
return
self.toolbar_shown = state
if config:
self.save_toolbar_state(state, config)
for b in self.toolbar_buttons:
b.setVisible(state)
if not state:
self.on_hide_toolbar()
def toggle_toolbar(self, config=None):
self.show_toolbar(not self.toolbar_shown, config)
def add_copy_menu(self, menu: QMenu, idx) -> QMenu:
cc = menu.addMenu(_("Copy"))
for column in self.Columns:
column_title = self.original_model().horizontalHeaderItem(column).text()
if not column_title:
continue
item_col = self.item_from_index(idx.sibling(idx.row(), column))
clipboard_data = item_col.data(self.ROLE_CLIPBOARD_DATA)
if clipboard_data is None:
clipboard_data = item_col.text().strip()
cc.addAction(column_title,
lambda text=clipboard_data, title=column_title:
self.place_text_on_clipboard(text, title=title))
return cc
def place_text_on_clipboard(self, text: str, *, title: str = None) -> None:
self.parent.do_copy(text, title=title)
def showEvent(self, e: 'QShowEvent'):
super().showEvent(e)
if e.isAccepted() and self._pending_update:
self._forced_update = True
self.update()
self._forced_update = False
def maybe_defer_update(self) -> bool:
"""Returns whether we should defer an update/refresh."""
defer = (not self._forced_update
and (not self.isVisible() or self.is_editor_open))
# side-effect: if we decide to defer update, the state will become stale:
self._pending_update = defer
return defer
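# A hedged sketch (not part of the original file) of how subclasses are expected
# to combine the two methods above: update() bails out via maybe_defer_update()
# while the view is hidden or an editor is open, and showEvent() replays the
# deferred refresh once the widget becomes visible again, roughly:
#
#     def update(self):
#         if self.maybe_defer_update():
#             return
#         ...  # rebuild the model / items here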
class MySortModel(QSortFilterProxyModel):
def __init__(self, parent, *, sort_role):
super().__init__(parent)
self._sort_role = sort_role
def lessThan(self, source_left: QModelIndex, source_right: QModelIndex):
item1 = self.sourceModel().itemFromIndex(source_left)
item2 = self.sourceModel().itemFromIndex(source_right)
data1 = item1.data(self._sort_role)
data2 = item2.data(self._sort_role)
if data1 is not None and data2 is not None:
return data1 < data2
v1 = item1.text()
v2 = item2.text()
try:
return Decimal(v1) < Decimal(v2)
except Exception:  # not numeric; fall back to lexicographic comparison
return v1 < v2
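def _example_sorted_model(parent, source_model, sort_role):
    """Hedged sketch, not used elsewhere in this file: wire a source model
    through MySortModel so rows sort by the data stored under `sort_role`,
    falling back to the Decimal/text comparison implemented in lessThan()."""
    proxy = MySortModel(parent, sort_role=sort_role)
    proxy.setSourceModel(source_model)
    return proxy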
class ButtonsWidget(QWidget):
def __init__(self):
super(QWidget, self).__init__()
self.buttons = [] # type: List[QToolButton]
def resizeButtons(self):
frameWidth = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth)
x = self.rect().right() - frameWidth - 10
y = self.rect().bottom() - frameWidth
for button in self.buttons:
sz = button.sizeHint()
x -= sz.width()
button.move(x, y - sz.height())
def addButton(self, icon_name, on_click, tooltip):
button = QToolButton(self)
button.setIcon(read_QIcon(icon_name))
button.setIconSize(QSize(25,25))
button.setCursor(QCursor(Qt.PointingHandCursor))
button.setStyleSheet("QToolButton { border: none; hover {border: 1px} pressed {border: 1px} padding: 0px; }")
button.setVisible(True)
button.setToolTip(tooltip)
button.clicked.connect(on_click)
self.buttons.append(button)
return button
def addCopyButton(self, app):
self.app = app
self.addButton("copy.png", self.on_copy, _("Copy to clipboard"))
def on_copy(self):
self.app.clipboard().setText(self.text())
QToolTip.showText(QCursor.pos(), _("Text copied to clipboard"), self)
def addPasteButton(self, app):
self.app = app
self.addButton("copy.png", self.on_paste, _("Paste from clipboard"))
def on_paste(self):
self.setText(self.app.clipboard().text())
class ButtonsLineEdit(QLineEdit, ButtonsWidget):
def __init__(self, text=None):
QLineEdit.__init__(self, text)
self.buttons = []
def resizeEvent(self, e):
o = QLineEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class ButtonsTextEdit(QPlainTextEdit, ButtonsWidget):
def __init__(self, text=None):
QPlainTextEdit.__init__(self, text)
self.setText = self.setPlainText
self.text = self.toPlainText
self.buttons = []
def resizeEvent(self, e):
o = QPlainTextEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class PasswordLineEdit(QLineEdit):
def __init__(self, *args, **kwargs):
QLineEdit.__init__(self, *args, **kwargs)
self.setEchoMode(QLineEdit.Password)
def clear(self):
# Try to actually overwrite the memory.
# This is really just a best-effort thing...
self.setText(len(self.text()) * " ")
super().clear()
class TaskThread(QThread):
'''Thread that runs background tasks. Callbacks are guaranteed
to happen in the context of its parent.'''
class Task(NamedTuple):
task: Callable
cb_success: Optional[Callable]
cb_done: Optional[Callable]
cb_error: Optional[Callable]
doneSig = pyqtSignal(object, object, object)
def __init__(self, parent, on_error=None):
super(TaskThread, self).__init__(parent)
self.on_error = on_error
self.tasks = queue.Queue()
self.doneSig.connect(self.on_done)
self.start()
def add(self, task, on_success=None, on_done=None, on_error=None):
on_error = on_error or self.on_error
self.tasks.put(TaskThread.Task(task, on_success, on_done, on_error))
def run(self):
while True:
task = self.tasks.get() # type: TaskThread.Task
if not task:
break
try:
result = task.task()
self.doneSig.emit(result, task.cb_done, task.cb_success)
except BaseException:
self.doneSig.emit(sys.exc_info(), task.cb_done, task.cb_error)
def on_done(self, result, cb_done, cb_result):
# This runs in the parent's thread.
if cb_done:
cb_done()
if cb_result:
cb_result(result)
def stop(self):
self.tasks.put(None)
self.exit()
self.wait()
class ColorSchemeItem:
def __init__(self, fg_color, bg_color):
self.colors = (fg_color, bg_color)
def _get_color(self, background):
return self.colors[(int(background) + int(ColorScheme.dark_scheme)) % 2]
def as_stylesheet(self, background=False):
css_prefix = "background-" if background else ""
color = self._get_color(background)
return "QWidget {{ {}color:{}; }}".format(css_prefix, color)
def as_color(self, background=False):
color = self._get_color(background)
return QColor(color)
class ColorScheme:
dark_scheme = False
GREEN = ColorSchemeItem("#117c11", "#8af296")
YELLOW = ColorSchemeItem("#897b2a", "#ffff00")
RED = ColorSchemeItem("#7c1111", "#f18c8c")
BLUE = ColorSchemeItem("#123b7c", "#8cb3f2")
DEFAULT = ColorSchemeItem("black", "white")
GRAY = ColorSchemeItem("gray", "gray")
@staticmethod
def has_dark_background(widget):
brightness = sum(widget.palette().color(QPalette.Background).getRgb()[0:3])
return brightness < (255*3/2)
@staticmethod
def update_from_widget(widget, force_dark=False):
if force_dark or ColorScheme.has_dark_background(widget):
ColorScheme.dark_scheme = True
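def _example_apply_color_scheme(label, positive):
    """Hedged sketch, not used elsewhere in this file: style a widget with one
    of the ColorScheme entries above; as_stylesheet() already accounts for
    ColorScheme.dark_scheme, which update_from_widget() may have set."""
    item = ColorScheme.GREEN if positive else ColorScheme.RED
    label.setStyleSheet(item.as_stylesheet())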
class AcceptFileDragDrop:
def __init__(self, file_type=""):
assert isinstance(self, QWidget)
self.setAcceptDrops(True)
self.file_type = file_type
def validateEvent(self, event):
if not event.mimeData().hasUrls():
event.ignore()
return False
for url in event.mimeData().urls():
if not url.toLocalFile().endswith(self.file_type):
event.ignore()
return False
event.accept()
return True
def dragEnterEvent(self, event):
self.validateEvent(event)
def dragMoveEvent(self, event):
if self.validateEvent(event):
event.setDropAction(Qt.CopyAction)
def dropEvent(self, event):
if self.validateEvent(event):
for url in event.mimeData().urls():
self.onFileAdded(url.toLocalFile())
def onFileAdded(self, fn):
raise NotImplementedError()
def import_meta_gui(electrum_window: 'ElectrumWindow', title, importer, on_success):
filter_ = "JSON (*.json);;All files (*)"
filename = getOpenFileName(
parent=electrum_window,
title=_("Open {} file").format(title),
filter=filter_,
config=electrum_window.config,
)
if not filename:
return
try:
importer(filename)
except FileImportFailed as e:
electrum_window.show_critical(str(e))
else:
electrum_window.show_message(_("Your {} were successfully imported").format(title))
on_success()
def export_meta_gui(electrum_window: 'ElectrumWindow', title, exporter):
filter_ = "JSON (*.json);;All files (*)"
filename = getSaveFileName(
parent=electrum_window,
title=_("Select file to save your {}").format(title),
filename='electrum_{}.json'.format(title),
filter=filter_,
config=electrum_window.config,
)
if not filename:
return
try:
exporter(filename)
except FileExportFailed as e:
electrum_window.show_critical(str(e))
else:
electrum_window.show_message(_("Your {0} were exported to '{1}'")
.format(title, str(filename)))
def getOpenFileName(*, parent, title, filter="", config: 'SimpleConfig') -> Optional[str]:
"""Custom wrapper for getOpenFileName that remembers the path selected by the user."""
directory = config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(parent, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(
*,
parent,
title,
filename,
filter="",
default_extension: str = None,
default_filter: str = None,
config: 'SimpleConfig',
) -> Optional[str]:
"""Custom wrapper for getSaveFileName that remembers the path selected by the user."""
directory = config.get('io_dir', os.path.expanduser('~'))
path = os.path.join(directory, filename)
file_dialog = QFileDialog(parent, title, path, filter)
file_dialog.setAcceptMode(QFileDialog.AcceptSave)
if default_extension:
# note: on MacOS, the selected filter's first extension seems to have priority over this...
file_dialog.setDefaultSuffix(default_extension)
if default_filter:
assert default_filter in filter, f"default_filter={default_filter!r} does not appear in filter={filter!r}"
file_dialog.selectNameFilter(default_filter)
if file_dialog.exec() != QDialog.Accepted:
return None
selected_path = file_dialog.selectedFiles()[0]
if selected_path and directory != os.path.dirname(selected_path):
config.set_key('io_dir', os.path.dirname(selected_path), True)
return selected_path
def icon_path(icon_basename):
return resource_path('gui', 'icons', icon_basename)
@lru_cache(maxsize=1000)
def read_QIcon(icon_basename):
return QIcon(icon_path(icon_basename))
class IconLabel(QWidget):
IconSize = QSize(16, 16)
HorizontalSpacing = 2
def __init__(self, *, text='', final_stretch=True):
super(QWidget, self).__init__()
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
self.icon = QLabel()
self.label = QLabel(text)
self.label.setTextInteractionFlags(Qt.TextSelectableByMouse)
layout.addWidget(self.label)
layout.addSpacing(self.HorizontalSpacing)
layout.addWidget(self.icon)
if final_stretch:
layout.addStretch()
def setText(self, text):
self.label.setText(text)
def setIcon(self, icon):
self.icon.setPixmap(icon.pixmap(self.IconSize))
self.icon.repaint() # macOS hack for #6269
def get_default_language():
return 'en_UK'
def char_width_in_lineedit() -> int:
char_width = QFontMetrics(QLineEdit().font()).averageCharWidth()
# 'averageCharWidth' seems to underestimate on Windows, hence 'max()'
return max(9, char_width)
def webopen(url: str):
if sys.platform == 'linux' and os.environ.get('APPIMAGE'):
# When on Linux webbrowser.open can fail in AppImage because it can't find the correct libdbus.
# We just fork the process and unset LD_LIBRARY_PATH before opening the URL.
# See #5425
if os.fork() == 0:
del os.environ['LD_LIBRARY_PATH']
webbrowser.open(url)
os._exit(0)
else:
webbrowser.open(url)
class FixedAspectRatioLayout(QLayout):
def __init__(self, parent: QWidget = None, aspect_ratio: float = 1.0):
super().__init__(parent)
self.aspect_ratio = aspect_ratio
self.items: List[QLayoutItem] = []
def set_aspect_ratio(self, aspect_ratio: float = 1.0):
self.aspect_ratio = aspect_ratio
self.update()
def addItem(self, item: QLayoutItem):
self.items.append(item)
def count(self) -> int:
return len(self.items)
def itemAt(self, index: int) -> QLayoutItem:
if index >= len(self.items):
return None
return self.items[index]
def takeAt(self, index: int) -> QLayoutItem:
if index >= len(self.items):
return None
return self.items.pop(index)
def _get_contents_margins_size(self) -> QSize:
margins = self.contentsMargins()
return QSize(margins.left() + margins.right(), margins.top() + margins.bottom())
def setGeometry(self, rect: QRect):
super().setGeometry(rect)
if not self.items:
return
contents = self.contentsRect()
if contents.height() > 0:
c_aratio = contents.width() / contents.height()
else:
c_aratio = 1
s_aratio = self.aspect_ratio
item_rect = QRect(QPoint(0, 0), QSize(
contents.width() if c_aratio < s_aratio else int(contents.height() * s_aratio),
contents.height() if c_aratio > s_aratio else int(contents.width() / s_aratio)
))
content_margins = self.contentsMargins()
free_space = contents.size() - item_rect.size()
for item in self.items:
if free_space.width() > 0 and not item.alignment() & Qt.AlignLeft:
if item.alignment() & Qt.AlignRight:
item_rect.moveRight(contents.width() + content_margins.right())
else:
item_rect.moveLeft(content_margins.left() + (free_space.width() // 2))
else:
item_rect.moveLeft(content_margins.left())
if free_space.height() > 0 and not item.alignment() & Qt.AlignTop:
if item.alignment() & Qt.AlignBottom:
item_rect.moveBottom(contents.height() + content_margins.bottom())
else:
item_rect.moveTop(content_margins.top() + (free_space.height() // 2))
else:
item_rect.moveTop(content_margins.top())
item.widget().setGeometry(item_rect)
def sizeHint(self) -> QSize:
result = QSize()
for item in self.items:
result = result.expandedTo(item.sizeHint())
return self._get_contents_margins_size() + result
def minimumSize(self) -> QSize:
result = QSize()
for item in self.items:
result = result.expandedTo(item.minimumSize())
return self._get_contents_margins_size() + result
def expandingDirections(self) -> Qt.Orientations:
return Qt.Horizontal | Qt.Vertical
def QColorLerp(a: QColor, b: QColor, t: float):
"""
Blends two QColors. t=0 returns a. t=1 returns b. t=0.5 returns evenly mixed.
"""
t = max(min(t, 1.0), 0.0)
i_t = 1.0 - t
return QColor(
int((a.red() * i_t) + (b.red() * t)),
int((a.green() * i_t) + (b.green() * t)),
int((a.blue() * i_t) + (b.blue() * t)),
int((a.alpha() * i_t) + (b.alpha() * t)),
)
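def _example_fade_to_white(color, t=0.5):
    """Hedged sketch, not used elsewhere in this file: fade `color` towards
    plain white with QColorLerp(); t=0 keeps `color`, t=1 gives white."""
    return QColorLerp(color, QColor("white"), t)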
class ImageGraphicsEffect(QObject):
"""
Applies a QGraphicsEffect to a QImage
"""
def __init__(self, parent: QObject, effect: QGraphicsEffect):
super().__init__(parent)
assert effect, 'effect must be set'
self.effect = effect
self.graphics_scene = QGraphicsScene()
self.graphics_item = QGraphicsPixmapItem()
self.graphics_item.setGraphicsEffect(effect)
self.graphics_scene.addItem(self.graphics_item)
def apply(self, image: QImage):
assert image, 'image must be set'
result = QImage(image.size(), QImage.Format_ARGB32)
result.fill(Qt.transparent)
painter = QPainter(result)
self.graphics_item.setPixmap(QPixmap.fromImage(image))
self.graphics_scene.render(painter)
self.graphics_item.setPixmap(QPixmap())
return result
if __name__ == "__main__":
app = QApplication([])
t = WaitingDialog(None, 'testing ...', lambda: [time.sleep(1)], lambda x: QMessageBox.information(None, 'done', "done"))
t.start()
app.exec_()
|
[] |
[] |
[
"APPIMAGE",
"LD_LIBRARY_PATH"
] |
[]
|
["APPIMAGE", "LD_LIBRARY_PATH"]
|
python
| 2 | 0 | |
pkg/cluster/cluster.go
|
package cluster
import (
"encoding/json"
"fmt"
"strings"
hserf "github.com/hashicorp/serf/serf"
"github.com/justinbarrick/civitas/pkg/lock"
"github.com/justinbarrick/civitas/pkg/raft"
"github.com/justinbarrick/civitas/pkg/serf"
"log"
"io/ioutil"
"os"
"time"
"github.com/hashicorp/go-discover"
"github.com/hashicorp/mdns"
)
type Cluster struct {
Port int
Addr string
NodeName string
NumInitialNodes int
MDNSService string
DiscoveryConfig []string
raft *raft.Raft
serf *serf.Serf
lock *lock.Lock
}
func (c *Cluster) Start() error {
var err error
serfPort := c.Port
raftPort := c.Port + 1
dsyncPort := c.Port + 2
c.raft, err = raft.NewRaft(c.NodeName, c.Addr, int(raftPort))
if err != nil {
return err
}
c.serf = serf.NewSerf(c.NodeName, c.Addr, int(serfPort))
c.serf.JoinCallback = c.JoinCallback
rpcAddr := fmt.Sprintf("%s:%d", c.Addr, dsyncPort)
c.lock = lock.NewLock(rpcAddr, c.NumInitialNodes)
c.lock.AddNode(lock.NewClient(rpcAddr))
if err := c.serf.Start(); err != nil {
return err
}
if err := c.Announce(); err != nil {
return err
}
go c.DiscoverNodes()
go c.serf.Join()
return nil
}
func (c *Cluster) JoinCallback(event hserf.MemberEvent) {
if !c.raft.Bootstrapped() {
for _, member := range c.serf.Members() {
memberRpcAddr := fmt.Sprintf("%s:%d", member.Addr.String(), member.Port+2)
c.lock.AddNode(lock.NewClient(memberRpcAddr))
}
lockAcquired, err := c.lock.Lock()
if err != nil && err.Error() == "not enough nodes" {
return
} else if err != nil {
log.Fatal(err)
}
if lockAcquired {
if err = c.raft.Bootstrap(); err != nil {
log.Fatal("could not bootstrap raft", err)
}
}
}
if c.raft.Bootstrapped() && c.raft.Leader() {
for _, member := range c.serf.Members() {
if member.Name == c.NodeName {
continue
}
if err := c.raft.AddNode(member.Name, member.Addr, member.Port+1); err != nil {
log.Fatal("error adding member", err)
}
}
}
}
func (c *Cluster) Send(obj interface{}) error {
data, err := json.Marshal(obj)
if err != nil {
return err
}
return c.raft.Apply(data)
}
func (c *Cluster) LogChannel() chan []byte {
return c.raft.LogChannel()
}
func (c *Cluster) NotifyChannel() chan bool {
return c.raft.NotifyChannel()
}
func (c *Cluster) Members() []hserf.Member {
return c.serf.Members()
}
func (c *Cluster) Announce() error {
if c.MDNSService == "" {
return nil
}
service, err := mdns.NewMDNSService(c.NodeName, c.MDNSService, "", "", c.Port, nil, []string{})
if err != nil {
return err
}
_, err = mdns.NewServer(&mdns.Config{Zone: service})
return err
}
func (c *Cluster) DiscoverNodes() {
logger := ioutil.Discard
if os.Getenv("DEBUG") == "1" {
logger = os.Stderr
}
l := log.New(logger, "", log.LstdFlags)
d := discover.Discover{}
discoveryConfig := c.DiscoveryConfig
if c.MDNSService != "" {
discoveryConfig = append(discoveryConfig, fmt.Sprintf("provider=mdns service=%s domain=local", c.MDNSService))
}
seenPeers := map[string]bool{}
for {
for _, cfg := range discoveryConfig {
tmpAddrs, err := d.Addrs(cfg, l)
if err != nil {
log.Println(err)
continue
}
for _, addr := range tmpAddrs {
if seenPeers[addr] {
continue
}
seenPeers[addr] = true
if !strings.Contains(addr, ":") {
addr = fmt.Sprintf("%s:%d", addr, c.Port)
}
log.Println("Discovered peer:", addr)
c.serf.AddNode(addr)
}
}
time.Sleep(2 * time.Second)
}
}
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
vendor/github.com/starkandwayne/safe/vault/utils.go
|
package vault
import (
"os"
"regexp"
"runtime"
"strconv"
"strings"
)
// ParsePath splits the given path string into its respective secret path
// and contained key parts
func ParsePath(path string) (secret, key string, version uint64) {
secret = path
if idx := strings.LastIndex(path, "^"); idx >= 0 {
versionString := path[idx+1:]
var err error
version, err = strconv.ParseUint(versionString, 10, 64)
if err == nil {
path = path[:idx]
secret = path
}
}
if idx := strings.LastIndex(path, ":"); idx >= 0 {
secret = path[:idx]
key = path[idx+1:]
}
secret = Canonicalize(secret)
return
}
// EncodePath creates a safe-friendly canonical path for the given arguments
func EncodePath(path, key string, version uint64) string {
if key != "" {
path += ":" + key
}
if version != 0 {
path += "^" + strconv.FormatUint(version, 10)
}
return path
}
// PathHasKey returns true if the given path has a key specified in its syntax.
// False otherwise.
func PathHasKey(path string) bool {
_, key, _ := ParsePath(path)
return key != ""
}
func Canonicalize(p string) string {
p = strings.TrimSuffix(p, "/")
p = strings.TrimPrefix(p, "/")
re := regexp.MustCompile("//+")
p = re.ReplaceAllString(p, "/")
return p
}
func userHomeDir() string {
if runtime.GOOS == "windows" {
home := os.Getenv("USERPROFILE")
if home == "" {
home = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
}
return home
}
return os.Getenv("HOME")
}
|
[
"\"USERPROFILE\"",
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"HOME\""
] |
[] |
[
"HOME",
"HOMEPATH",
"HOMEDRIVE",
"USERPROFILE"
] |
[]
|
["HOME", "HOMEPATH", "HOMEDRIVE", "USERPROFILE"]
|
go
| 4 | 0 | |
sandbox/grist/functions/test_schedule.py
|
from datetime import date, datetime, timedelta
import os
import timeit
import unittest
import moment
import schedule
from functions.date import DTIME
from functions import date as _date
DT = DTIME
TICK = timedelta.resolution
_orig_global_tz_getter = None
class TestSchedule(unittest.TestCase):
def assertDate(self, date_or_dtime, expected_str):
"""Formats date_or_dtime and compares the formatted value."""
return self.assertEqual(date_or_dtime.strftime("%Y-%m-%d %H:%M:%S"), expected_str)
def assertDateIso(self, date_or_dtime, expected_str):
"""Formats date_or_dtime and compares the formatted value."""
return self.assertEqual(date_or_dtime.isoformat(' '), expected_str)
def assertDelta(self, delta, months=0, **timedelta_args):
"""Asserts that the given delta corresponds to the given number of various units."""
self.assertEqual(delta._months, months)
self.assertEqual(delta._timedelta, timedelta(**timedelta_args))
@classmethod
def setUpClass(cls):
global _orig_global_tz_getter # pylint: disable=global-statement
_orig_global_tz_getter = _date._get_global_tz
_date._get_global_tz = lambda: moment.tzinfo('America/New_York')
@classmethod
def tearDownClass(cls):
_date._get_global_tz = _orig_global_tz_getter
def test_round_down_to_unit(self):
RDU = schedule._round_down_to_unit
self.assertDate(RDU(DT("2018-09-04 14:38:11"), "years"), "2018-01-01 00:00:00")
self.assertDate(RDU(DT("2018-01-01 00:00:00"), "years"), "2018-01-01 00:00:00")
self.assertDate(RDU(DT("2018-01-01 00:00:00") - TICK, "years"), "2017-01-01 00:00:00")
self.assertDate(RDU(DT("2018-09-04 14:38:11"), "months"), "2018-09-01 00:00:00")
self.assertDate(RDU(DT("2018-09-01 00:00:00"), "months"), "2018-09-01 00:00:00")
self.assertDate(RDU(DT("2018-09-01 00:00:00") - TICK, "months"), "2018-08-01 00:00:00")
# Note that 9/4 was a Tuesday, so start of the week (Sunday) is 9/2
self.assertDate(RDU(DT("2018-09-04 14:38:11"), "weeks"), "2018-09-02 00:00:00")
self.assertDate(RDU(DT("2018-09-02 00:00:00"), "weeks"), "2018-09-02 00:00:00")
self.assertDate(RDU(DT("2018-09-02 00:00:00") - TICK, "weeks"), "2018-08-26 00:00:00")
self.assertDate(RDU(DT("2018-09-04 14:38:11"), "days"), "2018-09-04 00:00:00")
self.assertDate(RDU(DT("2018-09-04 00:00:00"), "days"), "2018-09-04 00:00:00")
self.assertDate(RDU(DT("2018-09-04 00:00:00") - TICK, "days"), "2018-09-03 00:00:00")
self.assertDate(RDU(DT("2018-09-04 14:38:11"), "hours"), "2018-09-04 14:00:00")
self.assertDate(RDU(DT("2018-09-04 14:00:00"), "hours"), "2018-09-04 14:00:00")
self.assertDate(RDU(DT("2018-09-04 14:00:00") - TICK, "hours"), "2018-09-04 13:00:00")
self.assertDate(RDU(DT("2018-09-04 14:38:11"), "minutes"), "2018-09-04 14:38:00")
self.assertDate(RDU(DT("2018-09-04 14:38:00"), "minutes"), "2018-09-04 14:38:00")
self.assertDate(RDU(DT("2018-09-04 14:38:00") - TICK, "minutes"), "2018-09-04 14:37:00")
self.assertDate(RDU(DT("2018-09-04 14:38:11"), "seconds"), "2018-09-04 14:38:11")
self.assertDate(RDU(DT("2018-09-04 14:38:11") - TICK, "seconds"), "2018-09-04 14:38:10")
with self.assertRaisesRegexp(ValueError, r"Invalid unit inches"):
RDU(DT("2018-09-04 14:38:11"), "inches")
def test_round_down_to_unit_tz(self):
RDU = schedule._round_down_to_unit
dt = datetime(2018, 1, 1, 0, 0, 0, tzinfo=moment.tzinfo("America/New_York"))
self.assertDateIso(RDU(dt, "years"), "2018-01-01 00:00:00-05:00")
self.assertDateIso(RDU(dt - TICK, "years"), "2017-01-01 00:00:00-05:00")
self.assertDateIso(RDU(dt, "months"), "2018-01-01 00:00:00-05:00")
self.assertDateIso(RDU(dt - TICK, "months"), "2017-12-01 00:00:00-05:00")
# 2018-01-01 is a Monday
self.assertDateIso(RDU(dt, "weeks"), "2017-12-31 00:00:00-05:00")
self.assertDateIso(RDU(dt - timedelta(days=1) - TICK, "weeks"), "2017-12-24 00:00:00-05:00")
self.assertDateIso(RDU(dt, "days"), "2018-01-01 00:00:00-05:00")
self.assertDateIso(RDU(dt - TICK, "days"), "2017-12-31 00:00:00-05:00")
self.assertDateIso(RDU(dt, "hours"), "2018-01-01 00:00:00-05:00")
self.assertDateIso(RDU(dt - TICK, "hours"), "2017-12-31 23:00:00-05:00")
def test_parse_interval(self):
self.assertEqual(schedule._parse_interval("annual"), (1, "years"))
self.assertEqual(schedule._parse_interval("daily"), (1, "days"))
self.assertEqual(schedule._parse_interval("1-year"), (1, "years"))
self.assertEqual(schedule._parse_interval("1 year"), (1, "years"))
self.assertEqual(schedule._parse_interval("1 Years"), (1, "years"))
self.assertEqual(schedule._parse_interval("25-months"), (25, "months"))
self.assertEqual(schedule._parse_interval("3-day"), (3, "days"))
self.assertEqual(schedule._parse_interval("2-hour"), (2, "hours"))
with self.assertRaisesRegexp(ValueError, "Not a valid interval"):
schedule._parse_interval("1Year")
with self.assertRaisesRegexp(ValueError, "Not a valid interval"):
schedule._parse_interval("1y")
with self.assertRaisesRegexp(ValueError, "Unknown unit"):
schedule._parse_interval("1-daily")
def test_parse_slot(self):
self.assertDelta(schedule._parse_slot('Jan-15', 'years'), months=0, days=14)
self.assertDelta(schedule._parse_slot('1/15', 'years'), months=0, days=14)
self.assertDelta(schedule._parse_slot('march-1', 'years'), months=2, days=0)
self.assertDelta(schedule._parse_slot('03/09', 'years'), months=2, days=8)
self.assertDelta(schedule._parse_slot('/15', 'months'), days=14)
self.assertDelta(schedule._parse_slot('/1', 'months'), days=0)
self.assertDelta(schedule._parse_slot('Mon', 'weeks'), days=1)
self.assertDelta(schedule._parse_slot('tu', 'weeks'), days=2)
self.assertDelta(schedule._parse_slot('Friday', 'weeks'), days=5)
self.assertDelta(schedule._parse_slot('10am', 'days'), hours=10)
self.assertDelta(schedule._parse_slot('1:30pm', 'days'), hours=13, minutes=30)
self.assertDelta(schedule._parse_slot('15:45', 'days'), hours=15, minutes=45)
self.assertDelta(schedule._parse_slot('Apr-1 9am', 'years'), months=3, days=0, hours=9)
self.assertDelta(schedule._parse_slot('/3 12:30', 'months'), days=2, hours=12, minutes=30)
self.assertDelta(schedule._parse_slot('Sat 6:15pm', 'weeks'), days=6, hours=18, minutes=15)
self.assertDelta(schedule._parse_slot(':45', 'hours'), minutes=45)
self.assertDelta(schedule._parse_slot(':00', 'hours'), minutes=00)
self.assertDelta(schedule._parse_slot('+1d', 'days'), days=1)
self.assertDelta(schedule._parse_slot('+15d', 'months'), days=15)
self.assertDelta(schedule._parse_slot('+3w', 'weeks'), weeks=3)
self.assertDelta(schedule._parse_slot('+2m', 'years'), months=2)
self.assertDelta(schedule._parse_slot('+1y', 'years'), months=12)
# Test a few combinations.
self.assertDelta(schedule._parse_slot('+1y 4/5 3:45pm +30S', 'years'),
months=15, days=4, hours=15, minutes=45, seconds=30)
self.assertDelta(schedule._parse_slot('+2w Wed +6H +20M +40S', 'weeks'),
weeks=2, days=3, hours=6, minutes=20, seconds=40)
self.assertDelta(schedule._parse_slot('+2m /20 11pm', 'months'), months=2, days=19, hours=23)
self.assertDelta(schedule._parse_slot('+2M +30S', 'minutes'), minutes=2, seconds=30)
def test_parse_slot_errors(self):
# Test failures with duplicate units
with self.assertRaisesRegexp(ValueError, 'Duplicate unit'):
schedule._parse_slot('+1d +2d', 'weeks')
with self.assertRaisesRegexp(ValueError, 'Duplicate unit'):
schedule._parse_slot('9:30am +2H', 'days')
with self.assertRaisesRegexp(ValueError, 'Duplicate unit'):
schedule._parse_slot('/15 +1d', 'months')
with self.assertRaisesRegexp(ValueError, 'Duplicate unit'):
schedule._parse_slot('Feb-1 12:30pm +20M', 'years')
# Test failures with improper slot types
with self.assertRaisesRegexp(ValueError, 'Invalid slot.*for unit'):
schedule._parse_slot('Feb-1', 'weeks')
with self.assertRaisesRegexp(ValueError, 'Invalid slot.*for unit'):
schedule._parse_slot('Monday', 'months')
with self.assertRaisesRegexp(ValueError, 'Invalid slot.*for unit'):
schedule._parse_slot('4/15', 'hours')
with self.assertRaisesRegexp(ValueError, 'Invalid slot.*for unit'):
schedule._parse_slot('/1', 'years')
# Test failures with outright invalid slot syntax.
with self.assertRaisesRegexp(ValueError, 'Invalid slot'):
schedule._parse_slot('Feb:1', 'weeks')
with self.assertRaisesRegexp(ValueError, 'Invalid slot'):
schedule._parse_slot('/1d', 'months')
with self.assertRaisesRegexp(ValueError, 'Invalid slot'):
schedule._parse_slot('10', 'hours')
with self.assertRaisesRegexp(ValueError, 'Invalid slot'):
schedule._parse_slot('H1', 'years')
# Test failures with unknown values
with self.assertRaisesRegexp(ValueError, 'Unknown month'):
schedule._parse_slot('februarium-1', 'years')
with self.assertRaisesRegexp(ValueError, 'Unknown day of the week'):
schedule._parse_slot('snu', 'weeks')
with self.assertRaisesRegexp(ValueError, 'Unknown unit'):
schedule._parse_slot('+1t', 'hours')
def test_schedule(self):
# A few more examples. The ones in doctest strings are those that help documentation; the rest
# are in this file to keep the size of the main file more manageable.
# Note that 2018-01-01 (the start of the range) is a Monday
self.assertEqual(list(schedule.SCHEDULE(
"1-week: +1d 9:30am, +4d 3:30pm", start=datetime(2018,1,1), end=datetime(2018,1,31))),
[
DT("2018-01-01 09:30:00"), DT("2018-01-04 15:30:00"),
DT("2018-01-08 09:30:00"), DT("2018-01-11 15:30:00"),
DT("2018-01-15 09:30:00"), DT("2018-01-18 15:30:00"),
DT("2018-01-22 09:30:00"), DT("2018-01-25 15:30:00"),
DT("2018-01-29 09:30:00"),
])
self.assertEqual(list(schedule.SCHEDULE(
"3-month: +0d 12pm", start=datetime(2018,1,1), end=datetime(2018,6,30))),
[DT('2018-01-01 12:00:00'), DT('2018-04-01 12:00:00')])
# Ensure we can use date() object for start/end too.
self.assertEqual(list(schedule.SCHEDULE(
"3-month: +0d 12pm", start=date(2018,1,1), end=date(2018,6,30))),
[DT('2018-01-01 12:00:00'), DT('2018-04-01 12:00:00')])
# We can even use strings.
self.assertEqual(list(schedule.SCHEDULE(
"3-month: +0d 12pm", start="2018-01-01", end="2018-06-30")),
[DT('2018-01-01 12:00:00'), DT('2018-04-01 12:00:00')])
def test_timezone(self):
# Verify that the time zone of `start` determines the time zone of generated times.
tz_ny = moment.tzinfo("America/New_York")
self.assertEqual([d.isoformat(' ') for d in schedule.SCHEDULE(
"daily: 9am", count=4, start=datetime(2018, 2, 14, tzinfo=tz_ny))],
[ '2018-02-14 09:00:00-05:00', '2018-02-15 09:00:00-05:00',
'2018-02-16 09:00:00-05:00', '2018-02-17 09:00:00-05:00' ])
tz_la = moment.tzinfo("America/Los_Angeles")
self.assertEqual([d.isoformat(' ') for d in schedule.SCHEDULE(
"daily: 9am, 4:30pm", count=4, start=datetime(2018, 2, 14, 9, 0, tzinfo=tz_la))],
[ '2018-02-14 09:00:00-08:00', '2018-02-14 16:30:00-08:00',
'2018-02-15 09:00:00-08:00', '2018-02-15 16:30:00-08:00' ])
tz_utc = moment.tzinfo("UTC")
self.assertEqual([d.isoformat(' ') for d in schedule.SCHEDULE(
"daily: 9am, 4:30pm", count=4, start=datetime(2018, 2, 14, 17, 0, tzinfo=tz_utc))],
[ '2018-02-15 09:00:00+00:00', '2018-02-15 16:30:00+00:00',
'2018-02-16 09:00:00+00:00', '2018-02-16 16:30:00+00:00' ])
# This is not really a test but just a way to see some timing information about Schedule
# implementation. Run with PY_TIMING_TESTS=1 set in the environment, and the console output will
# include the measured times.
@unittest.skipUnless(os.getenv("PY_TIMING_TESTS") == "1", "Set PY_TIMING_TESTS=1 for timing")
def test_timing(self):
N = 1000
sched = "weekly: Mo 10:30am, We 10:30am"
setup = """
from functions import schedule
from datetime import datetime
"""
setup = "from functions import test_schedule as t"
expected_result = [
datetime(2018, 9, 24, 10, 30), datetime(2018, 9, 26, 22, 30),
datetime(2018, 10, 1, 10, 30), datetime(2018, 10, 3, 22, 30),
]
self.assertEqual(timing_schedule_full(), expected_result)
t = min(timeit.repeat(stmt="t.timing_schedule_full()", setup=setup, number=N, repeat=3))
print "\n*** SCHEDULE call with 4 points: %.2f us" % (t * 1000000 / N)
t = min(timeit.repeat(stmt="t.timing_schedule_init()", setup=setup, number=N, repeat=3))
print "*** Schedule constructor: %.2f us" % (t * 1000000 / N)
self.assertEqual(timing_schedule_series(), expected_result)
t = min(timeit.repeat(stmt="t.timing_schedule_series()", setup=setup, number=N, repeat=3))
print "*** Schedule series with 4 points: %.2f us" % (t * 1000000 / N)
def timing_schedule_full():
return list(schedule.SCHEDULE("weekly: Mo 10:30am, We 10:30pm",
start=datetime(2018, 9, 23), count=4))
def timing_schedule_init():
return schedule.Schedule("weekly: Mo 10:30am, We 10:30pm")
def timing_schedule_series(sched=schedule.Schedule("weekly: Mo 10:30am, We 10:30pm")):
return list(sched.series(datetime(2018, 9, 23), None, count=4))
|
[] |
[] |
[
"PY_TIMING_TESTS"
] |
[]
|
["PY_TIMING_TESTS"]
|
python
| 1 | 0 | |
docs/source/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))
# sys.path.insert(0, os.path.abspath('../..'))
# sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = 'SimpleGAN'
copyright = '2020, Rohith Gandhi G'
author = 'Rohith Gandhi G'
# The full version, including alpha/beta/rc tags
release = 'v0.2.8'
# -- General configuration ---------------------------------------------------
# Mock imports
autodoc_mock_imports = ["cv2",
"tensorflow",
"tensorflow_datasets",
"imagio",
"numpy",
"tqdm",
"trimesh",
"scipy",
"plotly",
"matplotlib"]
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.napoleon',
'sphinx.ext.doctest','sphinx.ext.intersphinx', 'sphinx.ext.ifconfig',
'sphinx.ext.viewcode', 'sphinx.ext.githubpages'
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
master_doc = 'index'
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
html_theme = 'default'
else:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# if not on_rtd: # only import and set the theme if we're building docs locally
# import sphinx_rtd_theme
# html_theme = "sphinx_rtd_theme"
# else:
# # Override default css to get a larger width for ReadTheDoc build
# html_context = {
# "css_files": [
# "https://media.readthedocs.org/css/sphinx_rtd_theme.css",
# "https://media.readthedocs.org/css/readthedocs-doc-embed.css"
# ]
# }
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
source_suffix = [".rst", ".md"]
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
python/setup.py
|
import os
import sys
setup_type = sys.argv[1]
try:
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
use_setuptools = True
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
if sys.version_info[0] >= 3:
import builtins
if hasattr(builtins, '__NUMPY_SETUP__'):
del builtins.__NUMPY_SETUP__
import importlib
import numpy
importlib.reload(numpy)
else:
import __builtin__
if hasattr(__builtin__, '__NUMPY_SETUP__'):
del __builtin__.__NUMPY_SETUP__
import imp
import numpy
imp.reload(numpy)
self.include_dirs.append(numpy.get_include())
print("setuptools is used.")
except ImportError:
from distutils.core import setup, Extension
use_setuptools = False
print("distutils is used.")
try:
from numpy.distutils.misc_util import get_numpy_include_dirs
except ImportError:
print("numpy.distutils.misc_util cannot be imported. Please install "
"numpy first before installing spglib...")
sys.exit(1)
# Workaround Python issue 21121
import sysconfig
config_var = sysconfig.get_config_var("CFLAGS")
if (config_var is not None and
"-Werror=declaration-after-statement" in config_var):
os.environ['CFLAGS'] = config_var.replace(
"-Werror=declaration-after-statement", "")
sources = ['arithmetic.c',
'cell.c',
'delaunay.c',
'debug.c',
'determination.c',
'hall_symbol.c',
'kgrid.c',
'kpoint.c',
'mathfunc.c',
'niggli.c',
'overlap.c',
'pointgroup.c',
'primitive.c',
'refinement.c',
'sitesym_database.c',
'site_symmetry.c',
'spacegroup.c',
'spin.c',
'spg_database.c',
'spglib.c',
'symmetry.c']
if os.path.exists('src'):
source_dir = "src"
else:
source_dir = "../src"
include_dirs = [source_dir, ]
if not use_setuptools:
include_dirs += get_numpy_include_dirs()
for i, s in enumerate(sources):
sources[i] = "%s/%s" % (source_dir, s)
extra_compile_args = []
if setup_type == 'test':
extra_compile_args.append("-UNDEBUG")
extra_link_args = []
define_macros = []
## Uncomment to activate OpenMP support for gcc
# extra_compile_args += ['-fopenmp']
# extra_link_args += ['-lgomp']
## For debugging
# define_macros = [('SPGWARNING', None),
# ('SPGDEBUG', None)]
extension = Extension('spglib._spglib',
include_dirs=include_dirs,
sources=['_spglib.c'] + sources,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
define_macros=define_macros)
version_nums = [None, None, None]
with open("%s/version.h" % source_dir) as w:
for line in w:
for i, chars in enumerate(("MAJOR", "MINOR", "MICRO")):
if chars in line:
version_nums[i] = int(line.split()[2])
# To deploy to pypi by travis-CI
nanoversion = 0
if os.path.isfile("__nanoversion__.txt"):
with open('__nanoversion__.txt') as nv:
try:
for line in nv:
nanoversion = int(line.strip())
break
except ValueError:
nanoversion = 0
version_nums.append(nanoversion)
if None in version_nums:
print("Failed to get version number in setup.py.")
raise
version = ".".join(["%d" % n for n in version_nums[:3]])
if len(version_nums) > 3:
version += "-%d" % version_nums[3]
if use_setuptools:
setup(name='spglib',
version=version,
cmdclass={'build_ext': build_ext},
setup_requires=['numpy', 'setuptools>=18.0'],
license='BSD-3-Clause',
description='This is the spglib module.',
long_description=open('README.rst', 'rb').read().decode('utf-8'),
long_description_content_type='text/x-rst',
author='Atsushi Togo',
author_email='[email protected]',
url='http://atztogo.github.io/spglib/',
packages=['spglib'],
install_requires=['numpy'],
provides=['spglib'],
platforms=['all'],
ext_modules=[extension],
test_suite='nose.collector',
tests_require=['nose'])
else:
setup(name='spglib',
version=version,
license='BSD-3-Clause',
description='This is the spglib module.',
long_description=open('README.rst', 'rb').read().decode('utf-8'),
long_description_content_type='text/x-rst',
author='Atsushi Togo',
author_email='[email protected]',
url='http://atztogo.github.io/spglib/',
packages=['spglib'],
requires=['numpy'],
provides=['spglib'],
platforms=['all'],
ext_modules=[extension],
test_suite='nose.collector',
tests_require=['nose'])
|
[] |
[] |
[
"CFLAGS"
] |
[]
|
["CFLAGS"]
|
python
| 1 | 0 | |
tests_deployment/test_dask_gateway.py
|
import dask_gateway
import os
import pytest
from tests_deployment import constants
from tests_deployment.utils import monkeypatch_ssl_context, get_jupyterhub_token
monkeypatch_ssl_context()
@pytest.fixture
def dask_gateway_object():
"""Connects to Dask Gateway cluster from outside the cluster."""
os.environ['JUPYTERHUB_API_TOKEN'] = get_jupyterhub_token()
return dask_gateway.Gateway(
address=f'https://{constants.QHUB_HOSTNAME}/{constants.GATEWAY_ENDPOINT}',
auth='jupyterhub',
proxy_address=f'tcp://{constants.QHUB_HOSTNAME}:8786'
)
def test_dask_gateway(dask_gateway_object):
"""This test checks if we're able to connect to dask gateway."""
assert dask_gateway_object.list_clusters() == []
def test_dask_gateway_cluster_options(dask_gateway_object):
"""Tests Dask Gateway's cluster options."""
cluster_options = dask_gateway_object.cluster_options()
assert cluster_options.conda_environment == "dask"
assert cluster_options.profile == "Small Worker"
assert cluster_options.environment_vars == {}
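def example_new_cluster(dask_gateway_object):
    """Hedged sketch, not an actual test: how a cluster could be started through
    the same gateway object. new_cluster()/get_client() are standard
    dask_gateway APIs, but this flow is illustrative and not verified against
    this deployment."""
    options = dask_gateway_object.cluster_options()
    cluster = dask_gateway_object.new_cluster(options)
    return cluster.get_client()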
|
[] |
[] |
[
"JUPYTERHUB_API_TOKEN"
] |
[]
|
["JUPYTERHUB_API_TOKEN"]
|
python
| 1 | 0 | |
prestodb/client.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module implements the Presto protocol to submit SQL statements, track
their state and retrieve their result as described in
https://github.com/prestodb/presto/wiki/HTTP-Protocol
and Presto source code.
The outline of a query is:
- Send HTTP POST to the coordinator
- Retrieve HTTP response with ``nextUri``
- Get status of the query execution by sending a HTTP GET to the coordinator
Presto queries are managed by the ``PrestoQuery`` class. HTTP requests are
managed by the ``PrestoRequest`` class. the status of a query is represented
by ``PrestoStatus`` and the result by ``PrestoResult``.
The main interface is :class:`PrestoQuery`: ::
>> request = PrestoRequest(host='coordinator', port=8080, user='test')
>> query = PrestoQuery(request, sql)
>> rows = list(query.execute())
"""
from __future__ import absolute_import, division, print_function
import os
from typing import Any, Dict, List, Optional, Text, Tuple, Union # NOQA for mypy types
import prestodb.logging
import prestodb.redirect
import requests
from prestodb import constants, exceptions
from prestodb.transaction import NO_TRANSACTION
__all__ = ["PrestoQuery", "PrestoRequest"]
logger = prestodb.logging.get_logger(__name__)
MAX_ATTEMPTS = constants.DEFAULT_MAX_ATTEMPTS
SOCKS_PROXY = os.environ.get("SOCKS_PROXY")
if SOCKS_PROXY:
PROXIES = {"http": "socks5://" + SOCKS_PROXY, "https": "socks5://" + SOCKS_PROXY}
else:
PROXIES = None
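# Hedged example: with SOCKS_PROXY=localhost:1080 in the environment, PROXIES
# becomes {"http": "socks5://localhost:1080", "https": "socks5://localhost:1080"}
# and is passed to every HTTP call below; when the variable is unset, PROXIES
# stays None and requests connects directly.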
class ClientSession(object):
def __init__(
self,
catalog,
schema,
source,
user,
properties=None,
headers=None,
transaction_id=None,
):
self.catalog = catalog
self.schema = schema
self.source = source
self.user = user
if properties is None:
properties = {}
self._properties = properties
self._headers = headers or {}
self.transaction_id = transaction_id
@property
def properties(self):
return self._properties
@property
def headers(self):
return self._headers
def get_header_values(headers, header):
return [val.strip() for val in headers[header].split(",")]
def get_session_property_values(headers, header):
kvs = get_header_values(headers, header)
return [(k.strip(), v.strip()) for k, v in (kv.split("=", 1) for kv in kvs)]
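# Hedged illustration of the two helpers above, using a made-up header name:
#     get_header_values({"X-Example": "a=1, b=2"}, "X-Example")
#         -> ["a=1", "b=2"]
#     get_session_property_values({"X-Example": "a=1, b=2"}, "X-Example")
#         -> [("a", "1"), ("b", "2")]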
class PrestoStatus(object):
def __init__(self, id, stats, warnings, info_uri, next_uri, rows, columns=None):
self.id = id
self.stats = stats
self.warnings = warnings
self.info_uri = info_uri
self.next_uri = next_uri
self.rows = rows
self.columns = columns
def __repr__(self):
return (
"PrestoStatus("
"id={}, stats={{...}}, warnings={}, info_uri={}, next_uri={}, rows=<count={}>"
")".format(
self.id,
len(self.warnings),
self.info_uri,
self.next_uri,
len(self.rows),
)
)
class PrestoRequest(object):
"""
Manage the HTTP requests of a Presto query.
:param host: name of the coordinator
:param port: TCP port to connect to the coordinator
:param user: associated with the query. It is useful for access control
and query scheduling.
:param source: associated with the query. It is useful for access
control and query scheduling.
:param catalog: to query. The *catalog* is associated with a Presto
connector. This variable sets the default catalog used
by SQL statements. For example, if *catalog* is set
to ``some_catalog``, the SQL statement
``SELECT * FROM some_schema.some_table`` will actually
query the table
``some_catalog.some_schema.some_table``.
:param schema: to query. The *schema* is a logical abstraction to group
table. This variable sets the default schema used by
SQL statements. For example, if *schema* is set to
``some_schema``, the SQL statement
``SELECT * FROM some_table`` will actually query the
table ``some_catalog.some_schema.some_table``.
:param session_properties: set specific Presto behavior for the current
session. Please refer to the output of
``SHOW SESSION`` to check the available
properties.
:param http_headers: HTTP headers to post/get in the HTTP requests
:param http_scheme: "http" or "https"
:param auth: class that manages user authentication. ``None`` means no
authentication.
:param max_attempts: maximum number of attempts when sending HTTP requests. An
attempt is an HTTP request. 5 attempts means 4 retries.
:param request_timeout: How long (in seconds) to wait for the server to send
data before giving up, as a float or a
``(connect timeout, read timeout)`` tuple.
The client initiates a query by sending an HTTP POST to the
coordinator. It then gets a response back from the coordinator with:
- A URI to query to get the status for the query and the remaining
data
- A URI to get more information about the execution of the query
- Statistics about the current query execution
Please refer to :class:`PrestoStatus` to access the status returned by
:meth:`PrestoRequest.process`.
When the client makes an HTTP request, it may encounter the following
errors:
- Connection or read timeout:
- There is a network partition and TCP segments are
either dropped or delayed.
- The coordinator stalled because of an OS level stall (page allocation
stall, long time to page in pages, etc...), a JVM stall (full GC), or
an application level stall (thread starving, lock contention)
- Connection refused: Configuration or runtime issue on the coordinator
- Connection closed:
As most of these errors are transient, the caller should set the number of
retries with respect to how soon they want to notify the application that
uses the client of a failure.
"""
http = requests
HTTP_EXCEPTIONS = (
http.ConnectionError, # type: ignore
http.Timeout, # type: ignore
)
def __init__(
self,
host, # type: Text
port, # type: int
user, # type: Text
source=None, # type: Text
catalog=None, # type: Text
schema=None, # type: Text
session_properties=None, # type: Optional[Dict[Text, Any]]
http_session=None, # type: Any
http_headers=None, # type: Optional[Dict[Text, Text]]
transaction_id=NO_TRANSACTION, # type: Optional[Text]
http_scheme=constants.HTTP, # type: Text
auth=constants.DEFAULT_AUTH, # type: Optional[Any]
redirect_handler=prestodb.redirect.GatewayRedirectHandler(),
max_attempts=MAX_ATTEMPTS, # type: int
request_timeout=constants.DEFAULT_REQUEST_TIMEOUT, # type: Union[float, Tuple[float, float]]
handle_retry=exceptions.RetryWithExponentialBackoff(),
):
# type: (...) -> None
self._client_session = ClientSession(
catalog,
schema,
source,
user,
session_properties,
http_headers,
transaction_id,
)
self._host = host
self._port = port
self._next_uri = None # type: Optional[Text]
if http_session is not None:
self._http_session = http_session
else:
# mypy cannot follow module import
self._http_session = self.http.Session() # type: ignore
self._http_session.headers.update(self.http_headers)
self._exceptions = self.HTTP_EXCEPTIONS
self._auth = auth
if self._auth:
if http_scheme == constants.HTTP:
raise ValueError("cannot use authentication with HTTP")
self._auth.set_http_session(self._http_session)
self._exceptions += self._auth.get_exceptions()
self._redirect_handler = redirect_handler
self._request_timeout = request_timeout
self._handle_retry = handle_retry
self.max_attempts = max_attempts
self._http_scheme = http_scheme
@property
def transaction_id(self):
return self._client_session.transaction_id
@transaction_id.setter
def transaction_id(self, value):
self._client_session.transaction_id = value
@property
def http_headers(self):
# type: () -> Dict[Text, Text]
headers = {}
headers[constants.HEADER_CATALOG] = self._client_session.catalog
headers[constants.HEADER_SCHEMA] = self._client_session.schema
headers[constants.HEADER_SOURCE] = self._client_session.source
headers[constants.HEADER_USER] = self._client_session.user
headers[constants.HEADER_SESSION] = ",".join(
# ``name`` must not contain ``=``
"{}={}".format(name, value)
for name, value in self._client_session.properties.items()
)
# merge custom http headers
for key in self._client_session.headers:
if key in headers:
raise ValueError("cannot override reserved HTTP header {}".format(key))
headers.update(self._client_session.headers)
transaction_id = self._client_session.transaction_id
headers[constants.HEADER_TRANSACTION] = transaction_id
return headers
@property
def max_attempts(self):
# type: () -> int
return self._max_attempts
@max_attempts.setter
def max_attempts(self, value):
# type: (int) -> None
self._max_attempts = value
if value == 1: # No retry
self._get = self._http_session.get
self._post = self._http_session.post
self._delete = self._http_session.delete
return
with_retry = exceptions.retry_with(
self._handle_retry,
exceptions=self._exceptions,
conditions=(
# need retry when there is no exception but the status code is 503
lambda response: getattr(response, "status_code", None)
== 503,
),
max_attempts=self._max_attempts,
)
self._get = with_retry(self._http_session.get)
self._post = with_retry(self._http_session.post)
self._delete = with_retry(self._http_session.delete)
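# Hedged illustration: with max_attempts=5, a single logical get/post/delete may
# issue up to 5 HTTP requests (4 retries), retrying on the connection/timeout
# exceptions collected above and on responses with status code 503, paced by
# the handle_retry backoff policy passed to the constructor.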
def get_url(self, path):
# type: (Text) -> Text
return "{protocol}://{host}:{port}{path}".format(
protocol=self._http_scheme, host=self._host, port=self._port, path=path
)
@property
def statement_url(self):
# type: () -> Text
return self.get_url(constants.URL_STATEMENT_PATH)
@property
def next_uri(self):
# type: () -> Text
return self._next_uri
def post(self, sql):
data = sql.encode("utf-8")
http_headers = self.http_headers
http_response = self._post(
self.statement_url,
data=data,
headers=http_headers,
timeout=self._request_timeout,
allow_redirects=self._redirect_handler is None,
proxies=PROXIES,
)
if self._redirect_handler is not None:
while http_response is not None and http_response.is_redirect:
location = http_response.headers["Location"]
url = self._redirect_handler.handle(location)
logger.info(
"redirect {} from {} to {}".format(
http_response.status_code, location, url
)
)
http_response = self._post(
url,
data=data,
headers=http_headers,
timeout=self._request_timeout,
allow_redirects=False,
proxies=PROXIES,
)
return http_response
def get(self, url):
return self._get(
url,
headers=self.http_headers,
timeout=self._request_timeout,
proxies=PROXIES,
)
def delete(self, url):
return self._delete(url, timeout=self._request_timeout, proxies=PROXIES)
def _process_error(self, error, query_id):
error_type = error["errorType"]
if error_type == "EXTERNAL":
raise exceptions.PrestoExternalError(error, query_id)
elif error_type == "USER_ERROR":
return exceptions.PrestoUserError(error, query_id)
return exceptions.PrestoQueryError(error, query_id)
def raise_response_error(self, http_response):
if http_response.status_code == 503:
raise exceptions.Http503Error("error 503: service unavailable")
raise exceptions.HttpError(
"error {}{}".format(
http_response.status_code,
": {}".format(http_response.content) if http_response.content else "",
)
)
def process(self, http_response):
# type: (requests.Response) -> PrestoStatus
if not http_response.ok:
self.raise_response_error(http_response)
http_response.encoding = "utf-8"
response = http_response.json()
logger.debug("HTTP {}: {}".format(http_response.status_code, response))
if "error" in response:
raise self._process_error(response["error"], response.get("id"))
if constants.HEADER_CLEAR_SESSION in http_response.headers:
for prop in get_header_values(
http_response.headers, constants.HEADER_CLEAR_SESSION
):
self._client_session.properties.pop(prop, None)
if constants.HEADER_SET_SESSION in http_response.headers:
for key, value in get_session_property_values(
http_response.headers, constants.HEADER_SET_SESSION
):
self._client_session.properties[key] = value
self._next_uri = response.get("nextUri")
return PrestoStatus(
id=response["id"],
stats=response["stats"],
warnings=response.get("warnings", []),
info_uri=response["infoUri"],
next_uri=self._next_uri,
rows=response.get("data", []),
columns=response.get("columns"),
)
class PrestoResult(object):
"""
Represent the result of a Presto query as an iterator on rows.
This class implements the iterator protocol as a generator type
https://docs.python.org/3/library/stdtypes.html#generator-types
"""
def __init__(self, query, rows=None):
self._query = query
self._rows = rows or []
self._rownumber = 0
@property
def rownumber(self):
# type: () -> int
return self._rownumber
def __iter__(self):
# Initial fetch from the first POST request
for row in self._rows:
self._rownumber += 1
yield row
self._rows = None
# Subsequent fetches from GET requests until next_uri is empty.
while not self._query.is_finished():
rows = self._query.fetch()
for row in rows:
self._rownumber += 1
logger.debug("row {}".format(row))
yield row
class PrestoQuery(object):
"""Represent the execution of a SQL statement by Presto."""
def __init__(
self,
request, # type: PrestoRequest
sql, # type: Text
):
# type: (...) -> None
self.query_id = None # type: Optional[Text]
self._stats = {} # type: Dict[Any, Any]
self._warnings = [] # type: List[Dict[Any, Any]]
self._columns = None # type: Optional[List[Text]]
self._finished = False
self._cancelled = False
self._request = request
self._sql = sql
self._result = PrestoResult(self)
@property
def columns(self):
return self._columns
@property
def stats(self):
return self._stats
@property
def warnings(self):
return self._warnings
@property
def result(self):
return self._result
def execute(self):
# type: () -> PrestoResult
"""Initiate a Presto query by sending the SQL statement
This is the first HTTP request sent to the coordinator.
It sets the query_id and returns a Result object used to
track the rows returned by the query. To fetch all rows,
call fetch() until is_finished is true.
"""
if self._cancelled:
raise exceptions.PrestoUserError("Query has been cancelled", self.query_id)
response = self._request.post(self._sql)
status = self._request.process(response)
self.query_id = status.id
self._stats.update({u"queryId": self.query_id})
self._stats.update(status.stats)
self._warnings = getattr(status, "warnings", [])
if status.next_uri is None:
self._finished = True
self._result = PrestoResult(self, status.rows)
return self._result
def fetch(self):
# type: () -> List[List[Any]]
"""Continue fetching data for the current query_id"""
response = self._request.get(self._request.next_uri)
status = self._request.process(response)
if status.columns:
self._columns = status.columns
self._stats.update(status.stats)
logger.debug(status)
if status.next_uri is None:
self._finished = True
return status.rows
def cancel(self):
# type: () -> None
"""Cancel the current query"""
if self.query_id is None or self.is_finished():
return
self._cancelled = True
url = self._request.get_url("/v1/query/{}".format(self.query_id))
logger.debug("cancelling query: %s", self.query_id)
response = self._request.delete(url)
logger.info(response)
if response.status_code == requests.codes.no_content:
logger.debug("query cancelled: %s", self.query_id)
return
self._request.raise_response_error(response)
def is_finished(self):
# type: () -> bool
return self._finished
|
[] |
[] |
[
"SOCKS_PROXY"
] |
[]
|
["SOCKS_PROXY"]
|
python
| 1 | 0 | |
tests/ignite/distributed/utils/test_native.py
|
import os
import pytest
import torch
import torch.distributed as dist
import ignite.distributed as idist
from ignite.distributed.utils import has_native_dist_support
from tests.ignite.distributed.utils import (
_test_distrib_all_gather,
_test_distrib_all_reduce,
_test_distrib_barrier,
_test_distrib_broadcast,
_test_distrib_config,
_test_distrib_one_rank_only,
_test_distrib_one_rank_only_with_engine,
_test_sync,
)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_native_distrib_single_node_launch_tool_gloo(local_rank, world_size):
import os
from datetime import timedelta
timeout = timedelta(seconds=20)
rank = local_rank
os.environ["RANK"] = f"{rank}"
idist.initialize("gloo", timeout=timeout)
_test_distrib_config(local_rank, "gloo", world_size, "cpu", rank)
idist.finalize()
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_native_distrib_single_node_launch_tool_nccl(local_rank, world_size):
import os
rank = local_rank
os.environ["RANK"] = f"{rank}"
idist.initialize("nccl")
_test_distrib_config(local_rank, "nccl", world_size, "cuda", rank)
idist.finalize()
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_native_distrib_single_node_spawn_gloo():
from datetime import timedelta
timeout = timedelta(seconds=20)
world_size = 4
idist.spawn(
"gloo", _test_distrib_config, args=("gloo", world_size, "cpu"), nproc_per_node=world_size, timeout=timeout
)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_native_distrib_single_node_spawn_nccl():
world_size = torch.cuda.device_count()
idist.spawn("nccl", _test_distrib_config, args=("nccl", world_size, "cuda"), nproc_per_node=world_size)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_sync_as_native_gloo(distributed_context_single_node_gloo):
from ignite.distributed.comp_models.native import _NativeDistModel
_test_sync(_NativeDistModel)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_sync_as_native_nccl(distributed_context_single_node_nccl):
from ignite.distributed.comp_models.native import _NativeDistModel
_test_sync(_NativeDistModel)
def _test_idist_methods_in_native_context(backend, device, local_rank):
# We explicitly set _model as _SerialModel
# then call idist.* methods and check that they give correct values
from ignite.distributed.utils import _set_model, _SerialModel
_set_model(_SerialModel())
ws = dist.get_world_size()
rank = dist.get_rank()
_test_distrib_config(local_rank, backend=backend, ws=ws, true_device=device, rank=rank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_methods_in_native_gloo_context(distributed_context_single_node_gloo):
local_rank = distributed_context_single_node_gloo["local_rank"]
_test_idist_methods_in_native_context("gloo", "cpu", local_rank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_methods_in_native_nccl_context(distributed_context_single_node_nccl):
local_rank = distributed_context_single_node_nccl["local_rank"]
_test_idist_methods_in_native_context("nccl", "cuda", local_rank)
def _test_idist_methods_in_native_context_set_local_rank(backend, device, local_rank):
# We explicitly set _model as _SerialModel
# then call idist.* methods and check that they give correct values
from ignite.distributed.utils import _set_model, _SerialModel
_set_model(_SerialModel())
lrank = int(os.environ["LOCAL_RANK"])
del os.environ["LOCAL_RANK"]
ws = dist.get_world_size()
rank = dist.get_rank()
idist.set_local_rank(local_rank)
_test_distrib_config(local_rank=local_rank, backend=backend, ws=ws, true_device=device, rank=rank)
os.environ["LOCAL_RANK"] = str(lrank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_methods_in_native_gloo_context_set_local_rank(distributed_context_single_node_gloo):
local_rank = distributed_context_single_node_gloo["local_rank"]
_test_idist_methods_in_native_context_set_local_rank("gloo", "cpu", local_rank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_methods_in_native_nccl_context_set_local_rank(distributed_context_single_node_nccl):
local_rank = distributed_context_single_node_nccl["local_rank"]
_test_idist_methods_in_native_context_set_local_rank("nccl", "cuda", local_rank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_all_reduce_nccl(distributed_context_single_node_nccl):
device = f"cuda:{distributed_context_single_node_nccl['local_rank']}"
_test_distrib_all_reduce(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_all_reduce_gloo(distributed_context_single_node_gloo):
device = "cpu"
_test_distrib_all_reduce(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_all_gather_nccl(distributed_context_single_node_nccl):
device = f"cuda:{distributed_context_single_node_nccl['local_rank']}"
_test_distrib_all_gather(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_all_gather_gloo(distributed_context_single_node_gloo):
device = "cpu"
_test_distrib_all_gather(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_broadcast_nccl(distributed_context_single_node_nccl):
device = f"cuda:{distributed_context_single_node_nccl['local_rank']}"
_test_distrib_broadcast(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_broadcast_gloo(distributed_context_single_node_gloo):
device = "cpu"
_test_distrib_broadcast(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_barrier_nccl(distributed_context_single_node_nccl):
device = f"cuda:{distributed_context_single_node_nccl['local_rank']}"
_test_distrib_barrier(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_barrier_gloo(distributed_context_single_node_gloo):
device = "cpu"
_test_distrib_barrier(device)
def _test_idist_methods_overhead(ok_factor):
import time
n = 100000
m = 5
t2 = 0.0
t1 = 0.0
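    # t2 accumulates the average per-call time of the raw torch.distributed
    # calls, t1 the same for the ignite.distributed wrappers; the wrapper is
    # expected to be at most `ok_factor` times slower (see the assert below).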
for j in range(m):
start = time.time()
for _ in range(n):
_ = dist.get_world_size()
_ = dist.get_rank()
elapsed = time.time() - start
t2 += elapsed / n / m
start = time.time()
for _ in range(n):
_ = idist.get_world_size()
_ = idist.get_rank()
elapsed = time.time() - start
t1 += elapsed / n / m
overhead_factor = t1 / t2
assert overhead_factor < ok_factor, f"{overhead_factor} vs {ok_factor} | {t2} vs {t1}"
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="Do not want to run this test on Github or Travis, but CircleCI"
)
def test_idist_methods_overhead_gloo(distributed_context_single_node_gloo):
_test_idist_methods_overhead(2.5)
idist.sync()
from ignite.distributed.utils import _model
from ignite.distributed.comp_models.native import _NativeDistModel
assert isinstance(_model, _NativeDistModel)
_test_idist_methods_overhead(1.7)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_methods_overhead_nccl(distributed_context_single_node_nccl):
_test_idist_methods_overhead(2.5)
idist.sync()
from ignite.distributed.utils import _model
from ignite.distributed.comp_models.native import _NativeDistModel
assert isinstance(_model, _NativeDistModel)
_test_idist_methods_overhead(1.7)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_one_rank_only_gloo(distributed_context_single_node_gloo):
device = "cpu"
_test_distrib_one_rank_only(device=device)
_test_distrib_one_rank_only_with_engine(device=device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_one_rank_only_nccl(local_rank, distributed_context_single_node_nccl):
device = f"cuda:{local_rank}"
_test_distrib_one_rank_only(device=device)
_test_distrib_one_rank_only_with_engine(device=device)
| [] | [] | ["LOCAL_RANK", "RANK"] | [] | ["LOCAL_RANK", "RANK"] | python | 2 | 0 | |
service/events/stream/memory/memory.go
|
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Original source: github.com/micro/go-micro/v3/events/stream/memory/memory.go
package memory
import (
"encoding/json"
"fmt"
"sync"
"time"
"github.com/google/uuid"
"github.com/micro-community/micro/v3/service/events"
"github.com/micro-community/micro/v3/service/logger"
"github.com/micro-community/micro/v3/service/store"
"github.com/micro-community/micro/v3/service/store/memory"
"github.com/pkg/errors"
)
// NewStream returns an initialized memory stream
func NewStream(opts ...Option) (events.Stream, error) {
// parse the options
var options Options
for _, o := range opts {
o(&options)
}
if options.Store == nil {
options.Store = memory.NewStore()
}
return &mem{store: options.Store}, nil
}
type subscriber struct {
Group string
Topic string
Channel chan events.Event
sync.RWMutex
retryMap map[string]int
retryLimit int
autoAck bool
ackWait time.Duration
}
type mem struct {
store store.Store
subs []*subscriber
sync.RWMutex
}
func (m *mem) Publish(topic string, msg interface{}, opts ...events.PublishOption) error {
// validate the topic
if len(topic) == 0 {
return events.ErrMissingTopic
}
// parse the options
options := events.PublishOptions{
Timestamp: time.Now(),
}
for _, o := range opts {
o(&options)
}
// encode the message if it's not already encoded
var payload []byte
if p, ok := msg.([]byte); ok {
payload = p
} else {
p, err := json.Marshal(msg)
if err != nil {
return events.ErrEncodingMessage
}
payload = p
}
// construct the event
event := &events.Event{
ID: uuid.New().String(),
Topic: topic,
Timestamp: options.Timestamp,
Metadata: options.Metadata,
Payload: payload,
}
// serialize the event to bytes
bytes, err := json.Marshal(event)
if err != nil {
return errors.Wrap(err, "Error encoding event")
}
// write to the store
key := fmt.Sprintf("%v/%v", event.Topic, event.ID)
if err := m.store.Write(&store.Record{Key: key, Value: bytes}); err != nil {
return errors.Wrap(err, "Error writing event to store")
}
// send to the subscribers async
go m.handleEvent(event)
return nil
}
func (m *mem) Consume(topic string, opts ...events.ConsumeOption) (<-chan events.Event, error) {
// validate the topic
if len(topic) == 0 {
return nil, events.ErrMissingTopic
}
// parse the options
options := events.ConsumeOptions{
Group: uuid.New().String(),
AutoAck: true,
}
for _, o := range opts {
o(&options)
}
// TODO RetryLimit
// setup the subscriber
sub := &subscriber{
Channel: make(chan events.Event),
Topic: topic,
Group: options.Group,
retryMap: map[string]int{},
autoAck: true,
retryLimit: options.GetRetryLimit(),
}
if !options.AutoAck {
if options.AckWait == 0 {
return nil, fmt.Errorf("invalid AckWait passed, should be positive integer")
}
sub.autoAck = options.AutoAck
sub.ackWait = options.AckWait
}
// register the subscriber
m.Lock()
m.subs = append(m.subs, sub)
m.Unlock()
// lookup previous events if the start time option was passed
if options.Offset.Unix() > 0 {
go m.lookupPreviousEvents(sub, options.Offset)
}
// return the channel
return sub.Channel, nil
}
// lookupPreviousEvents finds stored events for a subscriber which occurred at or after a given
// start time and sends them into the subscriber's channel
func (m *mem) lookupPreviousEvents(sub *subscriber, startTime time.Time) {
// lookup all events which match the topic (a blank topic will return all results)
recs, err := m.store.Read(sub.Topic+"/", store.ReadPrefix())
	if err != nil {
		if logger.V(logger.ErrorLevel, logger.DefaultLogger) {
			logger.Errorf("Error looking up previous events: %v", err)
		}
		return
	}
// loop through the records and send it to the channel if it matches
for _, r := range recs {
var ev events.Event
if err := json.Unmarshal(r.Value, &ev); err != nil {
continue
}
if ev.Timestamp.Unix() < startTime.Unix() {
continue
}
sendEvent(&ev, sub)
}
}
// handleEvent sends the event to any registered subscribers.
func (m *mem) handleEvent(ev *events.Event) {
m.RLock()
subs := m.subs
m.RUnlock()
// filteredSubs is a KV map of the queue name and subscribers. This is used to prevent a message
// being sent to two subscribers with the same queue.
filteredSubs := map[string]*subscriber{}
// filter down to subscribers who are interested in this topic
for _, sub := range subs {
if len(sub.Topic) == 0 || sub.Topic == ev.Topic {
filteredSubs[sub.Group] = sub
}
}
// send the message to each channel async (since one channel might be blocked)
for _, sub := range filteredSubs {
sendEvent(ev, sub)
}
}
func sendEvent(ev *events.Event, sub *subscriber) {
go func(s *subscriber) {
evCopy := *ev
if s.autoAck {
s.Channel <- evCopy
return
}
evCopy.SetAckFunc(ackFunc(s, evCopy))
evCopy.SetNackFunc(nackFunc(s, evCopy))
s.retryMap[evCopy.ID] = 0
tick := time.NewTicker(s.ackWait)
defer tick.Stop()
for range tick.C {
s.Lock()
count, ok := s.retryMap[evCopy.ID]
s.Unlock()
if !ok {
// success
break
}
if s.retryLimit > -1 && count > s.retryLimit {
if logger.V(logger.ErrorLevel, logger.DefaultLogger) {
logger.Errorf("Message retry limit reached, discarding: %v %d %d", evCopy.ID, count, s.retryLimit)
}
s.Lock()
delete(s.retryMap, evCopy.ID)
s.Unlock()
return
}
s.Channel <- evCopy
s.Lock()
s.retryMap[evCopy.ID] = count + 1
s.Unlock()
}
}(sub)
}
func ackFunc(s *subscriber, evCopy events.Event) func() error {
return func() error {
s.Lock()
delete(s.retryMap, evCopy.ID)
s.Unlock()
return nil
}
}
func nackFunc(s *subscriber, evCopy events.Event) func() error {
return func() error {
return nil
}
}
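// exampleUsage is an illustrative sketch (not part of the package API): it
// publishes a message and consumes it from the in-memory stream. The topic
// name and payload are made up for the example.
func exampleUsage() error {
	stream, err := NewStream()
	if err != nil {
		return err
	}
	// register the consumer first so its channel receives the publish below
	ch, err := stream.Consume("greetings")
	if err != nil {
		return err
	}
	if err := stream.Publish("greetings", map[string]string{"msg": "hello"}); err != nil {
		return err
	}
	ev := <-ch
	fmt.Printf("received event %s on topic %s\n", ev.ID, ev.Topic)
	return nil
}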
| [] | [] | [] | [] | [] | go | null | null | null |
main.py
|
import os
from app import create_app, socketio
server_app = create_app()
server_app.config['UPLOAD_FOLDER'] = 'uploads'
if __name__ == '__main__':
    port = int(os.environ.get('PORT', 8080))
    socketio.run(server_app, host="0.0.0.0", port=port, debug=True)
| [] | [] | ["PORT"] | [] | ["PORT"] | python | 1 | 0 | |
src/main/java/com/artipie/api/AuthApi.java
|
/*
* The MIT License (MIT)
*
* Copyright (c) 2020 artipie.com
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.artipie.api;
import com.artipie.http.auth.Authentication;
import com.artipie.http.auth.BasicIdentities;
import com.artipie.http.auth.Identities;
import com.artipie.http.rq.RequestLineFrom;
import com.artipie.http.rq.RqHeaders;
import com.jcabi.log.Logger;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.GeneralSecurityException;
import java.security.KeyFactory;
import java.security.spec.KeySpec;
import java.security.spec.PKCS8EncodedKeySpec;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.crypto.Cipher;
import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.binary.Hex;
/**
* API authentication wrapper.
* @since 0.6
*/
public final class AuthApi implements Identities {
/**
* URI path pattern.
*/
private static final Pattern PTN_PATH =
Pattern.compile("(?:/api/\\w+)?/(?<user>[^/.]+)(?:/.*)?");
/**
* Origin authentication.
*/
private final Authentication auth;
/**
* Wraps authentication with API restrictions.
* @param auth Origin
*/
public AuthApi(final Authentication auth) {
this.auth = auth;
}
@Override
public Optional<String> user(final String line,
final Iterable<Map.Entry<String, String>> headers) {
final Matcher matcher = PTN_PATH.matcher(new RequestLineFrom(line).uri().getPath());
final Optional<String> res;
if (matcher.matches()) {
res = Optional.ofNullable(
AuthApi.cookies(new RqHeaders(headers, "Cookie")).get("session")
).map(AuthApi::session)
.orElse(new BasicIdentities(this.auth).user(line, headers))
.filter(user -> user.equals(matcher.group("user")));
} else {
res = Optional.empty();
}
return res;
}
/**
* Map of cookies.
* @param raw Raw strings of cookie headers
* @return Cookies map
*/
private static Map<String, String> cookies(final Iterable<String> raw) {
final Map<String, String> map = new HashMap<>(0);
for (final String value : raw) {
for (final String pair : value.split(";")) {
final String[] parts = pair.split("=", 2);
final String key = parts[0].trim().toLowerCase(Locale.US);
if (parts.length > 1 && !parts[1].isEmpty()) {
map.put(key, parts[1].trim());
} else {
map.remove(key);
}
}
}
return map;
}
/**
* Decode session id to user name.
* <p>
* Encoded session string is hex of user id encrypted with RSA public key.
* See cipher and key spec format for more details.
* </p>
* @param encoded Encoded string
* @return User id
*/
@SuppressWarnings("PMD.PreserveStackTrace")
private static Optional<String> session(final String encoded) {
final String env = System.getenv("ARTIPIE_SESSION_KEY");
final Optional<String> user;
if (env == null) {
user = Optional.empty();
} else {
final byte[] key;
try {
key = Files.readAllBytes(Paths.get(env));
final KeySpec spec = new PKCS8EncodedKeySpec(key);
final Cipher rsa = Cipher.getInstance("RSA/ECB/OAEPWithSHA1AndMGF1Padding");
rsa.init(Cipher.DECRYPT_MODE, KeyFactory.getInstance("RSA").generatePrivate(spec));
user = Optional.of(
new String(
rsa.doFinal(Hex.decodeHex(encoded.toCharArray())),
StandardCharsets.UTF_8
)
);
} catch (final IOException | DecoderException | GeneralSecurityException err) {
                Logger.error(AuthApi.class, "Failed to read session cookie: %[exception]s", err);
throw new IllegalStateException("Failed to read session cookie");
}
}
return user;
}
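    /**
     * Illustrative counterpart of {@code session()} (a sketch, not part of
     * Artipie's API): shows how a session cookie value could be produced by
     * encrypting a user id with the RSA public key matching the private key
     * above. The key bytes and user name are placeholders supplied by the caller.
     * @param pub X.509 encoded RSA public key bytes
     * @param user User id to encode
     * @return Hex encoded encrypted user id
     * @throws GeneralSecurityException On cipher or key errors
     */
    private static String encodeSession(final byte[] pub, final String user)
        throws GeneralSecurityException {
        final Cipher rsa = Cipher.getInstance("RSA/ECB/OAEPWithSHA1AndMGF1Padding");
        rsa.init(
            Cipher.ENCRYPT_MODE,
            KeyFactory.getInstance("RSA").generatePublic(
                new java.security.spec.X509EncodedKeySpec(pub)
            )
        );
        return Hex.encodeHexString(rsa.doFinal(user.getBytes(StandardCharsets.UTF_8)));
    }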
}
| ["\"ARTIPIE_SESSION_KEY\""] | [] | ["ARTIPIE_SESSION_KEY"] | [] | ["ARTIPIE_SESSION_KEY"] | java | 1 | 0 | |
gender_recognition_ai/gender_ai_proj/asgi.py
|
"""
ASGI config for gender_ai_proj project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gender_ai_proj.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
aiven/resource_service_mirrormaker_test.go
|
package aiven
import (
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
"os"
"testing"
)
// Kafka MirrorMaker service tests
func TestAccAivenService_mirrormaker(t *testing.T) {
resourceName := "aiven_service.bar"
rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
ProviderFactories: testAccProviderFactories,
CheckDestroy: testAccCheckAivenServiceResourceDestroy,
Steps: []resource.TestStep{
{
Config: testAccMirrorMakerServiceResource(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckAivenServiceMirrorMakerAttributes("data.aiven_service.service"),
resource.TestCheckResourceAttr(resourceName, "service_name", fmt.Sprintf("test-acc-sr-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "state", "RUNNING"),
resource.TestCheckResourceAttr(resourceName, "project", os.Getenv("AIVEN_PROJECT_NAME")),
resource.TestCheckResourceAttr(resourceName, "service_type", "kafka_mirrormaker"),
resource.TestCheckResourceAttr(resourceName, "cloud_name", "google-europe-west1"),
resource.TestCheckResourceAttr(resourceName, "state", "RUNNING"),
resource.TestCheckResourceAttr(resourceName, "termination_protection", "false"),
),
},
},
})
}
func testAccMirrorMakerServiceResource(name string) string {
return fmt.Sprintf(`
data "aiven_project" "foo" {
project = "%s"
}
resource "aiven_service" "bar" {
project = data.aiven_project.foo.project
cloud_name = "google-europe-west1"
plan = "startup-4"
service_name = "test-acc-sr-%s"
service_type = "kafka_mirrormaker"
kafka_mirrormaker_user_config {
ip_filter = ["0.0.0.0/0"]
kafka_mirrormaker {
refresh_groups_interval_seconds = 600
refresh_topics_enabled = true
refresh_topics_interval_seconds = 600
}
}
}
data "aiven_service" "service" {
service_name = aiven_service.bar.service_name
project = aiven_service.bar.project
depends_on = [aiven_service.bar]
}
`, os.Getenv("AIVEN_PROJECT_NAME"), name)
}
func testAccCheckAivenServiceMirrorMakerAttributes(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
r := s.RootModule().Resources[n]
a := r.Primary.Attributes
if a["service_type"] != "kafka_mirrormaker" {
return fmt.Errorf("expected to get a correct service type from Aiven, got :%s", a["service_type"])
}
if a["kafka_mirrormaker_user_config.0.kafka_mirrormaker.0.refresh_groups_interval_seconds"] != "600" {
return fmt.Errorf("expected to get a correct refresh_groups_interval_seconds from Aiven")
}
if a["kafka_mirrormaker_user_config.0.kafka_mirrormaker.0.refresh_topics_enabled"] != "true" {
return fmt.Errorf("expected to get a correct refresh_topics_enabled from Aiven")
}
if a["kafka_mirrormaker_user_config.0.kafka_mirrormaker.0.refresh_topics_interval_seconds"] != "600" {
return fmt.Errorf("expected to get a correct refresh_topics_interval_seconds from Aiven")
}
return nil
}
}
| ["\"AIVEN_PROJECT_NAME\"", "\"AIVEN_PROJECT_NAME\""] | [] | ["AIVEN_PROJECT_NAME"] | [] | ["AIVEN_PROJECT_NAME"] | go | 1 | 0 | |
lhotse/dataset/speech_synthesis.py
|
from pathlib import Path
from typing import Dict, List, Optional
import torch
from torch.utils.data import Dataset
from lhotse.cut import CutSet
from lhotse.utils import Pathlike
EPS = 1e-8
class SpeechSynthesisDataset(Dataset):
"""
The PyTorch Dataset for the speech synthesis task.
Each item in this dataset is a dict of:
.. code-block::
{
'audio': (1 x NumSamples) tensor
'features': (NumFrames x NumFeatures) tensor
'tokens': list of characters
}
"""
def __init__(
self,
cuts: CutSet,
root_dir: Optional[Pathlike] = None
):
super().__init__()
self.cuts = cuts
self.root_dir = Path(root_dir) if root_dir else None
self.cut_ids = list(self.cuts.ids)
# generate tokens from text
self.id_to_token = {}
self.token_set = set()
for cut in cuts:
assert len(cut.supervisions) == 1, 'Only the Cuts with single supervision are supported.'
characters = list(cut.supervisions[0].text)
self.token_set.update(set(characters))
self.id_to_token[cut.id] = characters
        self.token_set = sorted(self.token_set)
def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
cut_id = self.cut_ids[idx]
cut = self.cuts[cut_id]
features = torch.from_numpy(cut.load_features())
audio = torch.from_numpy(cut.load_audio())
assert cut.id in self.id_to_token
return {
'audio': audio,
'features': features,
'tokens': self.id_to_token[cut.id]
}
def __len__(self) -> int:
return len(self.cut_ids)
@property
def tokens(self) -> List[str]:
return self.token_set
| [] | [] | [] | [] | [] | python | null | null | null |
local/lib/python3.6/site-packages/pgadmin4/regression/feature_utils/app_starter.py
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2018, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import os
import subprocess
import signal
import random
import time
from selenium.common.exceptions import WebDriverException
class AppStarter:
""" Helper for starting the full pgadmin4 app and loading the page via
selenium
"""
def __init__(self, driver, app_config):
self.driver = driver
self.app_config = app_config
def start_app(self):
""" This function start the subprocess to start pgAdmin app """
random_server_port = str(random.randint(10000, 65535))
env = {
"PGADMIN_PORT": random_server_port,
"SQLITE_PATH": str(self.app_config.TEST_SQLITE_PATH)
}
env.update(os.environ)
# Add OS check for pass value for 'preexec_fn'
self.pgadmin_process = subprocess.Popen(
["python", "pgAdmin4.py"],
shell=False,
preexec_fn=None if os.name == 'nt' else os.setsid,
stderr=open(os.devnull, 'w'),
env=env
)
def launch_browser(retry_count):
try:
self.driver.get(
"http://" + self.app_config.DEFAULT_SERVER + ":" +
random_server_port
)
except WebDriverException as e:
                # In case of WebDriverException sleep for 1 second and retry
                # again. Retry up to 60 times; if the app still has not
                # started, raise an exception.
time.sleep(1)
if retry_count < 60:
retry_count = retry_count + 1
launch_browser(retry_count)
else:
raise Exception('Unable to start python server even after '
'retrying 60 times.')
launch_browser(0)
def stop_app(self):
""" This function stop the started app by killing process """
self.driver.quit()
        # os.killpg is supported on Mac and Unix; this function is not
        # available on Windows
try:
os.killpg(os.getpgid(self.pgadmin_process.pid), signal.SIGTERM)
except AttributeError:
# os.kill is supported by Windows
os.kill(self.pgadmin_process.pid, signal.SIGTERM)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
deafwave/wallet/cc_wallet/ccparent.py
|
from dataclasses import dataclass
from typing import Optional
from deafwave.types.blockchain_format.sized_bytes import bytes32
from deafwave.util.ints import uint64
from deafwave.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class CCParent(Streamable):
parent_name: bytes32
inner_puzzle_hash: Optional[bytes32]
amount: uint64
| [] | [] | [] | [] | [] | python | null | null | null |
docs/serving/samples/knative-routing-go/sample.go
|
/*
Copyright 2018 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"log"
"net/http"
"os"
)
func handler(w http.ResponseWriter, r *http.Request) {
serviceName := os.Getenv("SERVICE_NAME")
if serviceName == "" {
serviceName = "NOT SPECIFIED"
}
log.Printf("%s received a request.", serviceName)
fmt.Fprintf(w, "%s is called !\n", serviceName)
}
func main() {
flag.Parse()
log.Print("Sample started.")
http.HandleFunc("/", handler)
http.ListenAndServe(":8080", nil)
}
| ["\"SERVICE_NAME\""] | [] | ["SERVICE_NAME"] | [] | ["SERVICE_NAME"] | go | 1 | 0 | |
cli/cmd/root.go
|
package cmd
import (
"bytes"
"fmt"
"os"
"regexp"
"strings"
"time"
"github.com/fatih/color"
"github.com/linkerd/linkerd2/cli/flag"
jaeger "github.com/linkerd/linkerd2/jaeger/cmd"
multicluster "github.com/linkerd/linkerd2/multicluster/cmd"
viz "github.com/linkerd/linkerd2/viz/cmd"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/clientcmd"
)
const (
defaultLinkerdNamespace = "linkerd"
defaultCNINamespace = "linkerd-cni"
defaultLinkerdVizNamespace = "linkerd-viz"
defaultClusterDomain = "cluster.local"
defaultDockerRegistry = "ghcr.io/linkerd"
jsonOutput = "json"
tableOutput = "table"
wideOutput = "wide"
maxRps = 100.0
)
var (
// special handling for Windows, on all other platforms these resolve to
// os.Stdout and os.Stderr, thanks to https://github.com/mattn/go-colorable
stdout = color.Output
stderr = color.Error
okStatus = color.New(color.FgGreen, color.Bold).SprintFunc()("\u221A") // √
warnStatus = color.New(color.FgYellow, color.Bold).SprintFunc()("\u203C") // ‼
failStatus = color.New(color.FgRed, color.Bold).SprintFunc()("\u00D7") // ×
controlPlaneNamespace string
cniNamespace string
apiAddr string // An empty value means "use the Kubernetes configuration"
kubeconfigPath string
kubeContext string
defaultNamespace string // Default namespace taken from current kubectl context
impersonate string
impersonateGroup []string
verbose bool
// These regexs are not as strict as they could be, but are a quick and dirty
// sanity check against illegal characters.
alphaNumDash = regexp.MustCompile(`^[a-zA-Z0-9-]+$`)
alphaNumDashDot = regexp.MustCompile(`^[\.a-zA-Z0-9-]+$`)
alphaNumDashDotSlashColon = regexp.MustCompile(`^[\./a-zA-Z0-9-:]+$`)
// Full Rust log level syntax at
// https://docs.rs/env_logger/0.6.0/env_logger/#enabling-logging
r = strings.NewReplacer("\t", "", "\n", "")
validProxyLogLevel = regexp.MustCompile(r.Replace(`
^(
(
(trace|debug|warn|info|error)|
(\w|::)+|
((\w|::)+=(trace|debug|warn|info|error))
)(?:,|$)
)+$`))
)
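// For illustration (not an exhaustive list): validProxyLogLevel accepts values
// such as "info", "warn,linkerd=debug" and "linkerd2_proxy::http=trace", and
// rejects strings with unknown level names after "=" or with other separators.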
// RootCmd represents the root Cobra command
var RootCmd = &cobra.Command{
Use: "linkerd",
Short: "linkerd manages the Linkerd service mesh",
Long: `linkerd manages the Linkerd service mesh.`,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
// enable / disable logging
if verbose {
log.SetLevel(log.DebugLevel)
} else {
log.SetLevel(log.PanicLevel)
}
controlPlaneNamespaceFromEnv := os.Getenv("LINKERD_NAMESPACE")
if controlPlaneNamespace == defaultLinkerdNamespace && controlPlaneNamespaceFromEnv != "" {
controlPlaneNamespace = controlPlaneNamespaceFromEnv
}
if !alphaNumDash.MatchString(controlPlaneNamespace) {
return fmt.Errorf("%s is not a valid namespace", controlPlaneNamespace)
}
return nil
},
}
func init() {
defaultNamespace = getDefaultNamespace()
RootCmd.PersistentFlags().StringVarP(&controlPlaneNamespace, "linkerd-namespace", "L", defaultLinkerdNamespace, "Namespace in which Linkerd is installed ($LINKERD_NAMESPACE)")
RootCmd.PersistentFlags().StringVarP(&cniNamespace, "cni-namespace", "", defaultCNINamespace, "Namespace in which the Linkerd CNI plugin is installed")
RootCmd.PersistentFlags().StringVar(&kubeconfigPath, "kubeconfig", "", "Path to the kubeconfig file to use for CLI requests")
RootCmd.PersistentFlags().StringVar(&kubeContext, "context", "", "Name of the kubeconfig context to use")
RootCmd.PersistentFlags().StringVar(&impersonate, "as", "", "Username to impersonate for Kubernetes operations")
RootCmd.PersistentFlags().StringArrayVar(&impersonateGroup, "as-group", []string{}, "Group to impersonate for Kubernetes operations")
RootCmd.PersistentFlags().StringVar(&apiAddr, "api-addr", "", "Override kubeconfig and communicate directly with the control plane at host:port (mostly for testing)")
RootCmd.PersistentFlags().BoolVar(&verbose, "verbose", false, "Turn on debug logging")
RootCmd.AddCommand(newCmdAlpha())
RootCmd.AddCommand(newCmdCheck())
RootCmd.AddCommand(newCmdCompletion())
RootCmd.AddCommand(newCmdDashboard())
RootCmd.AddCommand(newCmdDiagnostics())
RootCmd.AddCommand(newCmdDoc())
RootCmd.AddCommand(newCmdEdges())
RootCmd.AddCommand(newCmdEndpoints())
RootCmd.AddCommand(newCmdIdentity())
RootCmd.AddCommand(newCmdInject())
RootCmd.AddCommand(newCmdInstall())
RootCmd.AddCommand(newCmdInstallCNIPlugin())
RootCmd.AddCommand(newCmdInstallSP())
RootCmd.AddCommand(newCmdMetrics())
RootCmd.AddCommand(newCmdProfile())
RootCmd.AddCommand(newCmdRoutes())
RootCmd.AddCommand(newCmdStat())
RootCmd.AddCommand(newCmdTap())
RootCmd.AddCommand(newCmdTop())
RootCmd.AddCommand(newCmdUninject())
RootCmd.AddCommand(newCmdUpgrade())
RootCmd.AddCommand(newCmdVersion())
RootCmd.AddCommand(newCmdUninstall())
// Extension Sub Commands
RootCmd.AddCommand(jaeger.NewCmdJaeger())
RootCmd.AddCommand(multicluster.NewCmdMulticluster())
RootCmd.AddCommand(viz.NewCmdViz())
}
type statOptionsBase struct {
namespace string
timeWindow string
outputFormat string
}
func newStatOptionsBase() *statOptionsBase {
return &statOptionsBase{
namespace: defaultNamespace,
timeWindow: "1m",
outputFormat: tableOutput,
}
}
func (o *statOptionsBase) validateOutputFormat() error {
switch o.outputFormat {
case tableOutput, jsonOutput, wideOutput:
return nil
default:
return fmt.Errorf("--output currently only supports %s, %s and %s", tableOutput, jsonOutput, wideOutput)
}
}
func renderStats(buffer bytes.Buffer, options *statOptionsBase) string {
var out string
switch options.outputFormat {
case jsonOutput:
out = buffer.String()
default:
// strip left padding on the first column
b := buffer.Bytes()
if len(b) > padding {
out = string(b[padding:])
}
out = strings.Replace(out, "\n"+strings.Repeat(" ", padding), "\n", -1)
}
return out
}
// getRequestRate calculates request rate from Public API BasicStats.
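// For example, 30 successes and 10 failures over a "1m" window give
// (30+10)/60 ≈ 0.67 requests per second.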
func getRequestRate(success, failure uint64, timeWindow string) float64 {
windowLength, err := time.ParseDuration(timeWindow)
if err != nil {
log.Error(err.Error())
return 0.0
}
return float64(success+failure) / windowLength.Seconds()
}
// getSuccessRate calculates success rate from Public API BasicStats.
func getSuccessRate(success, failure uint64) float64 {
if success+failure == 0 {
return 0.0
}
return float64(success) / float64(success+failure)
}
// getDefaultNamespace fetches the default namespace
// used in the current KubeConfig context
func getDefaultNamespace() string {
rules := clientcmd.NewDefaultClientConfigLoadingRules()
if kubeconfigPath != "" {
rules.ExplicitPath = kubeconfigPath
}
overrides := &clientcmd.ConfigOverrides{CurrentContext: kubeContext}
kubeCfg := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)
ns, _, err := kubeCfg.Namespace()
if err != nil {
log.Warnf(`could not set namespace from kubectl context, using 'default' namespace: %s
ensure the KUBECONFIG path %s is valid`, err, kubeconfigPath)
return corev1.NamespaceDefault
}
return ns
}
// registryOverride replaces the registry-portion of the provided image with the provided registry.
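// For example, assuming the package-level slash constant is "/",
// registryOverride("ghcr.io/linkerd/proxy", "my.registry/custom") returns
// "my.registry/custom/proxy".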
func registryOverride(image, newRegistry string) string {
if image == "" {
return image
}
registry := newRegistry
if registry != "" && !strings.HasSuffix(registry, slash) {
registry += slash
}
imageName := image
if strings.Contains(image, slash) {
imageName = image[strings.LastIndex(image, slash)+1:]
}
return registry + imageName
}
func flattenFlags(flags ...[]flag.Flag) []flag.Flag {
out := []flag.Flag{}
for _, f := range flags {
out = append(out, f...)
}
return out
}
| ["\"LINKERD_NAMESPACE\""] | [] | ["LINKERD_NAMESPACE"] | [] | ["LINKERD_NAMESPACE"] | go | 1 | 0 | |
mpserver/main.go
|
package main
import (
"encoding/gob"
"os"
"time"
"github.com/aouyang1/go-matrixprofile/matrixprofile"
"github.com/gin-contrib/cors"
"github.com/gin-contrib/sessions"
"github.com/gin-contrib/sessions/redis"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var (
mpConcurrency = 4
maxRedisBlobSize = 10 * 1024 * 1024
retentionPeriod = 10 * 60
redisURL = "localhost:6379" // override with REDIS_URL environment variable
port = "8081" // override with PORT environment variable
requestTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "mpserver_requests_total",
Help: "count of all HTTP requests for the mpserver",
},
[]string{"method", "endpoint", "code"},
)
serviceRequestDuration = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Name: "mpserver_service_request_durations_ms",
Help: "service request duration in milliseconds.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"endpoint"},
)
redisClientRequestDuration = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Name: "mpserver_client_redis_request_durations_ms",
Help: "redis client request duration in milliseconds.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"command", "status"},
)
)
type RespError struct {
Error error `json:"error"`
CacheExpired bool `json:"cache_expired"`
}
func init() {
prometheus.MustRegister(requestTotal)
prometheus.MustRegister(serviceRequestDuration)
prometheus.MustRegister(redisClientRequestDuration)
}
func main() {
r := gin.Default()
store, err := initRedis()
if err != nil {
panic(err)
}
r.Use(sessions.Sessions("mysession", store))
r.Use(cors.Default())
gob.RegisterName(
"github.com/aouyang1/go-matrixprofile/matrixprofile.MatrixProfile",
matrixprofile.MatrixProfile{},
)
v1 := r.Group("/api/v1")
{
v1.GET("/data", getData)
v1.GET("/sources", getSources)
v1.POST("/calculate", calculateMP)
v1.GET("/topkmotifs", topKMotifs)
v1.GET("/topkdiscords", topKDiscords)
v1.POST("/mp", getMP)
}
r.GET("/metrics", gin.WrapH(promhttp.Handler()))
if p := os.Getenv("PORT"); p != "" {
port = p
}
r.Run(":" + port)
}
// initRedis initializes the connection to the redis store for caching session Matrix Profile data
func initRedis() (redis.Store, error) {
if u := os.Getenv("REDIS_URL"); u != "" {
// override global variable if environment variable present
redisURL = u
}
store, err := redis.NewStore(10, "tcp", redisURL, "", []byte("secret"))
if err != nil {
return nil, err
}
err, rs := redis.GetRedisStore(store)
if err != nil {
return nil, err
}
rs.SetMaxLength(maxRedisBlobSize)
rs.Options.MaxAge = retentionPeriod
return store, nil
}
func buildCORSHeaders(c *gin.Context) {
c.Header("Access-Control-Allow-Origin", "http://localhost:8080")
c.Header("Access-Control-Allow-Credentials", "true")
c.Header("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
c.Header("Access-Control-Allow-Methods", "GET, POST")
}
func fetchMPCache(session sessions.Session) interface{} {
start := time.Now()
v := session.Get("mp")
if v == nil {
redisClientRequestDuration.WithLabelValues("GET", "500").Observe(time.Since(start).Seconds() * 1000)
} else {
redisClientRequestDuration.WithLabelValues("GET", "200").Observe(time.Since(start).Seconds() * 1000)
}
return v
}
func storeMPCache(session sessions.Session, mp *matrixprofile.MatrixProfile) {
start := time.Now()
session.Set("mp", mp)
err := session.Save()
if err != nil {
redisClientRequestDuration.WithLabelValues("SET", "500").Observe(time.Since(start).Seconds() * 1000)
} else {
redisClientRequestDuration.WithLabelValues("SET", "200").Observe(time.Since(start).Seconds() * 1000)
}
}
| ["\"PORT\"", "\"REDIS_URL\""] | [] | ["PORT", "REDIS_URL"] | [] | ["PORT", "REDIS_URL"] | go | 2 | 0 |