filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---
src/condor_tests/test_get_htcondor.py | #!/usr/bin/env pytest
#
# This is NOT a regression test. This is a post-release acceptance test,
# verifying some minimal functionality of get_htcondor. Running the
# entire suite takes quite a while, but you can use pytest's -k option
# to select a container image to test (and then test images concurrently):
#
# pytest test_get_htcondor.py -k ubuntu:20.04
#
# You can also specify a channel to test:
#
# pytest test_get_htcondor.py -k stable
#
# or both an image and a channel:
#
# pytest test_get_htcondor.py -k "ubuntu:20.04 and stable"
#
# To get information about which tarball the download tests actually
# got, run the download tests with the INFO debug level:
#
# pytest test_get_htcondor.py -k download --log-cli-level INFO
#
# If you're developing get_htcondor and have made the new version
# available via http at some-address.tld/get_htcondor, you can
# set THE_URL in this test's environment to test the new version:
#
# THE_URL=http://some-address.tld/get_htcondor pytest test_get_htcondor.py
#
import os
import subprocess
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# This is completely unnecessary, but I'm used to using Ornithology's
# fixtures now.
from ornithology import (
config,
standup,
action,
)
# You can change this to simplify testing new versions of get_htcondor.
THE_URL = "https://get.htcondor.org"
if "THE_URL" in os.environ:
THE_URL = os.environ["THE_URL"]
# PyTest gets the ordering wrong unless I make it explicit. *sigh*
#
# IMAGES_BY_CHANNEL seems the most natural way to list those mappings.
#
# TESTS is currently too heavy-weight, but if the test(s) ever need to
# care about the log, it'll be easy to add a log-must-contain string or
# a flag-specific log-examining function.
IMAGES_BY_CHANNEL = {
"stable": [
"debian:9",
"debian:10",
"ubuntu:18.04",
"ubuntu:20.04",
"centos:7",
"centos:8",
"amazonlinux:2",
],
"current": [
"ubuntu:18.04",
"ubuntu:20.04",
"debian:10",
"centos:7",
"centos:8",
"amazonlinux:2",
],
}
#
# The --help test is essentially the null test; it can be helpful for
# testing the test, but doesn't tell us anything the other tests
# don't. The --no-dry-run and --minicondor tests should be identical;
# if you're feeling paranoid, you can check, but it's not worth waiting
# around for if you're just smoke-testing a release. The default test
# is also a no-op test, useful for debugging, but otherwise not worth
# the time.
#
TESTS = {
# "help": {
# "flag": "--help",
# },
"download": {
"flag": "--download",
        # Using 'head' screws up the exit code (see the sketch after this
        # dict), so we can't just use the name of the directory (that's
        # printed on the first line).
"postscript": "if command -v yum > /dev/null 2>&1; then " +
"yum install -y tar && yum install -y gzip; " +
"fi && " +
"tar -z -t -f condor.tar.gz | tail -1 | cut -d / -f 1",
"postscript-lines": [-1],
},
# "default": {
# "flag": "",
# },
# "no_dry_run": {
# "flag": "--no-dry-run",
# },
"minicondor": {
"flag": "--minicondor --no-dry-run",
"postscript": "condor_version",
"postscript-lines": [-2, -1],
},
}
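# A minimal sketch (not part of the suite) of the exit-code problem noted in
# the "download" postscript above. Assuming a condor.tar.gz in the current
# directory: with `set -o pipefail`, `head -1` exits after one line, tar gets
# SIGPIPE, and the pipeline reports failure (typically 141) even though
# nothing went wrong; reading to the end with `tail` keeps the exit code
# clean. (subprocess is already imported above.)
#
# for tool in ("head -1", "tail -1 | cut -d / -f 1"):
#     cp = subprocess.run(
#         ["/bin/bash", "-c", f"set -o pipefail; tar -z -t -f condor.tar.gz | {tool}"],
#         stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
#     )
#     print(tool, "->", cp.returncode)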
PREFICES_BY_IMAGE = {
"debian:9" : "apt-get update && apt-get install -y curl",
"debian:10" : "apt-get update && apt-get install -y curl",
"ubuntu:18.04": "apt-get update && apt-get install -y curl",
"ubuntu:20.04": "apt-get update && apt-get install -y curl",
}
CHANNELS_BY_IMAGE = {}
for channel in IMAGES_BY_CHANNEL:
for image in IMAGES_BY_CHANNEL[channel]:
        # setdefault keeps this loop body to one idiomatic line.
        CHANNELS_BY_IMAGE.setdefault(image, []).append(channel)
PARAMS = {}
for image, channels in CHANNELS_BY_IMAGE.items():
for channel in channels:
for testname, test in TESTS.items():
PARAMS[f"{image}_{channel}_{testname}"] = {
"channel": channel,
"image": image,
"test": test,
}
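# For example, the loop above yields ids like "ubuntu:20.04_stable_download",
# which is what the -k expressions in the header comment match against.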
@config(params=PARAMS)
def the_test_case(request):
return request.param
@config
def channel(the_test_case):
return the_test_case["channel"]
@config
def container_image(the_test_case):
return the_test_case["image"]
@config
def flag(the_test_case):
return the_test_case["test"]["flag"]
@config
def postscript(the_test_case):
    return the_test_case["test"].get("postscript")

@config
def postscript_lines(the_test_case):
    return the_test_case["test"].get("postscript-lines")
# This should avoid any potential problems with concurrently pulling the
# same container image... but it may count as a pull request even if you
# already have the latest image?
@action
def cached_container_image(container_image):
# cp = subprocess.run(['docker', 'pull', container_image])
# assert(cp.returncode == 0)
return container_image
@action
def results_from_container(channel, cached_container_image, flag, postscript):
platform_specific_prefix = ""
if cached_container_image in PREFICES_BY_IMAGE:
platform_specific_prefix = PREFICES_BY_IMAGE[cached_container_image]
platform_specific_prefix += " && "
# The 'set -o pipefail' is bash magic to make the scriptlet return
# the failure if any command in the pipe fails. This is super-useful
# for catching a failure in/of curl.
script = f"curl -fsSL {THE_URL} | /bin/bash -s -- --channel {channel} "
script += flag
if postscript is not None:
script += f" && {postscript}"
args = [ "docker", "run", "--rm", "-t", cached_container_image, "/bin/bash", "-c",
f"set -o pipefail; {platform_specific_prefix}{script}"
]
logger.debug(args)
return subprocess.run(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
timeout=240)
# We can parameterize further to string(s) required to be in the log,
# or with functions which inspect the log.
class TestGetHTCondor:
def test_results_from_container(self, results_from_container, postscript_lines):
logger.debug(results_from_container.stdout)
        # There is undoubtedly a more-Pythonic way to do this, possibly
        # involving the parameter being a slice (see the sketch after the
        # class).
if postscript_lines is not None:
lines = results_from_container.stdout.splitlines()
for line_number in postscript_lines:
logger.info(lines[line_number])
assert(results_from_container.returncode == 0)
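# A hedged sketch of the slice idea from the comment above (hypothetical,
# not used by the suite): storing "postscript-lines" as a slice object
# would collapse the loop to a single expression:
#
#     TESTS["minicondor"]["postscript-slice"] = slice(-2, None)
#     ...
#     for line in results_from_container.stdout.splitlines()[the_slice]:
#         logger.info(line)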
| [] | [] | ["THE_URL"] | [] | ["THE_URL"] | python | 1 | 0 | |
mantle/platform/util.go | // Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package platform
import (
"context"
"crypto/rand"
"crypto/rsa"
"fmt"
"os"
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/terminal"
)
// Manhole connects os.Stdin, os.Stdout, and os.Stderr to an interactive shell
// session on the Machine m. Manhole blocks until the shell session has ended.
// If os.Stdin does not refer to a TTY, Manhole returns immediately with a nil
// error.
func Manhole(m Machine) error {
fd := int(os.Stdin.Fd())
if !terminal.IsTerminal(fd) {
return nil
}
tstate, _ := terminal.MakeRaw(fd)
defer terminal.Restore(fd, tstate)
client, err := m.SSHClient()
if err != nil {
return fmt.Errorf("SSH client failed: %v", err)
}
defer client.Close()
session, err := client.NewSession()
if err != nil {
return fmt.Errorf("SSH session failed: %v", err)
}
defer session.Close()
session.Stdin = os.Stdin
session.Stdout = os.Stdout
session.Stderr = os.Stderr
modes := ssh.TerminalModes{
ssh.TTY_OP_ISPEED: 115200,
ssh.TTY_OP_OSPEED: 115200,
}
cols, lines, err := terminal.GetSize(int(os.Stdin.Fd()))
if err != nil {
return err
}
if err = session.RequestPty(os.Getenv("TERM"), lines, cols, modes); err != nil {
return errors.Wrapf(err, "failed to request pseudo terminal")
}
if err := session.Shell(); err != nil {
return errors.Wrapf(err, "failed to start shell")
}
if err := session.Wait(); err != nil {
return errors.Wrapf(err, "failed to wait for session")
}
return nil
}
// EnableSelinux enables SELinux on a machine (skipped on machines without SELinux support).
func EnableSelinux(m Machine) error {
_, stderr, err := m.SSH("if type -P setenforce; then sudo setenforce 1; fi")
if err != nil {
return fmt.Errorf("Unable to enable SELinux: %s: %s", err, stderr)
}
return nil
}
// StartReboot reboots a machine, stopping ssh first.
// Afterwards, run CheckMachine to verify the system is back and operational.
func StartReboot(m Machine) error {
out, stderr, err := m.SSH("sudo reboot")
if _, ok := err.(*ssh.ExitMissingError); ok {
// A terminated session is perfectly normal during reboot.
err = nil
}
if err != nil {
return fmt.Errorf("issuing reboot command failed: %s: %s: %s", out, err, stderr)
}
return nil
}
// RebootMachine will reboot a given machine, provided the machine's journal.
func RebootMachine(m Machine, j *Journal) error {
bootId, err := GetMachineBootId(m)
if err != nil {
return err
}
if err := StartReboot(m); err != nil {
return fmt.Errorf("machine %q failed to begin rebooting: %v", m.ID(), err)
}
return StartMachineAfterReboot(m, j, bootId)
}
// WaitForMachineReboot will wait for the machine to reboot, i.e. it is assumed
// an action which will cause a reboot has already been initiated. Note the
// timeout here is for how long to wait for the machine to seemingly go
// *offline*, not for how long it takes to get back online. Journal.Start() has
// its own timeouts for that.
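//
// A sketch of the intended call pattern ("m", "j", and the 5 minute budget
// are illustrative):
//
//	oldBootId, err := GetMachineBootId(m)
//	// ... trigger the reboot out-of-band, then:
//	err = WaitForMachineReboot(m, j, 5*time.Minute, oldBootId)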
func WaitForMachineReboot(m Machine, j *Journal, timeout time.Duration, oldBootId string) error {
// The machine could be in three distinct states here wrt SSH
// accessibility: it may be up before the reboot, or down during the
// reboot, or up after the reboot.
// we *require* the old boot ID, otherwise there's no way to know if the
// machine already rebooted
if oldBootId == "" {
panic("unreachable: oldBootId empty")
}
// run a command we know will hold so we know approximately when the reboot happens
c := make(chan error)
go func() {
out, stderr, err := m.SSH(fmt.Sprintf("if [ $(cat /proc/sys/kernel/random/boot_id) == '%s' ]; then sleep infinity; fi", oldBootId))
if err == nil {
// we're already in the new boot!
c <- nil
} else if _, ok := err.(*ssh.ExitMissingError); ok {
// we got interrupted running the command, likely by sshd going down
c <- nil
} else if strings.Contains(err.Error(), "connection reset by peer") {
// Catch ECONNRESET, which can happen if sshd is killed during
// handshake. crypto/ssh doesn't provide a distinct error type for
// this, so we're left looking for the string... :(
c <- nil
} else {
c <- fmt.Errorf("waiting for reboot failed: %s: %s: %s", out, err, stderr)
}
}()
select {
case err := <-c:
if err != nil {
return err
}
return StartMachineAfterReboot(m, j, oldBootId)
case <-time.After(timeout):
return fmt.Errorf("timed out after %v waiting for machine to reboot", timeout)
}
}
func StartMachineAfterReboot(m Machine, j *Journal, oldBootId string) error {
if err := j.Start(context.TODO(), m, oldBootId); err != nil {
return fmt.Errorf("machine %q failed to start: %v", m.ID(), err)
}
if err := CheckMachine(context.TODO(), m); err != nil {
return fmt.Errorf("machine %q failed basic checks: %v", m.ID(), err)
}
if !m.RuntimeConf().NoEnableSelinux {
if err := EnableSelinux(m); err != nil {
return fmt.Errorf("machine %q failed to enable selinux: %v", m.ID(), err)
}
}
return nil
}
// StartMachine will start a given machine, provided the machine's journal.
func StartMachine(m Machine, j *Journal) error {
return StartMachineAfterReboot(m, j, "")
}
func GetMachineBootId(m Machine) (string, error) {
stdout, stderr, err := m.SSH("cat /proc/sys/kernel/random/boot_id")
if err != nil {
return "", fmt.Errorf("failed to retrieve boot ID: %s: %s: %s", stdout, err, stderr)
}
return strings.TrimSpace(string(stdout)), nil
}
// GenerateFakeKey generates an SSH key pair, returns the public key, and
// discards the private key. This is useful for droplets that don't need a
// public key, since DO & Azure insist on requiring one.
func GenerateFakeKey() (string, error) {
rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return "", err
}
sshKey, err := ssh.NewPublicKey(&rsaKey.PublicKey)
if err != nil {
return "", err
}
return string(ssh.MarshalAuthorizedKey(sshKey)), nil
}
| ["\"TERM\""] | [] | ["TERM"] | [] | ["TERM"] | go | 1 | 0 | |
databasemigration/advisor_report_location_details.go | // Copyright (c) 2016, 2018, 2021, Oracle and/or its affiliates. All rights reserved.
// This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
// Code generated. DO NOT EDIT.
// Database Migration API
//
// Use the Oracle Cloud Infrastructure Database Migration APIs to perform database migration operations.
//
package databasemigration
import (
"github.com/oracle/oci-go-sdk/v54/common"
)
// AdvisorReportLocationDetails Details to access Pre-Migration Advisor report.
type AdvisorReportLocationDetails struct {
ObjectStorageDetails *AdvisorReportBucketDetails `mandatory:"false" json:"objectStorageDetails"`
// Path in the Source Registered Connection where the Pre-Migration advisor report can be accessed.
LocationInSource *string `mandatory:"false" json:"locationInSource"`
}
func (m AdvisorReportLocationDetails) String() string {
return common.PointerString(m)
}
| [] | [] | [] | [] | [] | go | null | null | null |
acceptance/openstack/client_test.go | // +build acceptance
package openstack
import (
"os"
"testing"
"time"
"github.com/dynuc/gophercloud"
"github.com/dynuc/gophercloud/openstack"
)
func TestAuthenticatedClient(t *testing.T) {
// Obtain credentials from the environment.
ao, err := openstack.AuthOptionsFromEnv()
if err != nil {
t.Fatalf("Unable to acquire credentials: %v", err)
}
client, err := openstack.AuthenticatedClient(ao)
if err != nil {
t.Fatalf("Unable to authenticate: %v", err)
}
if client.TokenID == "" {
t.Errorf("No token ID assigned to the client")
}
t.Logf("Client successfully acquired a token: %v", client.TokenID)
// Find the storage service in the service catalog.
storage, err := openstack.NewObjectStorageV1(client, gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
})
if err != nil {
t.Errorf("Unable to locate a storage service: %v", err)
} else {
t.Logf("Located a storage service at endpoint: [%s]", storage.Endpoint)
}
}
func TestReauth(t *testing.T) {
ao, err := openstack.AuthOptionsFromEnv()
if err != nil {
t.Fatalf("Unable to obtain environment auth options: %v", err)
}
// Allow reauth
ao.AllowReauth = true
conf := gophercloud.NewConfig()
provider, err := openstack.NewClient(ao.IdentityEndpoint, ao.TenantID, conf)
if err != nil {
t.Fatalf("Unable to create provider: %v", err)
}
err = openstack.Authenticate(provider, ao)
if err != nil {
t.Fatalf("Unable to authenticate: %v", err)
}
t.Logf("Creating a compute client")
_, err = openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
})
if err != nil {
t.Fatalf("Unable to create compute client: %v", err)
}
t.Logf("Sleeping for 1 second")
time.Sleep(1 * time.Second)
t.Logf("Attempting to reauthenticate")
err = provider.ReauthFunc()
if err != nil {
t.Fatalf("Unable to reauthenticate: %v", err)
}
t.Logf("Creating a compute client")
_, err = openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
Region: os.Getenv("OS_REGION_NAME"),
})
if err != nil {
t.Fatalf("Unable to create compute client: %v", err)
}
}
| ["\"OS_REGION_NAME\"", "\"OS_REGION_NAME\"", "\"OS_REGION_NAME\""] | [] | ["OS_REGION_NAME"] | [] | ["OS_REGION_NAME"] | go | 1 | 0 | |
etc/core/transfer.py | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import time
import os
from core.badges import badges
from core.fsmanip import fsmanip
class transfer:
def __init__(self, ghost):
self.badges = badges()
self.ghost = ghost
self.fsmanip = fsmanip()
def upload(self, input_file, output_path):
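        # Local paths are resolved relative to the directory the user launched
        # ghost from ($OLDPWD); each call chdirs there first and back to
        # $HOME/ghost before returning.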
temp_container = os.environ['OLDPWD']
os.chdir(temp_container)
if self.fsmanip.file(input_file):
output_filename = os.path.split(input_file)[1]
output_directory = output_path
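            # Probe the remote side: `stat` reports whether the path exists at
            # all, and the `[[ -d ]]` scriptlet prints "0" only when it is a
            # directory.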
check_exists = self.ghost.send_command("shell", "stat "+output_path)
check_directory = self.ghost.send_command("shell", "'if [[ -d \""+output_path+"\" ]]; then echo 0; fi'")
if check_directory == "0":
if check_exists == "stat: '"+output_path+"': No such file or directory":
print(self.badges.E + "Remote directory: "+output_path+": does not exist!")
else:
if output_directory[-1] == "/":
output_directory = output_directory + output_filename
else:
output_directory = output_directory + "/" + output_filename
print(self.badges.G + "Uploading "+input_file+"...")
self.ghost.send_command("push", input_file + " " + output_directory)
print(self.badges.G + "Saving to "+output_directory+"...")
time.sleep(1)
print(self.badges.S + "Saved to "+output_directory+"!")
else:
directory = os.path.split(output_path)[0]
if directory == "":
directory = "."
check_exists = self.ghost.send_command("shell", "stat " + directory)
check_directory = self.ghost.send_command("shell", "'if [[ -d \""+directory+"\" ]]; then echo 0; fi'")
if check_exists != "stat: "+directory+": No such file or directory":
if check_directory == "0":
print(self.badges.G + "Uploading " + input_file + "...")
self.ghost.send_command("push", input_file + " " + output_directory)
print(self.badges.G + "Saving to " + output_directory + "...")
time.sleep(1)
print(self.badges.S + "Saved to " + output_directory + "!")
else:
print(self.badges.E + "Error: "+directory+": not a directory!")
else:
print(self.badges.E + "Remote directory: "+directory+": does not exist!")
main_container = os.environ['HOME']
os.chdir(main_container + "/ghost")
def download(self, input_file, output_path):
temp_container = os.environ['OLDPWD']
os.chdir(temp_container)
exists, path_type = self.fsmanip.exists_directory(output_path)
if exists:
if path_type != "file":
if output_path[-1] == "/":
output_path = output_path + os.path.split(input_file)[1]
else:
output_path = output_path + "/" + os.path.split(input_file)[1]
check_file_exists = self.ghost.send_command("shell", "stat " + input_file)
check_file_directory = self.ghost.send_command("shell", "'if [[ -d \""+input_file+"\" ]]; then echo 0; fi'")
if check_file_exists == "stat: '"+input_file+"': No such file or directory":
print(self.badges.E + "Remote file: "+input_file+": does not exist!")
else:
if check_file_directory == "0":
print(self.badges.E + "Error: " + input_file + ": not a file!")
else:
print(self.badges.G + "Downloading "+input_file+"...")
self.ghost.send_command("pull", input_file + " " + output_path, False, False)
print(self.badges.G + "Saving to "+output_path+"...")
time.sleep(1)
print(self.badges.S + "Saved to "+output_path+"!")
main_container = os.environ['HOME']
os.chdir(main_container + "/ghost")
| [] | [] | ["OLDPWD", "HOME"] | [] | ["OLDPWD", "HOME"] | python | 2 | 0 | |
test/e2e/framework/util.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
goruntime "runtime"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"text/tabwriter"
"time"
"github.com/golang/glog"
"golang.org/x/crypto/ssh"
"golang.org/x/net/websocket"
"google.golang.org/api/googleapi"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
batch "k8s.io/api/batch/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/apimachinery/pkg/watch"
"github.com/hyperhq/client-go/discovery"
"github.com/hyperhq/client-go/dynamic"
restclient "github.com/hyperhq/client-go/rest"
"github.com/hyperhq/client-go/tools/clientcmd"
clientcmdapi "github.com/hyperhq/client-go/tools/clientcmd/api"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "github.com/hyperhq/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller"
nodectlr "k8s.io/kubernetes/pkg/controller/node"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubectl"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/master/ports"
sshutil "k8s.io/kubernetes/pkg/ssh"
"k8s.io/kubernetes/pkg/util/system"
taintutils "k8s.io/kubernetes/pkg/util/taints"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
testutil "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
)
const (
// How long to wait for the pod to be listable
PodListTimeout = time.Minute
// Initial pod start can be delayed O(minutes) by slow docker pulls
// TODO: Make this 30 seconds once #4566 is resolved.
PodStartTimeout = 5 * time.Minute
	// If there are any orphaned namespaces to clean up, this test is running
	// on a long lived cluster. A long wait here is preferable to spurious test
	// failures caused by leaked resources from a previous test run.
NamespaceCleanupTimeout = 15 * time.Minute
// Some pods can take much longer to get ready due to volume attach/detach latency.
slowPodStartTimeout = 15 * time.Minute
// How long to wait for a service endpoint to be resolvable.
ServiceStartTimeout = 1 * time.Minute
// How often to Poll pods, nodes and claims.
Poll = 2 * time.Second
pollShortTimeout = 1 * time.Minute
pollLongTimeout = 5 * time.Minute
// service accounts are provisioned after namespace creation
// a service account is required to support pod creation in a namespace as part of admission control
ServiceAccountProvisionTimeout = 2 * time.Minute
// How long to try single API calls (like 'get' or 'list'). Used to prevent
// transient failures from failing tests.
// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
SingleCallTimeout = 5 * time.Minute
// How long nodes have to be "ready" when a test begins. They should already
// be "ready" before the test starts, so this is small.
NodeReadyInitialTimeout = 20 * time.Second
// How long pods have to be "ready" when a test begins.
PodReadyBeforeTimeout = 5 * time.Minute
// How long pods have to become scheduled onto nodes
podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second)
podRespondingTimeout = 15 * time.Minute
ServiceRespondingTimeout = 2 * time.Minute
EndpointRegisterTimeout = time.Minute
// How long claims have to become dynamically provisioned
ClaimProvisionTimeout = 5 * time.Minute
// When these values are updated, also update cmd/kubelet/app/options/options.go
currentPodInfraContainerImageName = "gcr.io/google_containers/pause"
currentPodInfraContainerImageVersion = "3.0"
// How long a node is allowed to become "Ready" after it is restarted before
// the test is considered failed.
RestartNodeReadyAgainTimeout = 5 * time.Minute
// How long a pod is allowed to become "running" and "ready" after a node
// restart before test is considered failed.
RestartPodReadyAgainTimeout = 5 * time.Minute
	// Number of objects that gc can delete in a second.
	// GC issues 2 requests for a single delete.
gcThroughput = 10
// Minimal number of nodes for the cluster to be considered large.
largeClusterThreshold = 100
// TODO(justinsb): Avoid hardcoding this.
awsMasterIP = "172.20.0.9"
// ssh port
sshPort = "22"
// ImagePrePullingTimeout is the time we wait for the e2e-image-puller
// static pods to pull the list of seeded images. If they don't pull
// images within this time we simply log their output and carry on
// with the tests.
ImagePrePullingTimeout = 5 * time.Minute
)
var (
BusyBoxImage = "busybox"
// Label allocated to the image puller static pod that runs on each node
// before e2es.
ImagePullerLabels = map[string]string{"name": "e2e-image-puller"}
// For parsing Kubectl version for version-skewed testing.
gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"")
// Slice of regexps for names of pods that have to be running to consider a Node "healthy"
requiredPerNodePods = []*regexp.Regexp{
regexp.MustCompile(".*kube-proxy.*"),
regexp.MustCompile(".*fluentd-elasticsearch.*"),
regexp.MustCompile(".*node-problem-detector.*"),
}
// Serve hostname image name
ServeHostnameImage = imageutils.GetE2EImage(imageutils.ServeHostname)
)
type Address struct {
internalIP string
externalIP string
hostname string
}
// GetServerArchitecture fetches the architecture of the cluster's apiserver.
func GetServerArchitecture(c clientset.Interface) string {
arch := ""
sVer, err := c.Discovery().ServerVersion()
if err != nil || sVer.Platform == "" {
// If we failed to get the server version for some reason, default to amd64.
arch = "amd64"
} else {
// Split the platform string into OS and Arch separately.
// The platform string may for example be "linux/amd64", "linux/arm" or "windows/amd64".
osArchArray := strings.Split(sVer.Platform, "/")
arch = osArchArray[1]
}
return arch
}
// GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
func GetPauseImageName(c clientset.Interface) string {
return currentPodInfraContainerImageName + "-" + GetServerArchitecture(c) + ":" + currentPodInfraContainerImageVersion
}
// GetPauseImageNameForHostArch fetches the pause image name for the same architecture the test is running on.
// TODO: move this function to the test/utils
func GetPauseImageNameForHostArch() string {
return currentPodInfraContainerImageName + "-" + goruntime.GOARCH + ":" + currentPodInfraContainerImageVersion
}
func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
return request.Resource("services").SubResource("proxy"), nil
}
// unique identifier of the e2e run
var RunId = uuid.NewUUID()
type CreateTestingNSFn func(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error)
type ContainerFailures struct {
status *v1.ContainerStateTerminated
Restarts int
}
func GetMasterHost() string {
masterUrl, err := url.Parse(TestContext.Host)
ExpectNoError(err)
return masterUrl.Host
}
func nowStamp() string {
return time.Now().Format(time.StampMilli)
}
func log(level string, format string, args ...interface{}) {
fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}
func Logf(format string, args ...interface{}) {
log("INFO", format, args...)
}
func Failf(format string, args ...interface{}) {
FailfWithOffset(1, format, args...)
}
// FailfWithOffset calls "Fail" and logs the error at "offset" levels above its caller
// (for example, for call chain f -> g -> FailfWithOffset(1, ...) error would be logged for "f").
func FailfWithOffset(offset int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("INFO", msg)
ginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
}
func Skipf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("INFO", msg)
ginkgowrapper.Skip(nowStamp() + ": " + msg)
}
func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {
if TestContext.CloudConfig.NumNodes < minNodeCount {
Skipf("Requires at least %d nodes (not %d)", minNodeCount, TestContext.CloudConfig.NumNodes)
}
}
func SkipUnlessNodeCountIsAtMost(maxNodeCount int) {
if TestContext.CloudConfig.NumNodes > maxNodeCount {
Skipf("Requires at most %d nodes (not %d)", maxNodeCount, TestContext.CloudConfig.NumNodes)
}
}
func SkipUnlessAtLeast(value int, minValue int, message string) {
if value < minValue {
Skipf(message)
}
}
func SkipIfProviderIs(unsupportedProviders ...string) {
if ProviderIs(unsupportedProviders...) {
Skipf("Not supported for providers %v (found %s)", unsupportedProviders, TestContext.Provider)
}
}
func SkipUnlessLocalEphemeralStorageEnabled() {
if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
Skipf("Only supported when %v feature is enabled", features.LocalStorageCapacityIsolation)
}
}
func SkipUnlessSSHKeyPresent() {
if _, err := GetSigner(TestContext.Provider); err != nil {
Skipf("No SSH Key for provider %s: '%v'", TestContext.Provider, err)
}
}
func SkipUnlessProviderIs(supportedProviders ...string) {
if !ProviderIs(supportedProviders...) {
Skipf("Only supported for providers %v (not %s)", supportedProviders, TestContext.Provider)
}
}
func SkipIfMultizone(c clientset.Interface) {
zones, err := GetClusterZones(c)
if err != nil {
Skipf("Error listing cluster zones")
}
if zones.Len() > 1 {
Skipf("Requires more than one zone")
}
}
func SkipUnlessClusterMonitoringModeIs(supportedMonitoring ...string) {
if !ClusterMonitoringModeIs(supportedMonitoring...) {
Skipf("Only next monitoring modes are supported %v (not %s)", supportedMonitoring, TestContext.ClusterMonitoringMode)
}
}
func SkipUnlessMasterOSDistroIs(supportedMasterOsDistros ...string) {
if !MasterOSDistroIs(supportedMasterOsDistros...) {
Skipf("Only supported for master OS distro %v (not %s)", supportedMasterOsDistros, TestContext.MasterOSDistro)
}
}
func SkipUnlessNodeOSDistroIs(supportedNodeOsDistros ...string) {
if !NodeOSDistroIs(supportedNodeOsDistros...) {
Skipf("Only supported for node OS distro %v (not %s)", supportedNodeOsDistros, TestContext.NodeOSDistro)
}
}
func SkipIfContainerRuntimeIs(runtimes ...string) {
for _, runtime := range runtimes {
if runtime == TestContext.ContainerRuntime {
Skipf("Not supported under container runtime %s", runtime)
}
}
}
func RunIfContainerRuntimeIs(runtimes ...string) {
for _, runtime := range runtimes {
if runtime == TestContext.ContainerRuntime {
return
}
}
Skipf("Skipped because container runtime %q is not in %s", TestContext.ContainerRuntime, runtimes)
}
func RunIfSystemSpecNameIs(names ...string) {
for _, name := range names {
if name == TestContext.SystemSpecName {
return
}
}
Skipf("Skipped because system spec name %q is not in %v", TestContext.SystemSpecName, names)
}
func ProviderIs(providers ...string) bool {
for _, provider := range providers {
if strings.ToLower(provider) == strings.ToLower(TestContext.Provider) {
return true
}
}
return false
}
func ClusterMonitoringModeIs(monitoringModes ...string) bool {
for _, mode := range monitoringModes {
if strings.ToLower(mode) == strings.ToLower(TestContext.ClusterMonitoringMode) {
return true
}
}
return false
}
func MasterOSDistroIs(supportedMasterOsDistros ...string) bool {
for _, distro := range supportedMasterOsDistros {
if strings.ToLower(distro) == strings.ToLower(TestContext.MasterOSDistro) {
return true
}
}
return false
}
func NodeOSDistroIs(supportedNodeOsDistros ...string) bool {
for _, distro := range supportedNodeOsDistros {
if strings.ToLower(distro) == strings.ToLower(TestContext.NodeOSDistro) {
return true
}
}
return false
}
func ProxyMode(f *Framework) (string, error) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-proxy-mode-detector",
Namespace: f.Namespace.Name,
},
Spec: v1.PodSpec{
HostNetwork: true,
Containers: []v1.Container{
{
Name: "detector",
Image: imageutils.GetE2EImage(imageutils.Net),
Command: []string{"/bin/sleep", "3600"},
},
},
},
}
f.PodClient().CreateSync(pod)
defer f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
cmd := "curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode"
stdout, err := RunHostCmd(pod.Namespace, pod.Name, cmd)
if err != nil {
return "", err
}
Logf("ProxyMode: %s", stdout)
return stdout, nil
}
func SkipUnlessServerVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) {
gte, err := ServerVersionGTE(v, c)
if err != nil {
Failf("Failed to get server version: %v", err)
}
if !gte {
Skipf("Not supported for server versions before %q", v)
}
}
func SkipIfMissingResource(clientPool dynamic.ClientPool, gvr schema.GroupVersionResource, namespace string) {
dynamicClient, err := clientPool.ClientForGroupVersionResource(gvr)
if err != nil {
Failf("Unexpected error getting dynamic client for %v: %v", gvr.GroupVersion(), err)
}
apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
_, err = dynamicClient.Resource(&apiResource, namespace).List(metav1.ListOptions{})
if err != nil {
// not all resources support list, so we ignore those
if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
Skipf("Could not find %s resource, skipping test: %#v", gvr, err)
}
Failf("Unexpected error getting %v: %v", gvr, err)
}
}
// ProvidersWithSSH are those providers where each node is accessible with SSH
var ProvidersWithSSH = []string{"gce", "gke", "aws", "local"}
type podCondition func(pod *v1.Pod) (bool, error)
// logPodStates logs basic info of provided pods for debugging.
func logPodStates(pods []v1.Pod) {
// Find maximum widths for pod, node, and phase strings for column printing.
maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE")
for i := range pods {
pod := &pods[i]
if len(pod.ObjectMeta.Name) > maxPodW {
maxPodW = len(pod.ObjectMeta.Name)
}
if len(pod.Spec.NodeName) > maxNodeW {
maxNodeW = len(pod.Spec.NodeName)
}
if len(pod.Status.Phase) > maxPhaseW {
maxPhaseW = len(pod.Status.Phase)
}
}
// Increase widths by one to separate by a single space.
maxPodW++
maxNodeW++
maxPhaseW++
maxGraceW++
// Log pod info. * does space padding, - makes them left-aligned.
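	// e.g. fmt.Sprintf("%-[1]*[2]s", 8, "POD") yields "POD     ": the explicit
	// argument indexes pair each string with its computed column width.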
Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS")
for _, pod := range pods {
grace := ""
if pod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds)
}
Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions)
}
Logf("") // Final empty line helps for readability.
}
// errorBadPodsStates creates an error message with basic info about bad pods, for debugging.
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string {
errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
	// Print bad pods info only if there are at most 10 bad pods
if len(badPods) > 10 {
return errStr + "There are too many bad pods. Please check log for details."
}
buf := bytes.NewBuffer(nil)
w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
for _, badPod := range badPods {
grace := ""
if badPod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds)
}
podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%+v",
badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions)
fmt.Fprintln(w, podInfo)
}
w.Flush()
return errStr + buf.String()
}
// WaitForPodsSuccess waits till all pods matching the given selector enter
// the Success state. The caller is expected to only invoke this method once the
// pods have been created.
func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[string]string, timeout time.Duration) error {
successPodSelector := labels.SelectorFromSet(successPodLabels)
start, badPods, desiredPods := time.Now(), []v1.Pod{}, 0
if wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: successPodSelector.String()})
if err != nil {
Logf("Error getting pods in namespace %q: %v", ns, err)
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
if len(podList.Items) == 0 {
Logf("Waiting for pods to enter Success, but no pods in %q match label %v", ns, successPodLabels)
return true, nil
}
badPods = []v1.Pod{}
desiredPods = len(podList.Items)
for _, pod := range podList.Items {
if pod.Status.Phase != v1.PodSucceeded {
badPods = append(badPods, pod)
}
}
successPods := len(podList.Items) - len(badPods)
Logf("%d / %d pods in namespace %q are in Success state (%d seconds elapsed)",
successPods, len(podList.Items), ns, int(time.Since(start).Seconds()))
if len(badPods) == 0 {
return true, nil
}
return false, nil
}) != nil {
logPodStates(badPods)
LogPodsWithLabels(c, ns, successPodLabels, Logf)
return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "SUCCESS", timeout))
}
return nil
}
// WaitForPodsRunningReady waits up to timeout to ensure that all pods in
// namespace ns are either running and ready, or failed but controlled by a
// controller. Also, it ensures that at least minPods are running and
// ready. It has separate behavior from other 'wait for' pods functions in
// that it requests the list of pods on every iteration. This is useful, for
// example, in cluster startup, because the number of pods increases while
// waiting. All pods that are in SUCCESS state are not counted.
//
// If ignoreLabels is not empty, pods matching this selector are ignored.
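//
// A sketch of a typical call (the namespace and numbers are illustrative):
//
//	err := WaitForPodsRunningReady(c, "kube-system", 8, 0, 10*time.Minute, nil)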
func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
ignoreSelector := labels.SelectorFromSet(ignoreLabels)
start := time.Now()
Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
timeout, minPods, ns)
wg := sync.WaitGroup{}
wg.Add(1)
var ignoreNotReady bool
badPods := []v1.Pod{}
desiredPods := 0
notReady := int32(0)
if wait.PollImmediate(Poll, timeout, func() (bool, error) {
// We get the new list of pods, replication controllers, and
// replica sets in every iteration because more pods come
// online during startup and we want to ensure they are also
// checked.
replicas, replicaOk := int32(0), int32(0)
rcList, err := c.CoreV1().ReplicationControllers(ns).List(metav1.ListOptions{})
if err != nil {
Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for _, rc := range rcList.Items {
replicas += *rc.Spec.Replicas
replicaOk += rc.Status.ReadyReplicas
}
rsList, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(metav1.ListOptions{})
if err != nil {
Logf("Error getting replication sets in namespace %q: %v", ns, err)
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for _, rs := range rsList.Items {
replicas += *rs.Spec.Replicas
replicaOk += rs.Status.ReadyReplicas
}
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
if err != nil {
Logf("Error getting pods in namespace '%s': %v", ns, err)
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
nOk := int32(0)
notReady = int32(0)
badPods = []v1.Pod{}
desiredPods = len(podList.Items)
for _, pod := range podList.Items {
if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
continue
}
res, err := testutil.PodRunningReady(&pod)
switch {
case res && err == nil:
nOk++
case pod.Status.Phase == v1.PodSucceeded:
Logf("The status of Pod %s is Succeeded which is unexpected", pod.ObjectMeta.Name)
badPods = append(badPods, pod)
// it doesn't make sense to wait for this pod
return false, errors.New("unexpected Succeeded pod state")
case pod.Status.Phase != v1.PodFailed:
Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
notReady++
badPods = append(badPods, pod)
default:
if metav1.GetControllerOf(&pod) == nil {
Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
badPods = append(badPods, pod)
}
//ignore failed pods that are controlled by some controller
}
}
Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
return true, nil
}
ignoreNotReady = (notReady <= allowedNotReadyPods)
logPodStates(badPods)
return false, nil
}) != nil {
if !ignoreNotReady {
return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout))
}
Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
}
return nil
}
func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) {
for _, container := range pod.Spec.Containers {
if strings.Contains(container.Name, containerNameSubstr) {
// Contains() matches all strings if substr is empty
logs, err := GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
if err != nil {
logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
if err != nil {
logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
}
}
logFunc("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName)
logFunc("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name)
}
}
}
func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) {
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
if err != nil {
logFunc("Error getting pods in namespace '%s': %v", ns, err)
return
}
logFunc("Running kubectl logs on non-ready containers in %v", ns)
for _, pod := range podList.Items {
if res, err := testutil.PodRunningReady(&pod); !res || err != nil {
kubectlLogPod(c, pod, "", Logf)
}
}
}
func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string, logFunc func(ftm string, args ...interface{})) {
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
if err != nil {
logFunc("Error getting pods in namespace %q: %v", ns, err)
return
}
logFunc("Running kubectl logs on pods with labels %v in %v", match, ns)
for _, pod := range podList.Items {
kubectlLogPod(c, pod, "", logFunc)
}
}
func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[string]string, containerSubstr string, logFunc func(ftm string, args ...interface{})) {
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
if err != nil {
Logf("Error getting pods in namespace %q: %v", ns, err)
return
}
for _, pod := range podList.Items {
kubectlLogPod(c, pod, containerSubstr, logFunc)
}
}
// DeleteNamespaces deletes all namespaces that match the given delete and skip filters.
// Filter is by simple strings.Contains; first skip filter, then delete filter.
// Returns the list of deleted namespaces or an error.
func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) {
By("Deleting namespaces")
nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
var deleted []string
var wg sync.WaitGroup
OUTER:
for _, item := range nsList.Items {
if skipFilter != nil {
for _, pattern := range skipFilter {
if strings.Contains(item.Name, pattern) {
continue OUTER
}
}
}
if deleteFilter != nil {
var shouldDelete bool
for _, pattern := range deleteFilter {
if strings.Contains(item.Name, pattern) {
shouldDelete = true
break
}
}
if !shouldDelete {
continue OUTER
}
}
wg.Add(1)
deleted = append(deleted, item.Name)
go func(nsName string) {
defer wg.Done()
defer GinkgoRecover()
Expect(c.CoreV1().Namespaces().Delete(nsName, nil)).To(Succeed())
Logf("namespace : %v api call to delete is complete ", nsName)
}(item.Name)
}
wg.Wait()
return deleted, nil
}
func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeout time.Duration) error {
By("Waiting for namespaces to vanish")
nsMap := map[string]bool{}
for _, ns := range namespaces {
nsMap[ns] = true
}
	// Now poll until all namespaces have been eradicated.
return wait.Poll(2*time.Second, timeout,
func() (bool, error) {
nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
if err != nil {
return false, err
}
for _, item := range nsList.Items {
if _, ok := nsMap[item.Name]; ok {
return false, nil
}
}
return true, nil
})
}
func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error {
w, err := c.CoreV1().ServiceAccounts(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName}))
if err != nil {
return err
}
_, err = watch.Until(timeout, w, conditions.ServiceAccountHasSecrets)
return err
}
func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err)
return err
}
Logf("Get pod %q in namespace %q failed, ignoring for %v. Error: %v", podName, ns, Poll, err)
continue
}
// log now so that current pod info is reported before calling `condition()`
Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
podName, pod.Status.Phase, pod.Status.Reason, podutil.IsPodReady(pod), time.Since(start))
if done, err := condition(pod); done {
if err == nil {
Logf("Pod %q satisfied condition %q", podName, desc)
}
return err
}
}
return fmt.Errorf("Gave up after waiting %v for pod %q to be %q", timeout, podName, desc)
}
// WaitForMatchPodsCondition finds match pods based on the input ListOptions.
// waits and checks if all match pods are in the given podCondition
func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(opts)
if err != nil {
return err
}
conditionNotMatch := []string{}
for _, pod := range pods.Items {
done, err := condition(&pod)
if done && err != nil {
return fmt.Errorf("Unexpected error: %v", err)
}
if !done {
conditionNotMatch = append(conditionNotMatch, format.Pod(&pod))
}
}
if len(conditionNotMatch) <= 0 {
return err
}
Logf("%d pods are not %s: %v", len(conditionNotMatch), desc, conditionNotMatch)
}
return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout)
}
// WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned
// the default service account is what is associated with pods when they do not specify a service account
// as a result, pods are not able to be provisioned in a namespace until the service account is provisioned
func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace string) error {
return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout)
}
// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err != nil {
Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
continue
} else {
if pv.Status.Phase == phase {
Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
return nil
} else {
Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
}
}
}
return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
}
// WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err == nil {
Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start))
continue
} else {
if apierrs.IsNotFound(err) {
Logf("PersistentVolume %s was removed", pvName)
return nil
} else {
Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
}
}
}
return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout)
}
// WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
if err != nil {
Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
continue
} else {
if pvc.Status.Phase == phase {
Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
return nil
} else {
Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
}
}
}
return fmt.Errorf("PersistentVolumeClaim %s not in phase %s within %v", pvcName, phase, timeout)
}
// CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name.
// Please see NewFramework instead of using this directly.
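//
// A sketch of typical usage (the base name and labels are illustrative):
//
//	ns, err := CreateTestingNS("mytest", c, map[string]string{"suite": "smoke"})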
func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) {
if labels == nil {
labels = map[string]string{}
}
labels["e2e-run"] = string(RunId)
namespaceObj := &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName),
Namespace: "",
Labels: labels,
},
Status: v1.NamespaceStatus{},
}
// Be robust about making the namespace creation call.
var got *v1.Namespace
if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) {
var err error
got, err = c.CoreV1().Namespaces().Create(namespaceObj)
if err != nil {
Logf("Unexpected error while creating namespace: %v", err)
return false, nil
}
return true, nil
}); err != nil {
return nil, err
}
if TestContext.VerifyServiceAccount {
if err := WaitForDefaultServiceAccountInNamespace(c, got.Name); err != nil {
			// Even if we fail to create a serviceAccount in the namespace,
			// we have successfully created a namespace.
			// So, return the created namespace.
return got, err
}
}
return got, nil
}
// CheckTestingNSDeletedExcept checks whether all e2e based existing namespaces are in the Terminating state
// and waits until they are finally deleted. It ignores namespace skip.
func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error {
// TODO: Since we don't have support for bulk resource deletion in the API,
// while deleting a namespace we are deleting all objects from that namespace
// one by one (one deletion == one API call). This basically exposes us to
// throttling - currently controller-manager has a limit of max 20 QPS.
// Once #10217 is implemented and used in namespace-controller, deleting all
// object from a given namespace should be much faster and we will be able
// to lower this timeout.
// However, now Density test is producing ~26000 events and Load capacity test
// is producing ~35000 events, thus assuming there are no other requests it will
// take ~30 minutes to fully delete the namespace. Thus I'm setting it to 60
// minutes to avoid any timeouts here.
timeout := 60 * time.Minute
Logf("Waiting for terminating namespaces to be deleted...")
for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) {
namespaces, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
if err != nil {
Logf("Listing namespaces failed: %v", err)
continue
}
terminating := 0
for _, ns := range namespaces.Items {
if strings.HasPrefix(ns.ObjectMeta.Name, "e2e-tests-") && ns.ObjectMeta.Name != skip {
if ns.Status.Phase == v1.NamespaceActive {
return fmt.Errorf("Namespace %s is active", ns.ObjectMeta.Name)
}
terminating++
}
}
if terminating == 0 {
return nil
}
}
return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out")
}
// deleteNS deletes the provided namespace, waits for it to be completely deleted, and then checks
// whether there are any pods remaining in a non-terminating state.
func deleteNS(c clientset.Interface, clientPool dynamic.ClientPool, namespace string, timeout time.Duration) error {
startTime := time.Now()
if err := c.CoreV1().Namespaces().Delete(namespace, nil); err != nil {
return err
}
// wait for namespace to delete or timeout.
err := wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
if _, err := c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{}); err != nil {
if apierrs.IsNotFound(err) {
return true, nil
}
Logf("Error while waiting for namespace to be terminated: %v", err)
return false, nil
}
return false, nil
})
// verify there is no more remaining content in the namespace
remainingContent, cerr := hasRemainingContent(c, clientPool, namespace)
if cerr != nil {
return cerr
}
// if content remains, dump information about the namespace and the system for flake debugging.
remainingPods := 0
missingTimestamp := 0
if remainingContent {
// log information about namespace, and set of namespaces in api server to help flake detection
logNamespace(c, namespace)
logNamespaces(c, namespace)
// if we can, check if there were pods remaining with no timestamp.
remainingPods, missingTimestamp, _ = countRemainingPods(c, namespace)
}
// a timeout waiting for namespace deletion happened!
if err != nil {
// some content remains in the namespace
if remainingContent {
// pods remain
if remainingPods > 0 {
if missingTimestamp != 0 {
// pods remained, but were not undergoing deletion (namespace controller is probably culprit)
return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v, pods missing deletion timestamp: %v", namespace, err, remainingPods, missingTimestamp)
}
// but they were all undergoing deletion (kubelet is probably culprit, check NodeLost)
return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v", namespace, err, remainingPods)
}
// other content remains (namespace controller is probably screwed up)
return fmt.Errorf("namespace %v was not deleted with limit: %v, namespaced content other than pods remain", namespace, err)
}
// no remaining content, but namespace was not deleted (namespace controller is probably wedged)
return fmt.Errorf("namespace %v was not deleted with limit: %v, namespace is empty but is not yet removed", namespace, err)
}
Logf("namespace %v deletion completed in %s", namespace, time.Now().Sub(startTime))
return nil
}
// logNamespaces logs the number of namespaces by phase.
// namespace is the namespace the test was operating against and that failed to delete; it is logged so it can be grepped for.
func logNamespaces(c clientset.Interface, namespace string) {
namespaceList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
if err != nil {
Logf("namespace: %v, unable to list namespaces: %v", namespace, err)
return
}
numActive := 0
numTerminating := 0
for _, namespace := range namespaceList.Items {
if namespace.Status.Phase == v1.NamespaceActive {
numActive++
} else {
numTerminating++
}
}
Logf("namespace: %v, total namespaces: %v, active: %v, terminating: %v", namespace, len(namespaceList.Items), numActive, numTerminating)
}
// logNamespace logs detail about a namespace
func logNamespace(c clientset.Interface, namespace string) {
ns, err := c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
Logf("namespace: %v no longer exists", namespace)
return
}
Logf("namespace: %v, unable to get namespace due to error: %v", namespace, err)
return
}
Logf("namespace: %v, DeletionTimetamp: %v, Finalizers: %v, Phase: %v", ns.Name, ns.DeletionTimestamp, ns.Spec.Finalizers, ns.Status.Phase)
}
// countRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp.
func countRemainingPods(c clientset.Interface, namespace string) (int, int, error) {
// check for remaining pods
pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
return 0, 0, err
}
// nothing remains!
if len(pods.Items) == 0 {
return 0, 0, nil
}
// stuff remains, log about it
logPodStates(pods.Items)
// check if there were any pods with missing deletion timestamp
numPods := len(pods.Items)
missingTimestamp := 0
for _, pod := range pods.Items {
if pod.DeletionTimestamp == nil {
missingTimestamp++
}
}
return numPods, missingTimestamp, nil
}
// isDynamicDiscoveryError returns true if the error is a group discovery error
// only for groups expected to be created/deleted dynamically during e2e tests
func isDynamicDiscoveryError(err error) bool {
if !discovery.IsGroupDiscoveryFailedError(err) {
return false
}
discoveryErr := err.(*discovery.ErrGroupDiscoveryFailed)
for gv := range discoveryErr.Groups {
switch gv.Group {
case "mygroup.example.com":
// custom_resource_definition
// garbage_collector
case "wardle.k8s.io":
// aggregator
default:
Logf("discovery error for unexpected group: %#v", gv)
return false
}
}
return true
}
// hasRemainingContent checks if there is remaining content in the namespace via API discovery
func hasRemainingContent(c clientset.Interface, clientPool dynamic.ClientPool, namespace string) (bool, error) {
// some tests generate their own framework.Client rather than the default
// TODO: ensure every test call has a configured clientPool
if clientPool == nil {
return false, nil
}
// find out what content is supported on the server
// Since extension apiserver is not always available, e.g. metrics server sometimes goes down,
// add retry here.
resources, err := waitForServerPreferredNamespacedResources(c.Discovery(), 30*time.Second)
if err != nil {
return false, err
}
groupVersionResources, err := discovery.GroupVersionResources(resources)
if err != nil {
return false, err
}
// TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798
ignoredResources := sets.NewString("bindings")
contentRemaining := false
// log how many items of each remaining resource type are on the server.
for gvr := range groupVersionResources {
// get a client for this group version...
dynamicClient, err := clientPool.ClientForGroupVersionResource(gvr)
if err != nil {
// not all resource types support list, so some errors here are normal depending on the resource type.
Logf("namespace: %s, unable to get client - gvr: %v, error: %v", namespace, gvr, err)
continue
}
// get the api resource
apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
// TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798
if ignoredResources.Has(apiResource.Name) {
Logf("namespace: %s, resource: %s, ignored listing per whitelist", namespace, apiResource.Name)
continue
}
obj, err := dynamicClient.Resource(&apiResource, namespace).List(metav1.ListOptions{})
if err != nil {
// not all resources support list, so we ignore those
if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
continue
}
return false, err
}
unstructuredList, ok := obj.(*unstructured.UnstructuredList)
if !ok {
return false, fmt.Errorf("namespace: %s, resource: %s, expected *unstructured.UnstructuredList, got %#v", namespace, apiResource.Name, obj)
}
if len(unstructuredList.Items) > 0 {
Logf("namespace: %s, resource: %s, items remaining: %v", namespace, apiResource.Name, len(unstructuredList.Items))
contentRemaining = true
}
}
return contentRemaining, nil
}
func ContainerInitInvariant(older, newer runtime.Object) error {
oldPod := older.(*v1.Pod)
newPod := newer.(*v1.Pod)
if len(oldPod.Spec.InitContainers) == 0 {
return nil
}
if len(oldPod.Spec.InitContainers) != len(newPod.Spec.InitContainers) {
return fmt.Errorf("init container list changed")
}
if oldPod.UID != newPod.UID {
return fmt.Errorf("two different pods exist in the condition: %s vs %s", oldPod.UID, newPod.UID)
}
if err := initContainersInvariants(oldPod); err != nil {
return err
}
if err := initContainersInvariants(newPod); err != nil {
return err
}
oldInit, _, _ := podInitialized(oldPod)
newInit, _, _ := podInitialized(newPod)
if oldInit && !newInit {
// TODO: we may in the future enable resetting PodInitialized = false if the kubelet needs to restart it
// from scratch
return fmt.Errorf("pod cannot be initialized and then regress to not being initialized")
}
return nil
}
func podInitialized(pod *v1.Pod) (ok bool, failed bool, err error) {
allInit := true
initFailed := false
for _, s := range pod.Status.InitContainerStatuses {
switch {
case initFailed && s.State.Waiting == nil:
return allInit, initFailed, fmt.Errorf("container %s is after a failed container but isn't waiting", s.Name)
// Once an earlier container has been seen that is still initializing, every later container must be waiting.
case !allInit && s.State.Waiting == nil:
return allInit, initFailed, fmt.Errorf("container %s is after an initializing container but isn't waiting", s.Name)
case s.State.Terminated == nil:
allInit = false
case s.State.Terminated.ExitCode != 0:
allInit = false
initFailed = true
case !s.Ready:
return allInit, initFailed, fmt.Errorf("container %s initialized but isn't marked as ready", s.Name)
}
}
return allInit, initFailed, nil
}
func initContainersInvariants(pod *v1.Pod) error {
allInit, initFailed, err := podInitialized(pod)
if err != nil {
return err
}
if !allInit || initFailed {
for _, s := range pod.Status.ContainerStatuses {
if s.State.Waiting == nil || s.RestartCount != 0 {
return fmt.Errorf("container %s is not waiting but initialization not complete", s.Name)
}
if s.State.Waiting.Reason != "PodInitializing" {
return fmt.Errorf("container %s should have reason PodInitializing: %s", s.Name, s.State.Waiting.Reason)
}
}
}
_, c := podutil.GetPodCondition(&pod.Status, v1.PodInitialized)
if c == nil {
return fmt.Errorf("pod does not have initialized condition")
}
if c.LastTransitionTime.IsZero() {
return fmt.Errorf("PodInitialized condition should always have a transition time")
}
switch {
case c.Status == v1.ConditionUnknown:
return fmt.Errorf("PodInitialized condition should never be Unknown")
case c.Status == v1.ConditionTrue && (initFailed || !allInit):
return fmt.Errorf("PodInitialized condition was True but all not all containers initialized")
case c.Status == v1.ConditionFalse && (!initFailed && allInit):
return fmt.Errorf("PodInitialized condition was False but all containers initialized")
}
return nil
}
type InvariantFunc func(older, newer runtime.Object) error
func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error {
errs := sets.NewString()
for i := range events {
j := i + 1
if j >= len(events) {
continue
}
for _, fn := range fns {
if err := fn(events[i].Object, events[j].Object); err != nil {
errs.Insert(err.Error())
}
}
}
if errs.Len() > 0 {
return fmt.Errorf("invariants violated:\n* %s", strings.Join(errs.List(), "\n* "))
}
return nil
}
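// exampleCheckInitInvariants is a hedged sketch (not called by any test) of how
// CheckInvariants is meant to be fed: a sequence of watch events collected for
// a single pod, checked pairwise against ContainerInitInvariant.
func exampleCheckInitInvariants(events []watch.Event) error {
return CheckInvariants(events, ContainerInitInvariant)
}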
// Waits the default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if the timeout occurs first, or if the pod goes into a failed state.
func WaitForPodRunningInNamespace(c clientset.Interface, pod *v1.Pod) error {
if pod.Status.Phase == v1.PodRunning {
return nil
}
return WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, PodStartTimeout)
}
// Waits the default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if the timeout occurs first, or if the pod goes into a failed state.
func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error {
return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, PodStartTimeout)
}
// Waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running.
// Returns an error if the timeout occurs first, or if the pod goes into a failed state.
func waitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace string) error {
return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, slowPodStartTimeout)
}
func WaitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(Poll, timeout, podRunning(c, podName, namespace))
}
func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodRunning:
return true, nil
case v1.PodFailed, v1.PodSucceeded:
return false, conditions.ErrPodCompleted
}
return false, nil
}
}
// Waits the default amount of time (DefaultPodDeletionTimeout) for the specified pod to stop running.
// Returns an error if the timeout occurs first.
func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string) error {
return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, DefaultPodDeletionTimeout)
}
func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(Poll, timeout, podCompleted(c, podName, namespace))
}
func podCompleted(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodFailed, v1.PodSucceeded:
return true, nil
}
return false, nil
}
}
func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(Poll, timeout, podRunningAndReady(c, podName, namespace))
}
func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodFailed, v1.PodSucceeded:
return false, conditions.ErrPodCompleted
case v1.PodRunning:
return podutil.IsPodReady(pod), nil
}
return false, nil
}
}
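// exampleWaitForPod is a hypothetical sketch (not called by any test) of the
// usual ordering of the wait helpers above: first Running, then Ready. Reusing
// PodStartTimeout for the readiness wait is an assumption, not a rule.
func exampleWaitForPod(c clientset.Interface, pod *v1.Pod) error {
if err := WaitForPodRunningInNamespace(c, pod); err != nil {
return err
}
return waitTimeoutForPodReadyInNamespace(c, pod.Name, pod.Namespace, PodStartTimeout)
}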
// WaitForPodNotPending returns an error if it took too long for the pod to go out of the pending state.
func WaitForPodNotPending(c clientset.Interface, ns, podName string) error {
return wait.PollImmediate(Poll, PodStartTimeout, podNotPending(c, podName, ns))
}
func podNotPending(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodPending:
return false, nil
default:
return true, nil
}
}
}
// waitForPodTerminatedInNamespace returns an error if it takes too long for the pod to terminate,
// if the pod Get api returns an error (IsNotFound or other), or if the pod failed (and thus did not
// terminate) with an unexpected reason. Typically called to test that the passed-in pod is fully
// terminated (reason==""), but may be called to detect if a pod did *not* terminate according to
// the supplied reason.
func waitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error {
return WaitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", PodStartTimeout, func(pod *v1.Pod) (bool, error) {
// Only consider Failed pods. Successful pods will be deleted and detected in
// waitForPodCondition's Get call returning `IsNotFound`
if pod.Status.Phase == v1.PodFailed {
if pod.Status.Reason == reason { // short-circuit waitForPodCondition's loop
return true, nil
} else {
return true, fmt.Errorf("Expected pod %q in namespace %q to be terminated with reason %q, got reason: %q", podName, namespace, reason, pod.Status.Reason)
}
}
return false, nil
})
}
// waitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate.
// Unlike `waitForPodTerminatedInNamespace`, the pod's Phase and Reason are ignored. If the pod Get
// api returns IsNotFound then the wait stops and nil is returned. If the Get api returns an error other
// than "not found" then that error is returned and the wait stops.
func waitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error {
return wait.PollImmediate(Poll, timeout, func() (bool, error) {
_, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if apierrs.IsNotFound(err) {
return true, nil // done
}
if err != nil {
return true, err // stop wait with error
}
return false, nil
})
}
// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string, namespace string, timeout time.Duration) error {
return WaitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *v1.Pod) (bool, error) {
if pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName)
}
switch pod.Status.Phase {
case v1.PodSucceeded:
By("Saw pod success")
return true, nil
case v1.PodFailed:
return true, fmt.Errorf("pod %q failed with status: %+v", podName, pod.Status)
default:
return false, nil
}
})
}
// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or PodStartTimeout elapsed.
func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, PodStartTimeout)
}
// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or slowPodStartTimeout elapsed.
func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
}
// WaitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status.
func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.Duration) error {
options := metav1.ListOptions{FieldSelector: fields.Set{
"metadata.name": name,
"metadata.namespace": ns,
}.AsSelector().String()}
w, err := c.CoreV1().ReplicationControllers(ns).Watch(options)
if err != nil {
return err
}
_, err = watch.Until(timeout, w, func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
return false, apierrs.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "")
}
switch rc := event.Object.(type) {
case *v1.ReplicationController:
if rc.Name == name && rc.Namespace == ns &&
rc.Generation <= rc.Status.ObservedGeneration &&
*(rc.Spec.Replicas) == rc.Status.Replicas {
return true, nil
}
Logf("Waiting for rc %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d",
name, rc.Generation, rc.Status.ObservedGeneration, *(rc.Spec.Replicas), rc.Status.Replicas)
}
return false, nil
})
return err
}
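// exampleStabilizeRC is an illustrative sketch (not called by any test): after
// mutating an RC, wait for spec and status to agree, then check the pods
// themselves. The two-minute timeout is an assumption.
func exampleStabilizeRC(c clientset.Interface, ns, rcName string, replicas int32) error {
if err := WaitForRCToStabilize(c, ns, rcName, 2*time.Minute); err != nil {
return err
}
return VerifyPodsRunning(c, ns, rcName, false, replicas)
}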
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
Logf("Waiting for pod %s to disappear", podName)
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(options)
if err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
found := false
for _, pod := range pods.Items {
if pod.Name == podName {
Logf("Pod %s still exists", podName)
found = true
break
}
}
if !found {
Logf("Pod %s no longer exists", podName)
return true, nil
}
return false, nil
})
}
// WaitForService waits until the service appears (exist == true), or disappears (exist == false)
func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
switch {
case err == nil:
Logf("Service %s in namespace %s found.", name, namespace)
return exist, nil
case apierrs.IsNotFound(err):
Logf("Service %s in namespace %s disappeared.", name, namespace)
return !exist, nil
case !IsRetryableAPIError(err):
Logf("Non-retryable failure while getting service.")
return false, err
default:
Logf("Get service %s in namespace %s failed: %v", name, namespace, err)
return false, nil
}
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for service %s/%s %s: %v", namespace, name, stateMsg[exist], err)
}
return nil
}
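// exampleWaitForService is a hypothetical sketch (not called by any test):
// block until a service exists before the test starts poking at it. The
// interval and timeout values are illustrative assumptions.
func exampleWaitForService(c clientset.Interface, ns, name string) error {
return WaitForService(c, ns, name, true, Poll, 2*time.Minute)
}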
// WaitForServiceWithSelector waits until any service with given selector appears (exist == true), or disappears (exist == false)
func WaitForServiceWithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval,
timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
services, err := c.CoreV1().Services(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
switch {
// Check the error first: on a failed List call, services may be nil and
// touching services.Items would panic.
case err != nil && !IsRetryableAPIError(err):
Logf("Non-retryable failure while listing service.")
return false, err
case err != nil:
Logf("List service with %s in namespace %s failed: %v", selector.String(), namespace, err)
return false, nil
case len(services.Items) != 0:
Logf("Service with %s in namespace %s found.", selector.String(), namespace)
return exist, nil
default:
Logf("Service with %s in namespace %s disappeared.", selector.String(), namespace)
return !exist, nil
}
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for service with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err)
}
return nil
}
// WaitForServiceEndpointsNum waits until the number of endpoints that implement the service reaches expectNum.
func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
return wait.Poll(interval, timeout, func() (bool, error) {
Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
list, err := c.CoreV1().Endpoints(namespace).List(metav1.ListOptions{})
if err != nil {
return false, err
}
for _, e := range list.Items {
if e.Name == serviceName && countEndpointsNum(&e) == expectNum {
return true, nil
}
}
return false, nil
})
}
func countEndpointsNum(e *v1.Endpoints) int {
num := 0
for _, sub := range e.Subsets {
num += len(sub.Addresses)
}
return num
}
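// exampleWaitForEndpoints is an illustrative sketch (not called by any test):
// block until the named service is backed by the expected number of endpoint
// addresses. The expected count and timeout are assumptions.
func exampleWaitForEndpoints(c clientset.Interface, ns, svcName string) error {
return WaitForServiceEndpointsNum(c, ns, svcName, 3, Poll, 2*time.Minute)
}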
func WaitForEndpoint(c clientset.Interface, ns, name string) error {
for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) {
endpoint, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{})
if apierrs.IsNotFound(err) {
Logf("Endpoint %s/%s is not ready yet", ns, name)
continue
}
Expect(err).NotTo(HaveOccurred())
if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 {
Logf("Endpoint %s/%s is not ready yet", ns, name)
continue
} else {
return nil
}
}
return fmt.Errorf("Failed to get endpoints for %s/%s", ns, name)
}
// Context for checking pods' responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with their own pod name.
type podProxyResponseChecker struct {
c clientset.Interface
ns string
label labels.Selector
controllerName string
respondName bool // Whether the pod should respond with its own name.
pods *v1.PodList
}
func PodProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) podProxyResponseChecker {
return podProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
}
// CheckAllResponses issues GETs to all pods in the context and verifies that they
// reply with their own pod name.
func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) {
successes := 0
options := metav1.ListOptions{LabelSelector: r.label.String()}
currentPods, err := r.c.CoreV1().Pods(r.ns).List(options)
Expect(err).NotTo(HaveOccurred())
for i, pod := range r.pods.Items {
// Check that the replica list remains unchanged, otherwise we have problems.
if !isElementOf(pod.UID, currentPods) {
return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
}
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
defer cancel()
body, err := r.c.CoreV1().RESTClient().Get().
Context(ctx).
Namespace(r.ns).
Resource("pods").
SubResource("proxy").
Name(string(pod.Name)).
Do().
Raw()
if err != nil {
if ctx.Err() != nil {
// We may encounter errors here because of a race between the pod readiness and apiserver
// proxy. So, we log the error and retry if this occurs.
Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
return false, nil
}
Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
continue
}
// The response checker expects the pod's name unless !respondName, in
// which case it just checks for a non-empty response.
got := string(body)
what := ""
if r.respondName {
what = "expected"
want := pod.Name
if got != want {
Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
r.controllerName, i+1, pod.Name, want, got)
continue
}
} else {
what = "non-empty"
if len(got) == 0 {
Logf("Controller %s: Replica %d [%s] expected non-empty response",
r.controllerName, i+1, pod.Name)
continue
}
}
successes++
Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
}
if successes < len(r.pods.Items) {
return false, nil
}
return true, nil
}
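// examplePollProxyResponses is a hedged sketch (not called by any test) of how
// the checker is wired into a poll loop; PodsResponding below does exactly this
// for the common case. The controller name is an illustrative assumption.
func examplePollProxyResponses(c clientset.Interface, ns string, label labels.Selector, pods *v1.PodList) error {
checker := PodProxyResponseChecker(c, ns, label, "example-controller", true, pods)
return wait.PollImmediate(Poll, podRespondingTimeout, checker.CheckAllResponses)
}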
// ServerVersionGTE returns true if v is greater than or equal to the server
// version.
//
// TODO(18726): This should be incorporated into client.VersionInterface.
func ServerVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) (bool, error) {
serverVersion, err := c.ServerVersion()
if err != nil {
return false, fmt.Errorf("Unable to get server version: %v", err)
}
sv, err := utilversion.ParseSemantic(serverVersion.GitVersion)
if err != nil {
return false, fmt.Errorf("Unable to parse server version %q: %v", serverVersion.GitVersion, err)
}
return sv.AtLeast(v), nil
}
func SkipUnlessKubectlVersionGTE(v *utilversion.Version) {
gte, err := KubectlVersionGTE(v)
if err != nil {
Failf("Failed to get kubectl version: %v", err)
}
if !gte {
Skipf("Not supported for kubectl versions before %q", v)
}
}
// KubectlVersionGTE returns true if the kubectl version is greater than or
// equal to v.
func KubectlVersionGTE(v *utilversion.Version) (bool, error) {
kv, err := KubectlVersion()
if err != nil {
return false, err
}
return kv.AtLeast(v), nil
}
// KubectlVersion gets the version of kubectl that's currently being used (see
// --kubectl-path in e2e.go to use an alternate kubectl).
func KubectlVersion() (*utilversion.Version, error) {
output := RunKubectlOrDie("version", "--client")
matches := gitVersionRegexp.FindStringSubmatch(output)
if len(matches) != 2 {
return nil, fmt.Errorf("Could not find kubectl version in output %v", output)
}
// Don't use the full match, as it contains "GitVersion:\"" and a
// trailing "\"". Just use the submatch.
return utilversion.ParseSemantic(matches[1])
}
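// exampleSkipOnOldKubectl is a hypothetical sketch (not called by any test):
// gate a test on the local kubectl version. The version string is illustrative.
func exampleSkipOnOldKubectl() {
SkipUnlessKubectlVersionGTE(utilversion.MustParseSemantic("v1.8.0"))
}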
func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error {
By("trying to dial each unique pod")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return wait.PollImmediate(Poll, podRespondingTimeout, PodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
}
func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return PodsCreatedByLabel(c, ns, name, replicas, label)
}
func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) {
timeout := 2 * time.Minute
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
options := metav1.ListOptions{LabelSelector: label.String()}
// List the pods, making sure we observe all the replicas.
pods, err := c.CoreV1().Pods(ns).List(options)
if err != nil {
return nil, err
}
created := []v1.Pod{}
for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil {
continue
}
created = append(created, pod)
}
Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)
if int32(len(created)) == replicas {
pods.Items = created
return pods, nil
}
}
return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
By("ensuring each pod is running")
e := []error{}
errCh := make(chan error)
for _, pod := range pods.Items {
go func(p v1.Pod) {
errCh <- WaitForPodRunningInNamespace(c, &p)
}(pod)
}
for range pods.Items {
err := <-errCh
if err != nil {
e = append(e, err)
}
}
return e
}
func VerifyPods(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(c, ns, name, wantName, replicas, true)
}
func VerifyPodsRunning(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(c, ns, name, wantName, replicas, false)
}
func podRunningMaybeResponding(c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error {
pods, err := PodsCreated(c, ns, name, replicas)
if err != nil {
return err
}
e := podsRunning(c, pods)
if len(e) > 0 {
return fmt.Errorf("failed to wait for pods running: %v", e)
}
if checkResponding {
err = PodsResponding(c, ns, name, wantName, pods)
if err != nil {
return fmt.Errorf("failed to wait for pods responding: %v", err)
}
}
return nil
}
func ServiceResponding(c clientset.Interface, ns, name string) error {
By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name))
return wait.PollImmediate(Poll, ServiceRespondingTimeout, func() (done bool, err error) {
proxyRequest, errProxy := GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
if errProxy != nil {
Logf("Failed to get services proxy request: %v:", errProxy)
return false, nil
}
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
defer cancel()
body, err := proxyRequest.Namespace(ns).
Context(ctx).
Name(name).
Do().
Raw()
if err != nil {
if ctx.Err() != nil {
Failf("Failed to GET from service %s: %v", name, err)
return true, err
}
Logf("Failed to GET from service %s: %v:", name, err)
return false, nil
}
got := string(body)
if len(got) == 0 {
Logf("Service %s: expected non-empty response", name)
return false, nil // err is nil here, so keep polling for a non-empty response
}
Logf("Service %s: found nonempty answer: %s", name, got)
return true, nil
})
}
func RestclientConfig(kubeContext string) (*clientcmdapi.Config, error) {
Logf(">>> kubeConfig: %s", TestContext.KubeConfig)
if TestContext.KubeConfig == "" {
return nil, fmt.Errorf("KubeConfig must be specified to load client config")
}
c, err := clientcmd.LoadFromFile(TestContext.KubeConfig)
if err != nil {
return nil, fmt.Errorf("error loading KubeConfig: %v", err.Error())
}
if kubeContext != "" {
Logf(">>> kubeContext: %s", kubeContext)
c.CurrentContext = kubeContext
}
return c, nil
}
type ClientConfigGetter func() (*restclient.Config, error)
func LoadConfig() (*restclient.Config, error) {
if TestContext.NodeE2E {
// This is a node e2e test, apply the node e2e configuration
return &restclient.Config{Host: TestContext.Host}, nil
}
c, err := RestclientConfig(TestContext.KubeContext)
if err != nil {
if TestContext.KubeConfig == "" {
return restclient.InClusterConfig()
} else {
return nil, err
}
}
return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}).ClientConfig()
}
func LoadInternalClientset() (*internalclientset.Clientset, error) {
config, err := LoadConfig()
if err != nil {
return nil, fmt.Errorf("error creating client: %v", err.Error())
}
return internalclientset.NewForConfig(config)
}
func LoadClientset() (*clientset.Clientset, error) {
config, err := LoadConfig()
if err != nil {
return nil, fmt.Errorf("error creating client: %v", err.Error())
}
return clientset.NewForConfig(config)
}
// randomSuffix provides a random string to append to pods, services, and rcs.
// TODO: Allow service names to have the same form as names
// for pods and replication controllers so we don't
// need to use such a function and can instead
// use the UUID utility function.
func randomSuffix() string {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
return strconv.Itoa(r.Int() % 10000)
}
func ExpectNoError(err error, explain ...interface{}) {
ExpectNoErrorWithOffset(1, err, explain...)
}
// ExpectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller
// (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f").
func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
if err != nil {
Logf("Unexpected error occurred: %v", err)
}
ExpectWithOffset(1+offset, err).NotTo(HaveOccurred(), explain...)
}
func ExpectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) {
var err error
for i := 0; i < maxRetries; i++ {
err = fn()
if err == nil {
return
}
Logf("(Attempt %d of %d) Unexpected error occurred: %v", i+1, maxRetries, err)
}
ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
// Cleanup stops everything from filePath in namespace ns and checks that everything matching the selectors in that namespace was correctly stopped.
func Cleanup(filePath, ns string, selectors ...string) {
By("using delete to clean up resources")
var nsArg string
if ns != "" {
nsArg = fmt.Sprintf("--namespace=%s", ns)
}
RunKubectlOrDie("delete", "--grace-period=0", "-f", filePath, nsArg)
AssertCleanup(ns, selectors...)
}
// AssertCleanup asserts that cleanup of a namespace with respect to the given selectors occurred.
func AssertCleanup(ns string, selectors ...string) {
var nsArg string
if ns != "" {
nsArg = fmt.Sprintf("--namespace=%s", ns)
}
for _, selector := range selectors {
resources := RunKubectlOrDie("get", "rc,svc", "-l", selector, "--no-headers", nsArg)
if resources != "" {
Failf("Resources left running after stop:\n%s", resources)
}
pods := RunKubectlOrDie("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
if pods != "" {
Failf("Pods left unterminated after stop:\n%s", pods)
}
}
}
// KubectlCmd runs the kubectl executable through the wrapper script.
func KubectlCmd(args ...string) *exec.Cmd {
defaultArgs := []string{}
// Reference a --server option so tests can run anywhere.
if TestContext.Host != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+TestContext.Host)
}
if TestContext.KubeConfig != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig)
// Reference the KubeContext
if TestContext.KubeContext != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+TestContext.KubeContext)
}
} else {
if TestContext.CertDir != "" {
defaultArgs = append(defaultArgs,
fmt.Sprintf("--certificate-authority=%s", filepath.Join(TestContext.CertDir, "ca.crt")),
fmt.Sprintf("--client-certificate=%s", filepath.Join(TestContext.CertDir, "kubecfg.crt")),
fmt.Sprintf("--client-key=%s", filepath.Join(TestContext.CertDir, "kubecfg.key")))
}
}
kubectlArgs := append(defaultArgs, args...)
// We allow users to specify a path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh",
// and so on.
cmd := exec.Command(TestContext.KubectlPath, kubectlArgs...)
// The caller will invoke this and wait on it.
return cmd
}
// kubectlBuilder is used to build, customize and execute a kubectl Command.
// Add more functions to customize the builder as needed.
type kubectlBuilder struct {
cmd *exec.Cmd
timeout <-chan time.Time
}
func NewKubectlCommand(args ...string) *kubectlBuilder {
b := new(kubectlBuilder)
b.cmd = KubectlCmd(args...)
return b
}
func (b *kubectlBuilder) WithEnv(env []string) *kubectlBuilder {
b.cmd.Env = env
return b
}
func (b *kubectlBuilder) WithTimeout(t <-chan time.Time) *kubectlBuilder {
b.timeout = t
return b
}
func (b kubectlBuilder) WithStdinData(data string) *kubectlBuilder {
b.cmd.Stdin = strings.NewReader(data)
return &b
}
func (b kubectlBuilder) WithStdinReader(reader io.Reader) *kubectlBuilder {
b.cmd.Stdin = reader
return &b
}
func (b kubectlBuilder) ExecOrDie() string {
str, err := b.Exec()
Logf("stdout: %q", str)
// In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
// Note that we're still dying after retrying so that we can get visibility to triage it further.
if isTimeout(err) {
Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.")
time.Sleep(2 * time.Second)
retryStr, retryErr := RunKubectl("version")
Logf("stdout: %q", retryStr)
Logf("err: %v", retryErr)
}
Expect(err).NotTo(HaveOccurred())
return str
}
func isTimeout(err error) bool {
switch err := err.(type) {
case net.Error:
if err.Timeout() {
return true
}
case *url.Error:
if err, ok := err.Err.(net.Error); ok && err.Timeout() {
return true
}
}
return false
}
func (b kubectlBuilder) Exec() (string, error) {
var stdout, stderr bytes.Buffer
cmd := b.cmd
cmd.Stdout, cmd.Stderr = &stdout, &stderr
Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately
if err := cmd.Start(); err != nil {
return "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err)
}
errCh := make(chan error, 1)
go func() {
errCh <- cmd.Wait()
}()
select {
case err := <-errCh:
if err != nil {
rc := 127
if ee, ok := err.(*exec.ExitError); ok {
// Extract the real exit code from the error before logging it.
rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus())
Logf("rc: %d", rc)
}
return "", uexec.CodeExitError{
Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err),
Code: rc,
}
}
case <-b.timeout:
b.cmd.Process.Kill()
return "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v\n", cmd, cmd.Stdout, cmd.Stderr)
}
Logf("stderr: %q", stderr.String())
return stdout.String(), nil
}
// RunKubectlOrDie is a convenience wrapper over kubectlBuilder
func RunKubectlOrDie(args ...string) string {
return NewKubectlCommand(args...).ExecOrDie()
}
// RunKubectl is a convenience wrapper over kubectlBuilder
func RunKubectl(args ...string) (string, error) {
return NewKubectlCommand(args...).Exec()
}
// RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
func RunKubectlOrDieInput(data string, args ...string) string {
return NewKubectlCommand(args...).WithStdinData(data).ExecOrDie()
}
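// exampleApplyManifest is an illustrative sketch (not called by any test): feed
// a manifest to kubectl via stdin, scoped to a namespace. The manifest contents
// are whatever the caller supplies; the flag usage mirrors Cleanup above.
func exampleApplyManifest(ns, manifestYAML string) string {
return RunKubectlOrDieInput(manifestYAML, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
}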
func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
stdout, err = cmd.StdoutPipe()
if err != nil {
return
}
stderr, err = cmd.StderrPipe()
if err != nil {
return
}
Logf("Asynchronously running '%s %s'", cmd.Path, strings.Join(cmd.Args, " "))
err = cmd.Start()
return
}
// Rough equivalent of ctrl+c for cleaning up processes. Intended to be run in defer.
func TryKill(cmd *exec.Cmd) {
if err := cmd.Process.Kill(); err != nil {
Logf("ERROR failed to kill command %v! The process may leak", cmd)
}
}
// testContainerOutputMatcher runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func (f *Framework) testContainerOutputMatcher(scenarioName string,
pod *v1.Pod,
containerIndex int,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
Failf("Invalid container index: %d", containerIndex)
}
ExpectNoError(f.MatchContainerOutput(pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
}
// MatchContainerOutput creates a pod and waits for all of its containers to exit with success.
// It then tests that the matcher with each expectedOutput matches the output of the specified container.
func (f *Framework) MatchContainerOutput(
pod *v1.Pod,
containerName string,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
ns := pod.ObjectMeta.Namespace
if ns == "" {
ns = f.Namespace.Name
}
podClient := f.PodClientNS(ns)
createdPod := podClient.Create(pod)
defer func() {
By("delete the pod")
podClient.DeleteSync(createdPod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
}()
// Wait for client pod to complete.
podErr := WaitForPodSuccessInNamespace(f.ClientSet, createdPod.Name, ns)
// Grab its logs. Get host first.
podStatus, err := podClient.Get(createdPod.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get pod status: %v", err)
}
if podErr != nil {
// Pod failed. Dump all logs from all containers to see what's wrong
for _, container := range podStatus.Spec.Containers {
logs, err := GetPodLogs(f.ClientSet, ns, podStatus.Name, container.Name)
if err != nil {
Logf("Failed to get logs from node %q pod %q container %q: %v",
podStatus.Spec.NodeName, podStatus.Name, container.Name, err)
continue
}
Logf("Output of node %q pod %q container %q: %s", podStatus.Spec.NodeName, podStatus.Name, container.Name, logs)
}
return fmt.Errorf("expected pod %q success: %v", createdPod.Name, podErr)
}
Logf("Trying to get logs from node %s pod %s container %s: %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
// Sometimes the actual containers take a second to get started; try to get their logs.
logs, err := GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName)
if err != nil {
Logf("Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
}
for _, expected := range expectedOutput {
m := matcher(expected)
matches, err := m.Match(logs)
if err != nil {
return fmt.Errorf("expected %q in container output: %v", expected, err)
} else if !matches {
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
}
}
return nil
}
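// exampleMatchOutput is a hedged sketch (not called by any test): assert that a
// named container's log contains each expected substring. ContainSubstring is
// the dot-imported gomega matcher used elsewhere in the framework.
func (f *Framework) exampleMatchOutput(pod *v1.Pod, containerName string, expected []string) error {
return f.MatchContainerOutput(pod, containerName, expected, ContainSubstring)
}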
type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error)
func DumpEventsInNamespace(eventsLister EventsLister, namespace string) {
By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
events, err := eventsLister(metav1.ListOptions{}, namespace)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Found %d events.", len(events.Items)))
// Sort events by their first timestamp
sortedEvents := events.Items
if len(sortedEvents) > 1 {
sort.Sort(byFirstTimestamp(sortedEvents))
}
for _, e := range sortedEvents {
Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
}
// Note that we don't wait for any Cleanup to propagate, which means
// that if you delete a bunch of pods right before ending your test,
// you may or may not see the killing/deletion/Cleanup events.
}
func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
DumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) {
return c.CoreV1().Events(ns).List(opts)
}, namespace)
// If the cluster is large, then the following logs are basically useless, because:
// 1. it takes tens of minutes or hours to grab all of them
// 2. there are so many of them that working with them is mostly impossible
// So we dump them only if the cluster is relatively small.
maxNodesForDump := 20
if nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}); err == nil {
if len(nodes.Items) <= maxNodesForDump {
dumpAllPodInfo(c)
dumpAllNodeInfo(c)
} else {
Logf("skipping dumping cluster info - cluster too large")
}
} else {
Logf("unable to fetch node list: %v", err)
}
}
// byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker.
type byFirstTimestamp []v1.Event
func (o byFirstTimestamp) Len() int { return len(o) }
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byFirstTimestamp) Less(i, j int) bool {
if o[i].FirstTimestamp.Equal(&o[j].FirstTimestamp) {
return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
}
return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp)
}
func dumpAllPodInfo(c clientset.Interface) {
pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{})
if err != nil {
Logf("unable to fetch pod debug info: %v", err)
}
logPodStates(pods.Items)
}
func dumpAllNodeInfo(c clientset.Interface) {
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
Logf("unable to fetch node list: %v", err)
return
}
names := make([]string, len(nodes.Items))
for ix := range nodes.Items {
names[ix] = nodes.Items[ix].Name
}
DumpNodeDebugInfo(c, names, Logf)
}
func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
for _, n := range nodeNames {
logFunc("\nLogging node info for node %v", n)
node, err := c.CoreV1().Nodes().Get(n, metav1.GetOptions{})
if err != nil {
logFunc("Error getting node info %v", err)
}
logFunc("Node Info: %v", node)
logFunc("\nLogging kubelet events for node %v", n)
for _, e := range getNodeEvents(c, n) {
logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
}
logFunc("\nLogging pods the kubelet thinks is on node %v", n)
podList, err := GetKubeletPods(c, n)
if err != nil {
logFunc("Unable to retrieve kubelet pods for node %v: %v", n, err)
continue
}
for _, p := range podList.Items {
logFunc("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
for _, c := range p.Status.InitContainerStatuses {
logFunc("\tInit container %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
for _, c := range p.Status.ContainerStatuses {
logFunc("\tContainer %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
}
HighLatencyKubeletOperations(c, 10*time.Second, n, logFunc)
// TODO: Log node resource info
}
}
// getNodeEvents returns the kubelet events from the given node. This includes kubelet
// restart and node unhealthy events. Note that listing events like this will mess
// with latency metrics, so beware of calling it during a test.
func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
selector := fields.Set{
"involvedObject.kind": "Node",
"involvedObject.name": nodeName,
"involvedObject.namespace": metav1.NamespaceAll,
"source": "kubelet",
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(options)
if err != nil {
Logf("Unexpected error retrieving node events %v", err)
return []v1.Event{}
}
return events.Items
}
// waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList {
var nodes *v1.NodeList
var err error
if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
nodes, err = c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return true, nil
}) != nil {
ExpectNoError(err, "Non-retryable failure or timed out while listing nodes for e2e cluster.")
}
return nodes
}
// A node is schedulable if:
// 1) it doesn't have the "unschedulable" field set
// 2) its Ready condition is set to true
// 3) it doesn't have a NetworkUnavailable condition set to true
func isNodeSchedulable(node *v1.Node) bool {
nodeReady := IsNodeConditionSetAsExpected(node, v1.NodeReady, true)
networkReady := IsNodeConditionUnset(node, v1.NodeNetworkUnavailable) ||
IsNodeConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false)
return !node.Spec.Unschedulable && nodeReady && networkReady
}
// Test whether a fake pod can be scheduled on "node", given its current taints.
func isNodeUntainted(node *v1.Node) bool {
fakePod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "fake-not-scheduled",
Namespace: "fake-not-scheduled",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-not-scheduled",
Image: "fake-not-scheduled",
},
},
},
}
nodeInfo := schedulercache.NewNodeInfo()
nodeInfo.SetNode(node)
fit, _, err := predicates.PodToleratesNodeTaints(fakePod, nil, nodeInfo)
if err != nil {
Failf("Can't test predicates for node %s: %v", node.Name, err)
return false
}
return fit
}
// GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on.
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList) {
nodes = waitListSchedulableNodesOrDie(c)
// previous tests may have caused failures of some nodes. Let's skip
// 'Not Ready' nodes, just in case (there is no need to fail the test).
FilterNodes(nodes, func(node v1.Node) bool {
return isNodeSchedulable(&node) && isNodeUntainted(&node)
})
return nodes
}
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
var notSchedulable []*v1.Node
attempt := 0
return wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
attempt++
notSchedulable = nil
opts := metav1.ListOptions{
ResourceVersion: "0",
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
}
nodes, err := c.CoreV1().Nodes().List(opts)
if err != nil {
Logf("Unexpected error listing nodes: %v", err)
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !isNodeSchedulable(node) {
notSchedulable = append(notSchedulable, node)
}
}
// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
// to make it possible e.g. for incorrect deployment of some small percentage
// of nodes (which we allow in cluster validation). Some nodes that are not
// provisioned correctly at startup will never become ready (e.g. when something
// won't install correctly), so we can't expect them to be ready at any point.
//
// However, we only allow non-ready nodes with some specific reasons.
if len(notSchedulable) > 0 {
// In large clusters, log them only every 10th pass.
if len(nodes.Items) >= largeClusterThreshold && attempt%10 == 0 {
Logf("Unschedulable nodes:")
for i := range notSchedulable {
Logf("-> %s Ready=%t Network=%t",
notSchedulable[i].Name,
IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false))
}
Logf("================================")
}
}
return len(notSchedulable) <= TestContext.AllowedNotReadyNodes, nil
})
}
func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration {
// With SecretManager(ConfigMapManager), we may have to wait up to full sync period +
// TTL of secret(configmap) to elapse before the Kubelet projects the update into the
// volume and the container picks it up.
// So this timeout is based on default Kubelet sync period (1 minute) + maximum TTL for
// secret(configmap) that's based on cluster size + additional time as a fudge factor.
secretTTL, err := GetNodeTTLAnnotationValue(c)
if err != nil {
Logf("Couldn't get node TTL annotation (using default value of 0): %v", err)
}
podLogTimeout := 240*time.Second + secretTTL
return podLogTimeout
}
func GetNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil || len(nodes.Items) == 0 {
return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err)
}
// Since the TTL the kubelet uses is stored in the node object, for timeout
// purposes we take it from the first node (all of them should be the same).
node := &nodes.Items[0]
if node.Annotations == nil {
return time.Duration(0), fmt.Errorf("No annotations found on the node")
}
value, ok := node.Annotations[v1.ObjectTTLAnnotationKey]
if !ok {
return time.Duration(0), fmt.Errorf("No TTL annotation found on the node")
}
intValue, err := strconv.Atoi(value)
if err != nil {
return time.Duration(0), fmt.Errorf("Cannot convert TTL annotation from %#v to int", *node)
}
return time.Duration(intValue) * time.Second, nil
}
func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
ExpectNoError(testutil.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
}
func AddOrUpdateLabelOnNodeAndReturnOldValue(c clientset.Interface, nodeName string, labelKey, labelValue string) string {
var oldValue string
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
ExpectNoError(err)
oldValue = node.Labels[labelKey]
ExpectNoError(testutil.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
return oldValue
}
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
By("verifying the node has the label " + labelKey + " " + labelValue)
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
ExpectNoError(err)
Expect(node.Labels[labelKey]).To(Equal(labelValue))
}
func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) {
ExpectNoError(controller.RemoveTaintOffNode(c, nodeName, nil, &taint))
VerifyThatTaintIsGone(c, nodeName, &taint)
}
func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Taint) {
ExpectNoError(controller.AddOrUpdateTaintOnNode(c, nodeName, &taint))
}
// RemoveLabelOffNode is for cleaning up labels temporarily added to a node;
// it won't fail if the target label doesn't exist or has already been removed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
By("removing the label " + labelKey + " off the node " + nodeName)
ExpectNoError(testutil.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
By("verifying the node doesn't have the label " + labelKey)
ExpectNoError(testutil.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
}
func VerifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) {
By("verifying the node doesn't have the taint " + taint.ToString())
nodeUpdated, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
ExpectNoError(err)
if taintutils.TaintExists(nodeUpdated.Spec.Taints, taint) {
Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
}
}
func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) {
By("verifying the node has the taint " + taint.ToString())
if has, err := NodeHasTaint(c, nodeName, taint); !has {
ExpectNoError(err)
Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
}
}
func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}
nodeTaints := node.Spec.Taints
if len(nodeTaints) == 0 || !taintutils.TaintExists(nodeTaints, taint) {
return false, nil
}
return true, nil
}
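// exampleTaintRoundTrip is a hypothetical sketch (not called by any test) of
// the usual add/verify/clean-up pattern for the taint helpers above. The taint
// key, value, and effect are illustrative assumptions.
func exampleTaintRoundTrip(c clientset.Interface, nodeName string) {
taint := v1.Taint{Key: "example-key", Value: "example-value", Effect: v1.TaintEffectNoSchedule}
AddOrUpdateTaintOnNode(c, nodeName, taint)
// Ensure cleanup runs even if the verification below fails the test.
defer RemoveTaintOffNode(c, nodeName, taint)
ExpectNodeHasTaint(c, nodeName, &taint)
}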
// AddOrUpdateAvoidPodOnNode adds the avoidPods annotation to the node, overriding any existing value.
func AddOrUpdateAvoidPodOnNode(c clientset.Interface, nodeName string, avoidPods v1.AvoidPods) {
err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
avoidPodsData, err := json.Marshal(avoidPods)
ExpectNoError(err)
if node.Annotations == nil {
node.Annotations = make(map[string]string)
}
node.Annotations[v1.PreferAvoidPodsAnnotationKey] = string(avoidPodsData)
_, err = c.CoreV1().Nodes().Update(node)
if err != nil {
if !apierrs.IsConflict(err) {
ExpectNoError(err)
} else {
Logf("Conflict when trying to add/update avoidPonds %v to %v", avoidPods, nodeName)
}
}
return true, nil
})
ExpectNoError(err)
}
// RemoveAvoidPodsOffNode removes the AvoidPods annotation from the node. It does not fail if no such annotation exists.
func RemoveAvoidPodsOffNode(c clientset.Interface, nodeName string) {
err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
if node.Annotations == nil {
return true, nil
}
delete(node.Annotations, v1.PreferAvoidPodsAnnotationKey)
_, err = c.CoreV1().Nodes().Update(node)
if err != nil {
if !apierrs.IsConflict(err) {
ExpectNoError(err)
} else {
Logf("Conflict when trying to remove avoidPods to %v", nodeName)
}
}
return true, nil
})
ExpectNoError(err)
}
func getScalerForKind(internalClientset internalclientset.Interface, kind schema.GroupKind) (kubectl.Scaler, error) {
return kubectl.ScalerFor(kind, internalClientset)
}
func ScaleResource(
clientset clientset.Interface,
internalClientset internalclientset.Interface,
ns, name string,
size uint,
wait bool,
kind schema.GroupKind,
) error {
By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size))
scaler, err := getScalerForKind(internalClientset, kind)
if err != nil {
return err
}
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
if err = scaler.Scale(ns, name, size, nil, waitForScale, waitForReplicas); err != nil {
return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err)
}
if !wait {
return nil
}
return WaitForControlledPodsRunning(clientset, ns, name, kind)
}
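// Usage sketch (illustrative only; the namespace "e2e-tests" and Deployment
// name "frontend" are hypothetical): scale a Deployment to 5 replicas and
// wait for its controlled pods to be Running.
//
//	err := ScaleResource(clientset, internalClientset,
//		"e2e-tests", "frontend", 5, true,
//		extensionsinternal.Kind("Deployment"))
//	if err != nil {
//		Failf("failed to scale: %v", err)
//	}
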
// Wait up to 10 minutes for pods to become Running.
func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind schema.GroupKind) error {
rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
return err
}
selector, err := getSelectorFromRuntimeObject(rtObject)
if err != nil {
return err
}
err = testutil.WaitForPodsWithLabelRunning(c, ns, selector)
if err != nil {
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err)
}
return nil
}
// Wait up to PodListTimeout for getting pods of the specified controller name and return them.
func WaitForControlledPods(c clientset.Interface, ns, name string, kind schema.GroupKind) (pods *v1.PodList, err error) {
rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
return nil, err
}
selector, err := getSelectorFromRuntimeObject(rtObject)
if err != nil {
return nil, err
}
return WaitForPodsWithLabel(c, ns, selector)
}
// Returns true if all the specified pods are scheduled, else returns false.
func podsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (bool, error) {
	podStore := testutil.NewPodStore(c, ns, label, fields.Everything())
	defer podStore.Stop()
	pods := podStore.List()
if len(pods) == 0 {
return false, nil
}
for _, pod := range pods {
if pod.Spec.NodeName == "" {
return false, nil
}
}
return true, nil
}
// Wait for all matching pods to become scheduled and at least one
// matching pod exists. Return the list of matching pods.
func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
err = wait.PollImmediate(Poll, podScheduledBeforeTimeout,
func() (bool, error) {
pods, err = WaitForPodsWithLabel(c, ns, label)
if err != nil {
return false, err
}
for _, pod := range pods.Items {
if pod.Spec.NodeName == "" {
return false, nil
}
}
return true, nil
})
return pods, err
}
// Wait up to PodListTimeout for getting pods with certain label
func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
for t := time.Now(); time.Since(t) < PodListTimeout; time.Sleep(Poll) {
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err = c.CoreV1().Pods(ns).List(options)
if err != nil {
if IsRetryableAPIError(err) {
continue
}
return
}
if len(pods.Items) > 0 {
break
}
}
if pods == nil || len(pods.Items) == 0 {
err = fmt.Errorf("Timeout while waiting for pods with label %v", label)
}
return
}
// Wait for exact amount of matching pods to become running and ready.
// Return the list of matching pods.
func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) {
var current int
err = wait.Poll(Poll, timeout,
func() (bool, error) {
pods, err := WaitForPodsWithLabel(c, ns, label)
if err != nil {
Logf("Failed to list pods: %v", err)
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
current = 0
for _, pod := range pods.Items {
if flag, err := testutil.PodRunningReady(&pod); err == nil && flag == true {
current++
}
}
if current != num {
Logf("Got %v pods running and ready, expect: %v", current, num)
return false, nil
}
return true, nil
})
return pods, err
}
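// Usage sketch (illustrative only; the label set app=nginx is hypothetical):
// wait up to 5 minutes for exactly 3 matching pods to be running and ready.
//
//	selector := labels.SelectorFromSet(labels.Set{"app": "nginx"})
//	pods, err := WaitForPodsWithLabelRunningReady(c, ns, selector, 3, 5*time.Minute)
//	if err != nil {
//		Failf("pods never became ready: %v", err)
//	}
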
func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) {
switch kind {
case api.Kind("ReplicationController"):
return c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"):
return c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"):
return c.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("DaemonSet"):
return c.ExtensionsV1beta1().DaemonSets(ns).Get(name, metav1.GetOptions{})
case batchinternal.Kind("Job"):
return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
default:
return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind)
}
}
func deleteResource(c clientset.Interface, kind schema.GroupKind, ns, name string, deleteOption *metav1.DeleteOptions) error {
switch kind {
case api.Kind("ReplicationController"):
return c.CoreV1().ReplicationControllers(ns).Delete(name, deleteOption)
case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"):
return c.ExtensionsV1beta1().ReplicaSets(ns).Delete(name, deleteOption)
case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"):
return c.ExtensionsV1beta1().Deployments(ns).Delete(name, deleteOption)
case extensionsinternal.Kind("DaemonSet"):
return c.ExtensionsV1beta1().DaemonSets(ns).Delete(name, deleteOption)
case batchinternal.Kind("Job"):
return c.BatchV1().Jobs(ns).Delete(name, deleteOption)
default:
return fmt.Errorf("Unsupported kind when deleting: %v", kind)
}
}
func getSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
switch typed := obj.(type) {
case *v1.ReplicationController:
return labels.SelectorFromSet(typed.Spec.Selector), nil
case *extensions.ReplicaSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensions.Deployment:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensions.DaemonSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *batch.Job:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
default:
return nil, fmt.Errorf("Unsupported kind when getting selector: %v", obj)
}
}
func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
switch typed := obj.(type) {
case *v1.ReplicationController:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensions.ReplicaSet:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensions.Deployment:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *batch.Job:
		// TODO: currently we use pause pods, so this is OK. Once we switch to Pods
		// that actually finish, we'll need a better way to do this.
if typed.Spec.Parallelism != nil {
return *typed.Spec.Parallelism, nil
}
return 0, nil
default:
return -1, fmt.Errorf("Unsupported kind when getting number of replicas: %v", obj)
}
}
func getReaperForKind(internalClientset internalclientset.Interface, kind schema.GroupKind) (kubectl.Reaper, error) {
return kubectl.ReaperFor(kind, internalClientset)
}
// DeleteResourceAndPods deletes a given resource and all pods it spawned
func DeleteResourceAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, kind schema.GroupKind, ns, name string) error {
By(fmt.Sprintf("deleting %v %s in namespace %s", kind, name, ns))
rtObject, err := getRuntimeObjectForKind(clientset, kind, ns, name)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("%v %s not found: %v", kind, name, err)
return nil
}
return err
}
selector, err := getSelectorFromRuntimeObject(rtObject)
if err != nil {
return err
}
reaper, err := getReaperForKind(internalClientset, kind)
if err != nil {
return err
}
ps, err := podStoreForSelector(clientset, ns, selector)
if err != nil {
return err
}
defer ps.Stop()
startTime := time.Now()
err = reaper.Stop(ns, name, 0, nil)
if apierrs.IsNotFound(err) {
Logf("%v %s was already deleted: %v", kind, name, err)
return nil
}
if err != nil {
return fmt.Errorf("error while stopping %v: %s: %v", kind, name, err)
}
	deleteTime := time.Since(startTime)
Logf("Deleting %v %s took: %v", kind, name, deleteTime)
err = waitForPodsInactive(ps, 100*time.Millisecond, 10*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
}
	terminatePodTime := time.Since(startTime) - deleteTime
Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
// this is to relieve namespace controller's pressure when deleting the
// namespace after a test.
err = waitForPodsGone(ps, 100*time.Millisecond, 10*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
}
	gcPodTime := time.Since(startTime) - terminatePodTime
Logf("Garbage collecting %v %s pods took: %v", kind, name, gcPodTime)
return nil
}
// DeleteResourceAndWaitForGC deletes only given resource and waits for GC to delete the pods.
func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns, name string) error {
By(fmt.Sprintf("deleting %v %s in namespace %s, will wait for the garbage collector to delete the pods", kind, name, ns))
rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("%v %s not found: %v", kind, name, err)
return nil
}
return err
}
selector, err := getSelectorFromRuntimeObject(rtObject)
if err != nil {
return err
}
replicas, err := getReplicasFromRuntimeObject(rtObject)
if err != nil {
return err
}
ps, err := podStoreForSelector(c, ns, selector)
if err != nil {
return err
}
defer ps.Stop()
startTime := time.Now()
falseVar := false
deleteOption := &metav1.DeleteOptions{OrphanDependents: &falseVar}
err = deleteResource(c, kind, ns, name, deleteOption)
if err != nil && apierrs.IsNotFound(err) {
Logf("%v %s was already deleted: %v", kind, name, err)
return nil
}
if err != nil {
return err
}
	deleteTime := time.Since(startTime)
Logf("Deleting %v %s took: %v", kind, name, deleteTime)
var interval, timeout time.Duration
switch {
case replicas < 100:
interval = 100 * time.Millisecond
case replicas < 1000:
interval = 1 * time.Second
default:
interval = 10 * time.Second
}
if replicas < 5000 {
timeout = 10 * time.Minute
} else {
timeout = time.Duration(replicas/gcThroughput) * time.Second
// gcThroughput is pretty strict now, add a bit more to it
timeout = timeout + 3*time.Minute
}
err = waitForPodsInactive(ps, interval, timeout)
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
}
	terminatePodTime := time.Since(startTime) - deleteTime
Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
err = waitForPodsGone(ps, interval, 10*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
}
return nil
}
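// Usage sketch (illustrative only; the ReplicaSet name "my-rs" is
// hypothetical): delete a ReplicaSet and let the garbage collector clean up
// the pods it controlled.
//
//	err := DeleteResourceAndWaitForGC(c, extensionsinternal.Kind("ReplicaSet"), ns, "my-rs")
//	if err != nil {
//		Failf("deletion failed: %v", err)
//	}
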
// podStoreForSelector creates a PodStore that monitors pods from given namespace matching given selector.
// It waits until the reflector does a List() before returning.
func podStoreForSelector(c clientset.Interface, ns string, selector labels.Selector) (*testutil.PodStore, error) {
ps := testutil.NewPodStore(c, ns, selector, fields.Everything())
err := wait.Poll(100*time.Millisecond, 2*time.Minute, func() (bool, error) {
if len(ps.Reflector.LastSyncResourceVersion()) != 0 {
return true, nil
}
return false, nil
})
return ps, err
}
// waitForPodsInactive waits until there are no active pods left in the PodStore.
// This is to make a fair comparison of deletion time between DeleteResourceAndPods
// and DeleteResourceAndWaitForGC, because the RC controller decreases status.replicas
// when the pod is inactive.
func waitForPodsInactive(ps *testutil.PodStore, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
pods := ps.List()
for _, pod := range pods {
if controller.IsPodActive(pod) {
return false, nil
}
}
return true, nil
})
}
// waitForPodsGone waits until there are no pods left in the PodStore.
func waitForPodsGone(ps *testutil.PodStore, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
if pods := ps.List(); len(pods) == 0 {
return true, nil
}
return false, nil
})
}
// WaitForPodsReady waits up to 5 minutes for all pods labeled name=<name> in
// ns to have been available for at least minReadySeconds.
func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := metav1.ListOptions{LabelSelector: label.String()}
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
pods, err := c.CoreV1().Pods(ns).List(options)
if err != nil {
return false, nil
}
for _, pod := range pods.Items {
if !podutil.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) {
return false, nil
}
}
return true, nil
})
}
// Waits for the number of events on the given object to reach a desired count.
func WaitForEvents(c clientset.Interface, ns string, objOrRef runtime.Object, desiredEventsCount int) error {
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, objOrRef)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
}
eventsCount := len(events.Items)
if eventsCount == desiredEventsCount {
return true, nil
}
if eventsCount < desiredEventsCount {
return false, nil
}
// Number of events has exceeded the desired count.
return false, fmt.Errorf("number of events has exceeded the desired count, eventsCount: %d, desiredCount: %d", eventsCount, desiredEventsCount)
})
}
// Waits for the number of events on the given object to be at least a desired count.
func WaitForPartialEvents(c clientset.Interface, ns string, objOrRef runtime.Object, atLeastEventsCount int) error {
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, objOrRef)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
}
eventsCount := len(events.Items)
if eventsCount >= atLeastEventsCount {
return true, nil
}
return false, nil
})
}
type updateDSFunc func(*extensions.DaemonSet)
func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *extensions.DaemonSet, err error) {
daemonsets := c.ExtensionsV1beta1().DaemonSets(namespace)
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if ds, err = daemonsets.Get(name, metav1.GetOptions{}); err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(ds)
if ds, err = daemonsets.Update(ds); err == nil {
Logf("Updating DaemonSet %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided updated to DaemonSet %q: %v", name, updateErr)
}
return ds, pollErr
}
// NodeAddresses returns the first address of the given type of each node.
func NodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {
hosts := []string{}
for _, n := range nodelist.Items {
for _, addr := range n.Status.Addresses {
// Use the first external IP address we find on the node, and
// use at most one per node.
// TODO(roberthbailey): Use the "preferred" address for the node, once
// such a thing is defined (#2462).
if addr.Type == addrType {
hosts = append(hosts, addr.Address)
break
}
}
}
return hosts
}
// NodeSSHHosts returns SSH-able host names for all schedulable nodes - this excludes master node.
// It returns an error if it can't find an external IP for every node, though it still returns all
// hosts that it found in that case.
func NodeSSHHosts(c clientset.Interface) ([]string, error) {
nodelist := waitListSchedulableNodesOrDie(c)
// TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).
hosts := NodeAddresses(nodelist, v1.NodeExternalIP)
// Error if any node didn't have an external IP.
if len(hosts) != len(nodelist.Items) {
return hosts, fmt.Errorf(
"only found %d external IPs on nodes, but found %d nodes. Nodelist: %v",
len(hosts), len(nodelist.Items), nodelist)
}
sshHosts := make([]string, 0, len(hosts))
for _, h := range hosts {
sshHosts = append(sshHosts, net.JoinHostPort(h, sshPort))
}
return sshHosts, nil
}
// SSHResult holds the outcome of an SSH command: the user and host it ran
// against, the command itself, its output streams, and its exit code.
type SSHResult struct {
User string
Host string
Cmd string
Stdout string
Stderr string
Code int
}
// NodeExec execs the given cmd on node via SSH. Note that the nodeName is an SSH-able name,
// e.g. the name returned by framework.GetMasterHost(). This is also not guaranteed to work across
// cloud providers since it involves ssh.
func NodeExec(nodeName, cmd string) (SSHResult, error) {
return SSH(cmd, net.JoinHostPort(nodeName, sshPort), TestContext.Provider)
}
// SSH synchronously SSHs to a node running on provider and runs cmd. If there
// is no error performing the SSH, the stdout, stderr, and exit code are
// returned.
func SSH(cmd, host, provider string) (SSHResult, error) {
result := SSHResult{Host: host, Cmd: cmd}
// Get a signer for the provider.
signer, err := GetSigner(provider)
if err != nil {
return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err)
}
// RunSSHCommand will default to Getenv("USER") if user == "", but we're
// defaulting here as well for logging clarity.
result.User = os.Getenv("KUBE_SSH_USER")
if result.User == "" {
result.User = os.Getenv("USER")
}
stdout, stderr, code, err := sshutil.RunSSHCommand(cmd, result.User, host, signer)
result.Stdout = stdout
result.Stderr = stderr
result.Code = code
return result, err
}
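// Usage sketch (illustrative only; the host address 10.0.0.1 is
// hypothetical): run a command over SSH, log the result, and fail on a
// non-zero exit code.
//
//	result, err := SSH("uptime", net.JoinHostPort("10.0.0.1", sshPort), TestContext.Provider)
//	LogSSHResult(result)
//	if err != nil || result.Code != 0 {
//		Failf("ssh failed: %v (exit code %d)", err, result.Code)
//	}
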
func LogSSHResult(result SSHResult) {
remote := fmt.Sprintf("%s@%s", result.User, result.Host)
Logf("ssh %s: command: %s", remote, result.Cmd)
Logf("ssh %s: stdout: %q", remote, result.Stdout)
Logf("ssh %s: stderr: %q", remote, result.Stderr)
Logf("ssh %s: exit code: %d", remote, result.Code)
}
func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*SSHResult, error) {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP {
host = net.JoinHostPort(a.Address, sshPort)
break
}
}
if host == "" {
// No external IPs were found, let's try to use internal as plan B
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeInternalIP {
host = net.JoinHostPort(a.Address, sshPort)
break
}
}
}
if host == "" {
return nil, fmt.Errorf("couldn't find any IP address for node %s", node.Name)
}
Logf("SSH %q on %s(%s)", cmd, node.Name, host)
result, err := SSH(cmd, host, provider)
LogSSHResult(result)
if result.Code != 0 || err != nil {
return nil, fmt.Errorf("failed running %q: %v (exit code %d)",
cmd, err, result.Code)
}
return &result, nil
}
func IssueSSHCommand(cmd, provider string, node *v1.Node) error {
_, err := IssueSSHCommandWithResult(cmd, provider, node)
if err != nil {
return err
}
return nil
}
// NewHostExecPodSpec returns the pod spec of hostexec pod
func NewHostExecPodSpec(ns, name string) *v1.Pod {
immediate := int64(0)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "hostexec",
Image: imageutils.GetE2EImage(imageutils.Hostexec),
ImagePullPolicy: v1.PullIfNotPresent,
},
},
HostNetwork: true,
SecurityContext: &v1.PodSecurityContext{},
TerminationGracePeriodSeconds: &immediate,
},
}
return pod
}
// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
return RunKubectl("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-c", cmd)
}
// RunHostCmdOrDie calls RunHostCmd and dies on error.
func RunHostCmdOrDie(ns, name, cmd string) string {
stdout, err := RunHostCmd(ns, name, cmd)
Logf("stdout: %v", stdout)
ExpectNoError(err)
return stdout
}
// RunHostCmdWithRetries calls RunHostCmd and retries all errors
// until it succeeds or the specified timeout expires.
// This can be used with idempotent commands to deflake transient Node issues.
func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration) (string, error) {
start := time.Now()
for {
out, err := RunHostCmd(ns, name, cmd)
if err == nil {
return out, nil
}
if elapsed := time.Since(start); elapsed > timeout {
return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err)
}
Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err)
time.Sleep(interval)
}
}
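// Usage sketch (illustrative only; the pod name "hostexec" and the command
// are hypothetical): retry an idempotent command every 5 seconds for up to a
// minute to ride out transient node issues.
//
//	out, err := RunHostCmdWithRetries(ns, "hostexec",
//		"nslookup kubernetes.default", 5*time.Second, 1*time.Minute)
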
// LaunchHostExecPod launches a hostexec pod in the given namespace and waits
// until it's Running
func LaunchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod {
hostExecPod := NewHostExecPodSpec(ns, name)
pod, err := client.CoreV1().Pods(ns).Create(hostExecPod)
ExpectNoError(err)
err = WaitForPodRunningInNamespace(client, pod)
ExpectNoError(err)
return pod
}
// newExecPodSpec returns the pod spec of exec pod
func newExecPodSpec(ns, generateName string) *v1.Pod {
immediate := int64(0)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: generateName,
Namespace: ns,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &immediate,
Containers: []v1.Container{
{
Name: "exec",
Image: BusyBoxImage,
Command: []string{"sh", "-c", "while true; do sleep 5; done"},
},
},
},
}
return pod
}
// CreateExecPodOrFail creates a simple busybox pod in a sleep loop used as a
// vessel for kubectl exec commands.
// Returns the name of the created pod.
func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) string {
Logf("Creating new exec pod")
execPod := newExecPodSpec(ns, generateName)
if tweak != nil {
tweak(execPod)
}
created, err := client.CoreV1().Pods(ns).Create(execPod)
Expect(err).NotTo(HaveOccurred())
err = wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{})
if err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return retrievedPod.Status.Phase == v1.PodRunning, nil
})
Expect(err).NotTo(HaveOccurred())
return created.Name
}
func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort) {
By(fmt.Sprintf("Creating pod %s in namespace %s", name, ns))
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: GetPauseImageName(c),
Ports: containerPorts,
// Add a dummy environment variable to work around a docker issue.
// https://github.com/docker/docker/issues/14203
Env: []v1.EnvVar{{Name: "FOO", Value: " "}},
},
},
},
}
_, err := c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
}
func DeletePodOrFail(c clientset.Interface, ns, name string) {
By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns))
err := c.CoreV1().Pods(ns).Delete(name, nil)
Expect(err).NotTo(HaveOccurred())
}
// GetSigner returns an ssh.Signer for the provider ("gce", etc.) that can be
// used to SSH to their nodes.
func GetSigner(provider string) (ssh.Signer, error) {
// Get the directory in which SSH keys are located.
keydir := filepath.Join(os.Getenv("HOME"), ".ssh")
// Select the key itself to use. When implementing more providers here,
// please also add them to any SSH tests that are disabled because of signer
// support.
keyfile := ""
key := ""
switch provider {
case "gce", "gke", "kubemark":
keyfile = "google_compute_engine"
case "aws":
// If there is an env. variable override, use that.
aws_keyfile := os.Getenv("AWS_SSH_KEY")
if len(aws_keyfile) != 0 {
return sshutil.MakePrivateKeySignerFromFile(aws_keyfile)
}
// Otherwise revert to home dir
keyfile = "kube_aws_rsa"
case "vagrant":
keyfile = os.Getenv("VAGRANT_SSH_KEY")
if len(keyfile) != 0 {
return sshutil.MakePrivateKeySignerFromFile(keyfile)
}
return nil, fmt.Errorf("VAGRANT_SSH_KEY env variable should be provided")
case "local", "vsphere":
keyfile = os.Getenv("LOCAL_SSH_KEY") // maybe?
if len(keyfile) == 0 {
keyfile = "id_rsa"
}
case "skeleton":
keyfile = os.Getenv("KUBE_SSH_KEY")
if len(keyfile) == 0 {
keyfile = "id_rsa"
}
default:
return nil, fmt.Errorf("GetSigner(...) not implemented for %s", provider)
}
if len(key) == 0 {
key = filepath.Join(keydir, keyfile)
}
return sshutil.MakePrivateKeySignerFromFile(key)
}
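// Usage sketch (illustrative only): obtain a signer for the current provider
// before issuing SSH commands; on AWS this honors the AWS_SSH_KEY override.
//
//	signer, err := GetSigner(TestContext.Provider)
//	if err != nil {
//		Failf("couldn't get signer: %v", err)
//	}
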
// CheckPodsRunningReady returns whether all pods whose names are listed in
// podNames in namespace ns are running and ready, using c and waiting at most
// timeout.
func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return CheckPodsCondition(c, ns, podNames, timeout, testutil.PodRunningReady, "running and ready")
}
// CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
// listed in podNames in namespace ns are running and ready, or succeeded,
// using c and waiting at most timeout.
func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return CheckPodsCondition(c, ns, podNames, timeout, testutil.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
}
// CheckPodsCondition returns whether all pods whose names are listed in podNames
// in namespace ns are in the condition, using c and waiting at most timeout.
func CheckPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
np := len(podNames)
Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
type waitPodResult struct {
success bool
podName string
}
result := make(chan waitPodResult, len(podNames))
for _, podName := range podNames {
// Launch off pod readiness checkers.
go func(name string) {
err := WaitForPodCondition(c, ns, name, desc, timeout, condition)
result <- waitPodResult{err == nil, name}
}(podName)
}
// Wait for them all to finish.
success := true
for range podNames {
res := <-result
if !res.success {
Logf("Pod %[1]s failed to be %[2]s.", res.podName, desc)
success = false
}
}
Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
return success
}
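// Usage sketch (illustrative only; the pod names are hypothetical): check
// that two pods reach running-and-ready within 2 minutes.
//
//	ok := CheckPodsRunningReady(c, ns, []string{"pod-a", "pod-b"}, 2*time.Minute)
//	if !ok {
//		Failf("pods did not become running and ready")
//	}
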
// WaitForNodeToBeReady returns whether node name is ready within timeout.
func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool {
return WaitForNodeToBe(c, name, v1.NodeReady, true, timeout)
}
// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g. false or unknown) within
// timeout.
func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool {
return WaitForNodeToBe(c, name, v1.NodeReady, false, timeout)
}
func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) bool {
// Check the node readiness condition (logging all).
for _, cond := range node.Status.Conditions {
// Ensure that the condition type and the status matches as desired.
if cond.Type == conditionType {
			// For the NodeReady condition we also need to check that the
			// taints set by the NodeController are gone.
			if cond.Type == v1.NodeReady {
				hasNodeControllerTaints := false
				taints := node.Spec.Taints
for _, taint := range taints {
if taint.MatchTaint(nodectlr.UnreachableTaintTemplate) || taint.MatchTaint(nodectlr.NotReadyTaintTemplate) {
hasNodeControllerTaints = true
break
}
}
if wantTrue {
if (cond.Status == v1.ConditionTrue) && !hasNodeControllerTaints {
return true
} else {
msg := ""
if !hasNodeControllerTaints {
msg = fmt.Sprintf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
} else {
msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure",
conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
}
if !silent {
Logf(msg)
}
return false
}
} else {
// TODO: check if the Node is tainted once we enable NC notReady/unreachable taints by default
if cond.Status != v1.ConditionTrue {
return true
}
if !silent {
Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
}
}
if (wantTrue && (cond.Status == v1.ConditionTrue)) || (!wantTrue && (cond.Status != v1.ConditionTrue)) {
return true
} else {
if !silent {
Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
}
}
}
if !silent {
Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
}
return false
}
func IsNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, false)
}
func IsNodeConditionSetAsExpectedSilent(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, true)
}
func IsNodeConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool {
for _, cond := range node.Status.Conditions {
if cond.Type == conditionType {
return false
}
}
return true
}
// WaitForNodeToBe returns whether node "name's" condition state matches wantTrue
// within timeout. If wantTrue is true, it will ensure the node condition status
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func WaitForNodeToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
Logf("Couldn't get node %s", name)
continue
}
if IsNodeConditionSetAsExpected(node, conditionType, wantTrue) {
return true
}
}
Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
return false
}
// Checks whether all registered nodes are ready.
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
// default test add-ons.
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, TestContext.AllowedNotReadyNodes)
var notReady []*v1.Node
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !IsNodeConditionSetAsExpected(node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
// to make it possible e.g. for incorrect deployment of some small percentage
// of nodes (which we allow in cluster validation). Some nodes that are not
// provisioned correctly at startup will never become ready (e.g. when something
// won't install correctly), so we can't expect them to be ready at any point.
return len(notReady) <= TestContext.AllowedNotReadyNodes, nil
})
if err != nil && err != wait.ErrWaitTimeout {
return err
}
	if len(notReady) > TestContext.AllowedNotReadyNodes {
		names := make([]string, 0, len(notReady))
		for _, node := range notReady {
			names = append(names, node.Name)
		}
		return fmt.Errorf("Not ready nodes: %s", strings.Join(names, ", "))
	}
return nil
}
// checks whether all registered nodes are ready and all required Pods are running on them.
func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all nodes to be ready", timeout)
var notReady []v1.Node
var missingPodsPerNode map[string][]string
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for _, node := range nodes.Items {
if !IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
return false, err
}
systemPodsPerNode := make(map[string][]string)
for _, pod := range pods.Items {
if pod.Namespace == metav1.NamespaceSystem && pod.Status.Phase == v1.PodRunning {
if pod.Spec.NodeName != "" {
systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name)
}
}
}
missingPodsPerNode = make(map[string][]string)
for _, node := range nodes.Items {
if !system.IsMasterNode(node.Name) {
for _, requiredPod := range requiredPerNodePods {
foundRequired := false
for _, presentPod := range systemPodsPerNode[node.Name] {
if requiredPod.MatchString(presentPod) {
foundRequired = true
break
}
}
if !foundRequired {
missingPodsPerNode[node.Name] = append(missingPodsPerNode[node.Name], requiredPod.String())
}
}
}
}
return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
})
if err != nil && err != wait.ErrWaitTimeout {
return err
}
if len(notReady) > 0 {
return fmt.Errorf("Not ready nodes: %v", notReady)
}
if len(missingPodsPerNode) > 0 {
return fmt.Errorf("Not running system Pods: %v", missingPodsPerNode)
}
return nil
}
// Filters nodes in NodeList in place, removing nodes that do not
// satisfy the given condition
// TODO: consider merging with pkg/client/cache.NodeLister
func FilterNodes(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
var l []v1.Node
for _, node := range nodeList.Items {
if fn(node) {
l = append(l, node)
}
}
nodeList.Items = l
}
// ParseKVLines parses output that looks like lines containing "<key>: <val>"
// and returns <val> if <key> is found. Otherwise, it returns the empty string.
func ParseKVLines(output, key string) string {
delim := ":"
key = key + delim
for _, line := range strings.Split(output, "\n") {
pieces := strings.SplitAfterN(line, delim, 2)
if len(pieces) != 2 {
continue
}
k, v := pieces[0], pieces[1]
if k == key {
return strings.TrimSpace(v)
}
}
return ""
}
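// Example (illustrative only): given output of the form
//
//	name: kube-dns
//	replicas: 2
//
// ParseKVLines(output, "replicas") returns "2", and a key that is absent
// returns the empty string.
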
func RestartKubeProxy(host string) error {
// TODO: Make it work for all providers.
if !ProviderIs("gce", "gke", "aws") {
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
}
// kubelet will restart the kube-proxy since it's running in a static pod
Logf("Killing kube-proxy on node %v", host)
result, err := SSH("sudo pkill kube-proxy", host, TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart kube-proxy: %v", err)
}
// wait for kube-proxy to come back up
sshCmd := "sudo /bin/sh -c 'pgrep kube-proxy | wc -l'"
err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
Logf("Waiting for kubeproxy to come back up with %v on %v", sshCmd, host)
result, err := SSH(sshCmd, host, TestContext.Provider)
if err != nil {
return false, err
}
if result.Code != 0 {
LogSSHResult(result)
return false, fmt.Errorf("failed to run command, exited %d", result.Code)
}
if result.Stdout == "0\n" {
return false, nil
}
Logf("kube-proxy is back up.")
return true, nil
})
if err != nil {
return fmt.Errorf("kube-proxy didn't recover: %v", err)
}
return nil
}
func RestartKubelet(host string) error {
// TODO: Make it work for all providers and distros.
supportedProviders := []string{"gce", "aws", "vsphere"}
if !ProviderIs(supportedProviders...) {
return fmt.Errorf("unsupported provider: %s, supported providers are: %v", TestContext.Provider, supportedProviders)
}
if ProviderIs("gce") && !NodeOSDistroIs("debian", "gci") {
return fmt.Errorf("unsupported node OS distro: %s", TestContext.NodeOSDistro)
}
var cmd string
if ProviderIs("gce") && NodeOSDistroIs("debian") {
cmd = "sudo /etc/init.d/kubelet restart"
} else if ProviderIs("vsphere") {
var sudoPresent bool
sshResult, err := SSH("sudo --version", host, TestContext.Provider)
if err != nil {
return fmt.Errorf("Unable to ssh to host %s with error %v", host, err)
}
if !strings.Contains(sshResult.Stderr, "command not found") {
sudoPresent = true
}
sshResult, err = SSH("systemctl --version", host, TestContext.Provider)
if !strings.Contains(sshResult.Stderr, "command not found") {
cmd = "systemctl restart kubelet"
} else {
cmd = "service kubelet restart"
}
if sudoPresent {
cmd = fmt.Sprintf("sudo %s", cmd)
}
} else {
cmd = "sudo systemctl restart kubelet"
}
Logf("Restarting kubelet via ssh on host %s with command %s", host, cmd)
result, err := SSH(cmd, host, TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart kubelet: %v", err)
}
return nil
}
func WaitForKubeletUp(host string) error {
cmd := "curl http://localhost:" + strconv.Itoa(ports.KubeletReadOnlyPort) + "/healthz"
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
result, err := SSH(cmd, host, TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
}
if result.Stdout == "ok" {
return nil
}
}
return fmt.Errorf("waiting for kubelet timed out")
}
func RestartApiserver(c discovery.ServerVersionInterface) error {
// TODO: Make it work for all providers.
if !ProviderIs("gce", "gke", "aws") {
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
}
if ProviderIs("gce", "aws") {
return sshRestartMaster()
}
// GKE doesn't allow ssh access, so use a same-version master
// upgrade to teardown/recreate master.
v, err := c.ServerVersion()
if err != nil {
return err
}
return masterUpgradeGKE(v.GitVersion[1:]) // strip leading 'v'
}
func sshRestartMaster() error {
if !ProviderIs("gce", "aws") {
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
}
var command string
if ProviderIs("gce") {
// `kube-apiserver_kube-apiserver` matches the name of the apiserver
// container.
command = "sudo docker ps | grep kube-apiserver_kube-apiserver | cut -d ' ' -f 1 | xargs sudo docker kill"
} else {
command = "sudo /etc/init.d/kube-apiserver restart"
}
Logf("Restarting master via ssh, running: %v", command)
result, err := SSH(command, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart apiserver: %v", err)
}
return nil
}
func WaitForApiserverUp(c clientset.Interface) error {
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
body, err := c.CoreV1().RESTClient().Get().AbsPath("/healthz").Do().Raw()
if err == nil && string(body) == "ok" {
return nil
}
}
return fmt.Errorf("waiting for apiserver timed out")
}
func RestartControllerManager() error {
// TODO: Make it work for all providers and distros.
if !ProviderIs("gce", "aws") {
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
}
if ProviderIs("gce") && !MasterOSDistroIs("gci") {
return fmt.Errorf("unsupported master OS distro: %s", TestContext.MasterOSDistro)
}
cmd := "sudo docker ps | grep k8s_kube-controller-manager | cut -d ' ' -f 1 | xargs sudo docker kill"
Logf("Restarting controller-manager via ssh, running: %v", cmd)
result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart controller-manager: %v", err)
}
return nil
}
func WaitForControllerManagerUp() error {
cmd := "curl http://localhost:" + strconv.Itoa(ports.ControllerManagerPort) + "/healthz"
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
}
if result.Stdout == "ok" {
return nil
}
}
return fmt.Errorf("waiting for controller-manager timed out")
}
// CheckForControllerManagerHealthy checks that the controller manager does not crash within "duration"
func CheckForControllerManagerHealthy(duration time.Duration) error {
var PID string
cmd := "sudo docker ps | grep k8s_kube-controller-manager | cut -d ' ' -f 1"
for start := time.Now(); time.Since(start) < duration; time.Sleep(5 * time.Second) {
result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
if err != nil {
// We don't necessarily know that it crashed, pipe could just be broken
LogSSHResult(result)
return fmt.Errorf("master unreachable after %v", time.Since(start))
} else if result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("SSH result code not 0. actually: %v after %v", result.Code, time.Since(start))
} else if result.Stdout != PID {
if PID == "" {
PID = result.Stdout
} else {
				// it's dead
return fmt.Errorf("controller manager crashed, old PID: %s, new PID: %s", PID, result.Stdout)
}
} else {
Logf("kube-controller-manager still healthy after %v", time.Since(start))
}
}
return nil
}
// Returns number of ready Nodes excluding Master Node.
func NumberOfReadyNodes(c clientset.Interface) (int, error) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
Logf("Failed to list nodes: %v", err)
return 0, err
}
// Filter out not-ready nodes.
FilterNodes(nodes, func(node v1.Node) bool {
return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
return len(nodes.Items), nil
}
// WaitForReadyNodes waits until the cluster has desired size and there is no not-ready nodes in it.
// By cluster size we mean number of Nodes excluding Master Node.
func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
Logf("Failed to list nodes: %v", err)
continue
}
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
FilterNodes(nodes, func(node v1.Node) bool {
return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
Logf("Cluster has reached the desired number of ready nodes %d", size)
return nil
}
Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size)
}
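// Usage sketch (illustrative only; the node count is hypothetical): wait up
// to 10 minutes for a 3-node cluster (excluding the master) to be ready.
//
//	if err := WaitForReadyNodes(c, 3, 10*time.Minute); err != nil {
//		Failf("cluster never became ready: %v", err)
//	}
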
func GenerateMasterRegexp(prefix string) string {
return prefix + "(-...)?"
}
// WaitForMasters waits until the cluster has the desired number of ready masters in it.
func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
Logf("Failed to list nodes: %v", err)
continue
}
// Filter out nodes that are not master replicas
FilterNodes(nodes, func(node v1.Node) bool {
res, err := regexp.Match(GenerateMasterRegexp(masterPrefix), ([]byte)(node.Name))
if err != nil {
Logf("Failed to match regexp to node name: %v", err)
return false
}
return res
})
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
FilterNodes(nodes, func(node v1.Node) bool {
return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
Logf("Cluster has reached the desired number of masters %d", size)
return nil
}
Logf("Waiting for the number of masters %d, current %d, not ready master nodes %d", size, numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for the number of masters to be %d", timeout, size)
}
// GetHostExternalAddress gets the node for a pod and returns the first External
// address. Returns an error if the node the pod is on doesn't have an External
// address.
func GetHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddress string, err error) {
node, err := client.CoreV1().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return "", err
}
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
externalAddress = address.Address
break
}
}
}
if externalAddress == "" {
err = fmt.Errorf("No external address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}
return
}
type extractRT struct {
http.Header
}
func (rt *extractRT) RoundTrip(req *http.Request) (*http.Response, error) {
rt.Header = req.Header
return &http.Response{}, nil
}
// headersForConfig extracts any http client logic necessary for the provided
// config.
func headersForConfig(c *restclient.Config) (http.Header, error) {
extract := &extractRT{}
rt, err := restclient.HTTPWrappersForConfig(c, extract)
if err != nil {
return nil, err
}
if _, err := rt.RoundTrip(&http.Request{}); err != nil {
return nil, err
}
return extract.Header, nil
}
// OpenWebSocketForURL constructs a websocket connection to the provided URL, using the client
// config, with the specified protocols.
func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) {
tlsConfig, err := restclient.TLSConfigFor(config)
if err != nil {
return nil, fmt.Errorf("failed to create tls config: %v", err)
}
if tlsConfig != nil {
url.Scheme = "wss"
if !strings.Contains(url.Host, ":") {
url.Host += ":443"
}
} else {
url.Scheme = "ws"
if !strings.Contains(url.Host, ":") {
url.Host += ":80"
}
}
headers, err := headersForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to load http headers: %v", err)
}
cfg, err := websocket.NewConfig(url.String(), "http://localhost")
if err != nil {
return nil, fmt.Errorf("failed to create websocket config: %v", err)
}
cfg.Header = headers
cfg.TlsConfig = tlsConfig
cfg.Protocol = protocols
return websocket.DialConfig(cfg)
}
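// Usage sketch (illustrative only; reqURL and the subprotocol are assumed to
// come from the caller): open a websocket against the apiserver and close it
// when done.
//
//	ws, err := OpenWebSocketForURL(reqURL, config, []string{"base64.channel.k8s.io"})
//	if err != nil {
//		Failf("websocket dial failed: %v", err)
//	}
//	defer ws.Close()
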
// getIngressAddress returns the ips/hostnames associated with the Ingress.
func getIngressAddress(client clientset.Interface, ns, name string) ([]string, error) {
ing, err := client.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
addresses := []string{}
for _, a := range ing.Status.LoadBalancer.Ingress {
if a.IP != "" {
addresses = append(addresses, a.IP)
}
if a.Hostname != "" {
addresses = append(addresses, a.Hostname)
}
}
return addresses, nil
}
// WaitForIngressAddress waits for the Ingress to acquire an address.
func WaitForIngressAddress(c clientset.Interface, ns, ingName string, timeout time.Duration) (string, error) {
var address string
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
ipOrNameList, err := getIngressAddress(c, ns, ingName)
if err != nil || len(ipOrNameList) == 0 {
Logf("Waiting for Ingress %v to acquire IP, error %v", ingName, err)
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
address = ipOrNameList[0]
return true, nil
})
return address, err
}
// Looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
return RunKubectlOrDie("logs", podName, container, fmt.Sprintf("--namespace=%v", ns))
})
}
// Looks for the given string in a file in a specific pod container
func LookForStringInFile(ns, podName, container, file, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
return RunKubectlOrDie("exec", podName, "-c", container, fmt.Sprintf("--namespace=%v", ns), "--", "cat", file)
})
}
// Looks for the given string in the output of a command executed in a specific pod container
func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
// use the first container
args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"}
args = append(args, command...)
return RunKubectlOrDie(args...)
})
}
// Looks for the given string in the output of fn, repeatedly calling fn until
// the timeout is reached or the string is found. Returns last log and possibly
// error if the string was not found.
func LookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) {
result = fn()
if strings.Contains(result, expectedString) {
return
}
}
err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
return
}
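// Usage sketch (illustrative only; the pod and container names are
// hypothetical): poll a pod's log until it contains "server started".
//
//	_, err := LookForStringInLog(ns, "web-0", "web", "server started", 2*time.Minute)
//	if err != nil {
//		Failf("string never appeared: %v", err)
//	}
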
// getSvcNodePort returns the node port for the given service:port.
func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) {
svc, err := client.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
if err != nil {
return 0, err
}
for _, p := range svc.Spec.Ports {
if p.Port == int32(svcPort) {
if p.NodePort != 0 {
return int(p.NodePort), nil
}
}
}
return 0, fmt.Errorf(
"No node port found for service %v, port %v", name, svcPort)
}
// GetNodePortURL returns the url to a nodeport Service.
func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (string, error) {
nodePort, err := getSvcNodePort(client, ns, name, svcPort)
if err != nil {
return "", err
}
// This list of nodes must not include the master, which is marked
// unschedulable, since the master doesn't run kube-proxy. Without
// kube-proxy NodePorts won't work.
var nodes *v1.NodeList
	pollErr := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
		nodes, err = client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector().String()})
		if err != nil {
			if IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		return true, nil
	})
	if pollErr != nil {
		return "", pollErr
	}
if len(nodes.Items) == 0 {
return "", fmt.Errorf("Unable to list nodes in cluster.")
}
for _, node := range nodes.Items {
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
return fmt.Sprintf("http://%v:%v", address.Address, nodePort), nil
}
}
}
}
return "", fmt.Errorf("Failed to find external address for service %v", name)
}
// TODO(random-liu): Change this to be a member function of the framework.
func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, false)
}
func getPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, true)
}
// utility function for gomega Eventually
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) {
logs, err := c.CoreV1().RESTClient().Get().
Resource("pods").
Namespace(namespace).
Name(podName).SubResource("log").
Param("container", containerName).
Param("previous", strconv.FormatBool(previous)).
Do().
Raw()
if err != nil {
return "", err
}
if err == nil && strings.Contains(string(logs), "Internal Error") {
return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs))
}
return string(logs), err
}
func GetGCECloud() (*gcecloud.GCECloud, error) {
gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
if !ok {
return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
}
return gceCloud, nil
}
// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
return ensureGCELoadBalancerResourcesDeleted(ip, portRange)
}
return nil
}
func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error {
gceCloud, err := GetGCECloud()
if err != nil {
return err
}
project := TestContext.CloudConfig.ProjectID
region, err := gcecloud.GetGCERegion(TestContext.CloudConfig.Zone)
if err != nil {
return fmt.Errorf("could not get region for zone %q: %v", TestContext.CloudConfig.Zone, err)
}
return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
service := gceCloud.GetComputeService()
list, err := service.ForwardingRules.List(project, region).Do()
if err != nil {
return false, err
}
for _, item := range list.Items {
if item.PortRange == portRange && item.IPAddress == ip {
Logf("found a load balancer: %v", item)
return false, nil
}
}
return true, nil
})
}
// The following helper functions can block/unblock network from source
// host to destination host by manipulating iptable rules.
// This function assumes it can ssh to the source host.
//
// Caution:
// Prefer passing IP addresses rather than hostnames. Using a hostname causes
// iptables to do a DNS lookup to resolve the name to an IP address, which
// slows down the test and causes it to fail if DNS is absent or broken.
//
// Suggested usage pattern:
// func foo() {
// ...
// defer UnblockNetwork(from, to)
// BlockNetwork(from, to)
// ...
// }
//
func BlockNetwork(from string, to string) {
Logf("block network traffic from %s to %s", from, to)
iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
dropCmd := fmt.Sprintf("sudo iptables --insert %s", iptablesRule)
if result, err := SSH(dropCmd, from, TestContext.Provider); result.Code != 0 || err != nil {
LogSSHResult(result)
Failf("Unexpected error: %v", err)
}
}
func UnblockNetwork(from string, to string) {
Logf("Unblock network traffic from %s to %s", from, to)
iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
undropCmd := fmt.Sprintf("sudo iptables --delete %s", iptablesRule)
// Undrop command may fail if the rule has never been created.
// In such case we just lose 30 seconds, but the cluster is healthy.
// But if the rule had been created and removing it failed, the node is broken and
// not coming back. Subsequent tests will run on fewer nodes (some of the tests
// may fail). Manual intervention is required in such a case (recreating the
// cluster solves the problem too).
err := wait.Poll(time.Millisecond*100, time.Second*30, func() (bool, error) {
result, err := SSH(undropCmd, from, TestContext.Provider)
if result.Code == 0 && err == nil {
return true, nil
}
LogSSHResult(result)
if err != nil {
Logf("Unexpected error: %v", err)
}
return false, nil
})
if err != nil {
Failf("Failed to remove the iptable REJECT rule. Manual intervention is "+
"required on host %s: remove rule %s, if exists", from, iptablesRule)
}
}
func isElementOf(podUID types.UID, pods *v1.PodList) bool {
for _, pod := range pods.Items {
if pod.UID == podUID {
return true
}
}
return false
}
// timeout for proxy requests.
const proxyTimeout = 2 * time.Minute
// NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client.
func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) {
// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
// This will leak a goroutine if proxy hangs. #22165
var result restclient.Result
finished := make(chan struct{})
go func() {
result = c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
Suffix(endpoint).
Do()
finished <- struct{}{}
}()
select {
case <-finished:
return result, nil
case <-time.After(proxyTimeout):
return restclient.Result{}, nil
}
}
// GetKubeletPods retrieves the list of pods on the kubelet
func GetKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
return getKubeletPods(c, node, "pods")
}
// GetKubeletRunningPods retrieves the list of running pods on the kubelet. The
// returned pods include the necessary information (e.g., UID, name, namespace
// for pods/containers), but do not contain the full spec.
func GetKubeletRunningPods(c clientset.Interface, node string) (*v1.PodList, error) {
return getKubeletPods(c, node, "runningpods")
}
func getKubeletPods(c clientset.Interface, node, resource string) (*v1.PodList, error) {
result := &v1.PodList{}
client, err := NodeProxyRequest(c, node, resource)
if err != nil {
return &v1.PodList{}, err
}
if err = client.Into(result); err != nil {
return &v1.PodList{}, err
}
return result, nil
}
// LaunchWebserverPod launches a pod serving http on port 8080 to act
// as the target for networking connectivity checks. The "ip:port"
// address of the created pod is returned if the pod is launched
// successfully.
func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) {
containerName := fmt.Sprintf("%s-container", podName)
port := 8080
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName,
Image: imageutils.GetE2EImage(imageutils.Porter),
Env: []v1.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}},
Ports: []v1.ContainerPort{{ContainerPort: int32(port)}},
},
},
NodeName: nodeName,
RestartPolicy: v1.RestartPolicyNever,
},
}
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
_, err := podClient.Create(pod)
ExpectNoError(err)
ExpectNoError(f.WaitForPodRunning(podName))
createdPod, err := podClient.Get(podName, metav1.GetOptions{})
ExpectNoError(err)
ip = net.JoinHostPort(createdPod.Status.PodIP, strconv.Itoa(port))
Logf("Target pod IP:port is %s", ip)
return
}
type PingCommand string
const (
IPv4PingCommand PingCommand = "ping"
IPv6PingCommand PingCommand = "ping6"
)
// CheckConnectivityToHost launches a pod to test connectivity to the specified
// host. An error will be returned if the host is not reachable from the pod.
//
// An empty nodeName will let the scheduler choose where the pod is executed.
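//
// Illustrative usage (a sketch; the pod name and target IP are hypothetical,
// and an empty nodeName lets the scheduler place the pod):
//
//    err := CheckConnectivityToHost(f, "", "ping-test", "8.8.8.8", IPv4PingCommand, 30)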
func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, pingCmd PingCommand, timeout int) error {
contName := fmt.Sprintf("%s-container", podName)
command := []string{
string(pingCmd),
"-c", "3", // send 3 pings
"-W", "2", // wait at most 2 seconds for a reply
"-w", strconv.Itoa(timeout),
host,
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: contName,
Image: BusyBoxImage,
Command: command,
},
},
NodeName: nodeName,
RestartPolicy: v1.RestartPolicyNever,
},
}
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
_, err := podClient.Create(pod)
if err != nil {
return err
}
err = WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name)
if err != nil {
logs, logErr := GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, contName)
if logErr != nil {
Logf("Warning: Failed to get logs from pod %q: %v", pod.Name, logErr)
} else {
Logf("pod %s/%s logs:\n%s", f.Namespace.Name, pod.Name, logs)
}
}
return err
}
// CoreDump SSHs to the master and all nodes and dumps their logs into dir.
// It shells out to cluster/log-dump/log-dump.sh to accomplish this.
func CoreDump(dir string) {
if TestContext.DisableLogDump {
Logf("Skipping dumping logs from cluster")
return
}
var cmd *exec.Cmd
if TestContext.LogexporterGCSPath != "" {
Logf("Dumping logs from nodes to GCS directly at path: %s", TestContext.LogexporterGCSPath)
cmd = exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump", "log-dump.sh"), dir, TestContext.LogexporterGCSPath)
} else {
Logf("Dumping logs locally to: %s", dir)
cmd = exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump", "log-dump.sh"), dir)
}
cmd.Env = append(os.Environ(), fmt.Sprintf("LOG_DUMP_SYSTEMD_SERVICES=%s", parseSystemdServices(TestContext.SystemdServices)))
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
Logf("Error running cluster/log-dump/log-dump.sh: %v", err)
}
}
// parseSystemdServices converts a comma-separated list of services into a space-separated one.
func parseSystemdServices(services string) string {
return strings.TrimSpace(strings.Replace(services, ",", " ", -1))
}
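// UpdatePodWithRetries fetches the named pod, applies the given update
// function to it, and writes it back, retrying up to 3 times on conflict or
// server-timeout errors. Illustrative usage (a sketch; the namespace and pod
// name are placeholders):
//
//    pod, err := UpdatePodWithRetries(c, "default", "my-pod", func(p *v1.Pod) {
//        p.Annotations = map[string]string{"touched": "true"}
//    })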
func UpdatePodWithRetries(client clientset.Interface, ns, name string, update func(*v1.Pod)) (*v1.Pod, error) {
for i := 0; i < 3; i++ {
pod, err := client.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("Failed to get pod %q: %v", name, err)
}
update(pod)
pod, err = client.CoreV1().Pods(ns).Update(pod)
if err == nil {
return pod, nil
}
if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
return nil, fmt.Errorf("Failed to update pod %q: %v", name, err)
}
}
return nil, fmt.Errorf("Too many retries updating Pod %q", name)
}
func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) {
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
if err != nil {
return []*v1.Pod{}, err
}
ignoreSelector := labels.SelectorFromSet(ignoreLabels)
filtered := []*v1.Pod{}
for i := range pods.Items {
p := &pods.Items[i]
if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) {
continue
}
// Append the address of the slice element rather than of the loop
// variable, so each stored pointer refers to a distinct pod.
filtered = append(filtered, p)
}
return filtered, nil
}
// RunCmd runs cmd using args and returns its stdout and stderr. It also outputs
// cmd's stdout and stderr to their respective OS streams.
func RunCmd(command string, args ...string) (string, string, error) {
return RunCmdEnv(nil, command, args...)
}
// RunCmdEnv runs cmd with the provided environment and args and
// returns its stdout and stderr. It also outputs cmd's stdout and
// stderr to their respective OS streams.
func RunCmdEnv(env []string, command string, args ...string) (string, string, error) {
Logf("Running %s %v", command, args)
var bout, berr bytes.Buffer
cmd := exec.Command(command, args...)
// We also output to the OS stdout/stderr to aid in debugging in case cmd
// hangs and never returns before the test gets killed.
//
// This creates some ugly output because gcloud doesn't always provide
// newlines.
cmd.Stdout = io.MultiWriter(os.Stdout, &bout)
cmd.Stderr = io.MultiWriter(os.Stderr, &berr)
cmd.Env = env
err := cmd.Run()
stdout, stderr := bout.String(), berr.String()
if err != nil {
return "", "", fmt.Errorf("error running %s %v; got error %v, stdout %q, stderr %q",
command, args, err, stdout, stderr)
}
return stdout, stderr, nil
}
// retryCmd runs cmd using args and retries it for up to SingleCallTimeout if
// it returns an error. It returns stdout and stderr.
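// Illustrative usage (a sketch; the gcloud arguments are placeholders):
//
//    stdout, stderr, err := retryCmd("gcloud", "compute", "instances", "list")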
func retryCmd(command string, args ...string) (string, string, error) {
var err error
stdout, stderr := "", ""
wait.Poll(Poll, SingleCallTimeout, func() (bool, error) {
stdout, stderr, err = RunCmd(command, args...)
if err != nil {
Logf("Got %v", err)
return false, nil
}
return true, nil
})
return stdout, stderr, err
}
// GetPodsScheduled returns the lists of currently scheduled and not scheduled pods, excluding pods on master nodes.
func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
for _, pod := range pods.Items {
if !masterNodes.Has(pod.Spec.NodeName) {
if pod.Spec.NodeName != "" {
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
Expect(scheduledCondition != nil).To(Equal(true))
Expect(scheduledCondition.Status).To(Equal(v1.ConditionTrue))
scheduledPods = append(scheduledPods, pod)
} else {
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
Expect(scheduledCondition != nil).To(Equal(true))
Expect(scheduledCondition.Status).To(Equal(v1.ConditionFalse))
if scheduledCondition.Reason == "Unschedulable" {
notScheduledPods = append(notScheduledPods, pod)
}
}
}
}
return
}
// WaitForStableCluster waits until all existing pods are scheduled and returns their amount.
func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
timeout := 10 * time.Minute
startTime := time.Now()
allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
ExpectNoError(err)
// API server returns also Pods that succeeded. We need to filter them out.
currentPods := make([]v1.Pod, 0, len(allPods.Items))
for _, pod := range allPods.Items {
if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
currentPods = append(currentPods, pod)
}
}
allPods.Items = currentPods
scheduledPods, currentlyNotScheduledPods := GetPodsScheduled(masterNodes, allPods)
for len(currentlyNotScheduledPods) != 0 {
time.Sleep(2 * time.Second)
allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
ExpectNoError(err)
scheduledPods, currentlyNotScheduledPods = GetPodsScheduled(masterNodes, allPods)
if startTime.Add(timeout).Before(time.Now()) {
Failf("Timed out after %v waiting for stable cluster.", timeout)
break
}
}
return len(scheduledPods)
}
// GetMasterAndWorkerNodesOrDie will return a list of masters and schedulable worker nodes
func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList) {
nodes := &v1.NodeList{}
masters := sets.NewString()
all, _ := c.CoreV1().Nodes().List(metav1.ListOptions{})
for _, n := range all.Items {
if system.IsMasterNode(n.Name) {
masters.Insert(n.Name)
} else if isNodeSchedulable(&n) && isNodeUntainted(&n) {
nodes.Items = append(nodes.Items, n)
}
}
return masters, nodes
}
func ListNamespaceEvents(c clientset.Interface, ns string) error {
ls, err := c.CoreV1().Events(ns).List(metav1.ListOptions{})
if err != nil {
return err
}
for _, event := range ls.Items {
glog.Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message)
}
return nil
}
// E2ETestNodePreparer implements testutil.TestNodePreparer interface, which is used
// to create/modify Nodes before running a test.
type E2ETestNodePreparer struct {
client clientset.Interface
// Specifies how many nodes should be modified using the given strategy.
// Only one strategy can be applied to a single Node, so there needs to
// be at least <sum of counts> Nodes in the cluster.
countToStrategy []testutil.CountToStrategy
nodeToAppliedStrategy map[string]testutil.PrepareNodeStrategy
}
func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutil.CountToStrategy) testutil.TestNodePreparer {
return &E2ETestNodePreparer{
client: client,
countToStrategy: countToStrategy,
nodeToAppliedStrategy: make(map[string]testutil.PrepareNodeStrategy),
}
}
func (p *E2ETestNodePreparer) PrepareNodes() error {
nodes := GetReadySchedulableNodesOrDie(p.client)
numTemplates := 0
for _, v := range p.countToStrategy {
// Sum the requested node counts rather than the slice indices.
numTemplates += v.Count
}
if numTemplates > len(nodes.Items) {
return fmt.Errorf("Can't prepare Nodes. Got more templates than existing Nodes.")
}
index := 0
sum := 0
for _, v := range p.countToStrategy {
sum += v.Count
for ; index < sum; index++ {
if err := testutil.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
glog.Errorf("Aborting node preparation: %v", err)
return err
}
p.nodeToAppliedStrategy[nodes.Items[index].Name] = v.Strategy
}
}
return nil
}
func (p *E2ETestNodePreparer) CleanupNodes() error {
var encounteredError error
nodes := GetReadySchedulableNodesOrDie(p.client)
for i := range nodes.Items {
var err error
name := nodes.Items[i].Name
strategy, found := p.nodeToAppliedStrategy[name]
if found {
if err = testutil.DoCleanupNode(p.client, name, strategy); err != nil {
glog.Errorf("Skipping cleanup of Node: failed update of %v: %v", name, err)
encounteredError = err
}
}
}
return encounteredError
}
// CleanupGCEResources cleans up GCE Service Type=LoadBalancer resources with
// the given name. The name is usually the UUID of the Service prefixed with an
// alpha-numeric character ('a') to work around cloudprovider rules.
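// Illustrative call (a sketch; the load balancer name and zone are
// hypothetical; region may be left empty to derive it from the zone):
//
//    err := CleanupGCEResources(c, "a0123456789abcdef", "", "us-central1-b")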
func CleanupGCEResources(c clientset.Interface, loadBalancerName, region, zone string) (retErr error) {
gceCloud, err := GetGCECloud()
if err != nil {
return err
}
if region == "" {
// Attempt to parse region from zone if no region is given.
region, err = gcecloud.GetGCERegion(zone)
if err != nil {
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
}
}
if err := gceCloud.DeleteFirewall(gcecloud.MakeFirewallName(loadBalancerName)); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = err
}
if err := gceCloud.DeleteRegionForwardingRule(loadBalancerName, region); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, err)
}
if err := gceCloud.DeleteRegionAddress(loadBalancerName, region); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, err)
}
clusterID, err := GetClusterID(c)
if err != nil {
retErr = fmt.Errorf("%v\n%v", retErr, err)
return
}
hcNames := []string{gcecloud.MakeNodesHealthCheckName(clusterID)}
hc, getErr := gceCloud.GetHttpHealthCheck(loadBalancerName)
if getErr != nil && !IsGoogleAPIHTTPErrorCode(getErr, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, getErr)
return
}
if hc != nil {
hcNames = append(hcNames, hc.Name)
}
if err := gceCloud.DeleteExternalTargetPoolAndChecks(&v1.Service{}, loadBalancerName, region, clusterID, hcNames...); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, err)
}
return
}
// IsGoogleAPIHTTPErrorCode returns true if the error is a google api
// error matching the corresponding HTTP error code.
func IsGoogleAPIHTTPErrorCode(err error, code int) bool {
apiErr, ok := err.(*googleapi.Error)
return ok && apiErr.Code == code
}
// getMaster populates the externalIP, internalIP and hostname fields of the master.
// If any of these is unavailable, it is set to "".
func getMaster(c clientset.Interface) Address {
master := Address{}
// Populate the internal IP.
eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
if err != nil {
Failf("Failed to get kubernetes endpoints: %v", err)
}
if len(eps.Subsets) != 1 || len(eps.Subsets[0].Addresses) != 1 {
Failf("There are more than 1 endpoints for kubernetes service: %+v", eps)
}
master.internalIP = eps.Subsets[0].Addresses[0].IP
// Populate the external IP/hostname.
url, err := url.Parse(TestContext.Host)
if err != nil {
Failf("Failed to parse hostname: %v", err)
}
if net.ParseIP(url.Host) != nil {
// TODO: Check that it is external IP (not having a reserved IP address as per RFC1918).
master.externalIP = url.Host
} else {
master.hostname = url.Host
}
return master
}
// GetMasterAddress returns the hostname/external IP/internal IP as appropriate for e2e tests on
// a particular provider; this is the address of the interface used for communication with the kubelet.
func GetMasterAddress(c clientset.Interface) string {
master := getMaster(c)
switch TestContext.Provider {
case "gce", "gke":
return master.externalIP
case "aws":
return awsMasterIP
default:
Failf("This test is not supported for provider %s and should be disabled", TestContext.Provider)
}
return ""
}
// GetNodeExternalIP returns node external IP concatenated with port 22 for ssh
// e.g. 1.2.3.4:22
func GetNodeExternalIP(node *v1.Node) string {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP {
host = net.JoinHostPort(a.Address, sshPort)
break
}
}
if host == "" {
Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host
}
// SimpleGET executes a GET request on the given url and returns an error if a non-200 status is returned.
func SimpleGET(c *http.Client, url, host string) (string, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return "", err
}
req.Host = host
res, err := c.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
rawBody, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
body := string(rawBody)
if res.StatusCode != http.StatusOK {
err = fmt.Errorf(
"GET returned http error %v", res.StatusCode)
}
return body, err
}
// PollURL polls until the url responds with a healthy http code. If
// expectUnreachable is true, it instead succeeds once the url stops
// responding with a healthy http code.
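//
// Illustrative usage (a sketch; the route and host are hypothetical):
//
//    err := PollURL("http://203.0.113.10/healthz", "foo.example.com",
//        5*time.Minute, 10*time.Second, &http.Client{}, false)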
func PollURL(route, host string, timeout time.Duration, interval time.Duration, httpClient *http.Client, expectUnreachable bool) error {
var lastBody string
pollErr := wait.PollImmediate(interval, timeout, func() (bool, error) {
var err error
lastBody, err = SimpleGET(httpClient, route, host)
if err != nil {
Logf("host %v path %v: %v unreachable", host, route, err)
return expectUnreachable, nil
}
return !expectUnreachable, nil
})
if pollErr != nil {
return fmt.Errorf("Failed to execute a successful GET within %v, Last response body for %v, host %v:\n%v\n\n%v\n",
timeout, route, host, lastBody, pollErr)
}
return nil
}
func DescribeIng(ns string) {
Logf("\nOutput of kubectl describe ing:\n")
desc, _ := RunKubectl(
"describe", "ing", fmt.Sprintf("--namespace=%v", ns))
Logf(desc)
}
// NewTestPod returns a pod that has the specified requests and limits
func (f *Framework) NewTestPod(name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: GetPauseImageName(f.ClientSet),
Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
},
},
},
},
}
}
// CreateEmptyFileOnPod creates an empty file at the given path in the pod.
func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error {
_, err := RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
return err
}
// GetAzureCloud returns azure cloud provider
func GetAzureCloud() (*azure.Cloud, error) {
cloud, ok := TestContext.CloudConfig.Provider.(*azure.Cloud)
if !ok {
return nil, fmt.Errorf("failed to convert CloudConfig.Provider to Azure: %#v", TestContext.CloudConfig.Provider)
}
return cloud, nil
}
func PrintSummaries(summaries []TestDataSummary, testBaseName string) {
now := time.Now()
for i := range summaries {
Logf("Printing summary: %v", summaries[i].SummaryKind())
switch TestContext.OutputPrintType {
case "hr":
if TestContext.ReportDir == "" {
Logf(summaries[i].PrintHumanReadable())
} else {
// TODO: learn to extract test name and append it to the kind instead of timestamp.
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt")
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintHumanReadable()), 0644); err != nil {
Logf("Failed to write file %v with test performance data: %v", filePath, err)
}
}
case "json":
fallthrough
default:
if TestContext.OutputPrintType != "json" {
Logf("Unknown output type: %v. Printing JSON", TestContext.OutputPrintType)
}
if TestContext.ReportDir == "" {
Logf("%v JSON\n%v", summaries[i].SummaryKind(), summaries[i].PrintJSON())
Logf("Finished")
} else {
// TODO: learn to extract test name and append it to the kind instead of timestamp.
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".json")
Logf("Writing to %s", filePath)
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintJSON()), 0644); err != nil {
Logf("Failed to write file %v with test performance data: %v", filePath, err)
}
}
}
}
}
func DumpDebugInfo(c clientset.Interface, ns string) {
sl, _ := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
for _, s := range sl.Items {
desc, _ := RunKubectl("describe", "po", s.Name, fmt.Sprintf("--namespace=%v", ns))
Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc)
l, _ := RunKubectl("logs", s.Name, fmt.Sprintf("--namespace=%v", ns), "--tail=100")
Logf("\nLast 100 log lines of %v:\n%v", s.Name, l)
}
}
func IsRetryableAPIError(err error) bool {
return apierrs.IsTimeout(err) || apierrs.IsServerTimeout(err) || apierrs.IsTooManyRequests(err) || apierrs.IsInternalError(err)
}
// DsFromManifest reads a .json/yaml file and returns the daemonset in it.
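// For instance (a sketch; the manifest URL is a placeholder):
//
//    ds, err := DsFromManifest("https://example.test/manifests/fluentd-ds.yaml")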
func DsFromManifest(url string) (*extensions.DaemonSet, error) {
var controller extensions.DaemonSet
Logf("Parsing ds from %v", url)
var response *http.Response
var err error
for i := 1; i <= 5; i++ {
response, err = http.Get(url)
if err == nil && response.StatusCode == 200 {
break
}
time.Sleep(time.Duration(i) * time.Second)
}
if err != nil {
return nil, fmt.Errorf("failed to get url: %v", err)
}
if response.StatusCode != 200 {
return nil, fmt.Errorf("invalid http response status: %v", response.StatusCode)
}
defer response.Body.Close()
data, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, fmt.Errorf("failed to read html response body: %v", err)
}
json, err := utilyaml.ToJSON(data)
if err != nil {
return nil, fmt.Errorf("failed to parse data to json: %v", err)
}
err = runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), json, &controller)
if err != nil {
return nil, fmt.Errorf("failed to decode DaemonSet spec: %v", err)
}
return &controller, nil
}
// waitForServerPreferredNamespacedResources waits until server preferred namespaced resources could be successfully discovered.
// TODO: Fix https://github.com/kubernetes/kubernetes/issues/55768 and remove the following retry.
func waitForServerPreferredNamespacedResources(d discovery.DiscoveryInterface, timeout time.Duration) ([]*metav1.APIResourceList, error) {
Logf("Waiting up to %v for server preferred namespaced resources to be successfully discovered", timeout)
var resources []*metav1.APIResourceList
if err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
var err error
resources, err = d.ServerPreferredNamespacedResources()
if err == nil || isDynamicDiscoveryError(err) {
return true, nil
}
if !discovery.IsGroupDiscoveryFailedError(err) {
return false, err
}
Logf("Error discoverying server preferred namespaced resources: %v, retrying in %v.", err, Poll)
return false, nil
}); err != nil {
return nil, err
}
return resources, nil
}
func GetClusterZones(c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
}
// collect values of zone label from all nodes
zones := sets.NewString()
for _, node := range nodes.Items {
if zone, found := node.Labels[kubeletapis.LabelZoneFailureDomain]; found {
zones.Insert(zone)
}
}
return zones, nil
}
| [
"\"KUBE_SSH_USER\"",
"\"USER\"",
"\"HOME\"",
"\"AWS_SSH_KEY\"",
"\"VAGRANT_SSH_KEY\"",
"\"LOCAL_SSH_KEY\"",
"\"KUBE_SSH_KEY\""
]
| []
| [
"KUBE_SSH_KEY",
"VAGRANT_SSH_KEY",
"LOCAL_SSH_KEY",
"AWS_SSH_KEY",
"KUBE_SSH_USER",
"USER",
"HOME"
]
| [] | ["KUBE_SSH_KEY", "VAGRANT_SSH_KEY", "LOCAL_SSH_KEY", "AWS_SSH_KEY", "KUBE_SSH_USER", "USER", "HOME"] | go | 7 | 0 | |
manager/controllers/app/fybrikapplication_controller.go | // Copyright 2020 IBM Corp.
// SPDX-License-Identifier: Apache-2.0
package app
import (
"context"
"encoding/json"
"fmt"
"os"
"strings"
"time"
"emperror.dev/errors"
"github.com/rs/zerolog"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/validation/field"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"fybrik.io/fybrik/pkg/vault"
api "fybrik.io/fybrik/manager/apis/app/v1alpha1"
"fybrik.io/fybrik/manager/controllers"
"fybrik.io/fybrik/manager/controllers/utils"
"fybrik.io/fybrik/pkg/adminconfig"
dcclient "fybrik.io/fybrik/pkg/connectors/datacatalog/clients"
pmclient "fybrik.io/fybrik/pkg/connectors/policymanager/clients"
"fybrik.io/fybrik/pkg/environment"
"fybrik.io/fybrik/pkg/infrastructure"
"fybrik.io/fybrik/pkg/logging"
"fybrik.io/fybrik/pkg/model/datacatalog"
"fybrik.io/fybrik/pkg/model/policymanager"
"fybrik.io/fybrik/pkg/model/taxonomy"
"fybrik.io/fybrik/pkg/multicluster"
local "fybrik.io/fybrik/pkg/multicluster/local"
"fybrik.io/fybrik/pkg/storage"
"fybrik.io/fybrik/pkg/taxonomy/validate"
)
// FybrikApplicationReconciler reconciles a FybrikApplication object
type FybrikApplicationReconciler struct {
client.Client
Name string
Log zerolog.Logger
Scheme *runtime.Scheme
PolicyManager pmclient.PolicyManager
DataCatalog dcclient.DataCatalog
ResourceInterface ContextInterface
ClusterManager multicluster.ClusterLister
Provision storage.ProvisionInterface
ConfigEvaluator adminconfig.EvaluatorInterface
Infrastructure *infrastructure.AttributeManager
}
type ApplicationContext struct {
Log *zerolog.Logger
Application *api.FybrikApplication
UUID string
}
const (
ApplicationTaxonomy = "/tmp/taxonomy/fybrik_application.json"
DataCatalogTaxonomy = "/tmp/taxonomy/datacatalog.json#/definitions/GetAssetResponse"
FybrikApplicationKind = "FybrikApplication"
Interval = 10
)
// Reconcile reconciles FybrikApplication CRD
// It receives FybrikApplication CRD and selects the appropriate modules that will run
// The outcome is a Plotter containing multiple Blueprints that run on different clusters
//nolint:gocyclo
func (r *FybrikApplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
sublog := r.Log.With().Str(FybrikApplicationKind, req.NamespacedName.String()).Logger()
sublog.Trace().Msg("*** FybrikApplication Reconcile ***")
// obtain FybrikApplication resource
application := &api.FybrikApplication{}
if err := r.Get(ctx, req.NamespacedName, application); err != nil {
sublog.Warn().Msg("The reconciled object was not found")
return ctrl.Result{}, client.IgnoreNotFound(err)
}
uuid := utils.GetFybrikApplicationUUID(application)
log := sublog.With().Str(utils.FybrikAppUUID, uuid).Logger()
// Log the fybrikapplication
logging.LogStructure(FybrikApplicationKind, application, &log, zerolog.TraceLevel, true, true)
applicationContext := ApplicationContext{Log: &log, Application: application, UUID: uuid}
if err := r.reconcileFinalizers(ctx, applicationContext); err != nil {
log.Error().Err(err).Msg("Could not reconcile finalizers.")
return ctrl.Result{}, err
}
// If the object has a scheduled deletion time, update status and return
if !application.DeletionTimestamp.IsZero() {
// The object is being deleted
return ctrl.Result{}, nil
}
observedStatus := application.Status.DeepCopy()
appVersion := application.GetGeneration()
// if webhooks are disabled, validate the application here,
// unless the current generation has already been validated
if os.Getenv("ENABLE_WEBHOOKS") != "true" &&
(string(application.Status.ValidApplication) == "" || observedStatus.ValidatedGeneration != appVersion) {
// do validation on applicationContext
err := application.ValidateFybrikApplication(ApplicationTaxonomy)
log.Debug().Msg("Reconciler validating Fybrik application")
application.Status.ValidatedGeneration = appVersion
// if validation fails
if err != nil {
// set error message
log.Error().Err(err).Bool(logging.FORUSER, true).Bool(logging.AUDIT, true).Msg("FybrikApplication validation failed")
application.Status.ErrorMessage = err.Error()
application.Status.ValidApplication = v1.ConditionFalse
if err := utils.UpdateStatus(ctx, r.Client, application, observedStatus); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
application.Status.ValidApplication = v1.ConditionTrue
}
if application.Status.ValidApplication == v1.ConditionFalse {
return ctrl.Result{}, nil
}
// check if reconcile is required
// reconcile is required if the spec has been changed, or the previous reconcile has failed to allocate a Plotter resource
generationComplete := r.ResourceInterface.ResourceExists(observedStatus.Generated) && (observedStatus.Generated.AppVersion == appVersion)
if (!generationComplete) || (observedStatus.ObservedGeneration != appVersion) {
if result, err := r.reconcile(applicationContext); err != nil {
// another attempt will be done
// users should be informed in case of errors
// ignore an update error, a new reconcile will be made in any case
_ = utils.UpdateStatus(ctx, r.Client, application, observedStatus)
return result, err
}
application.Status.ObservedGeneration = appVersion
} else {
resourceStatus, err := r.ResourceInterface.GetResourceStatus(application.Status.Generated)
if err != nil {
return ctrl.Result{}, err
}
r.checkReadiness(applicationContext, resourceStatus)
}
application.Status.Ready = isReady(application)
// Update CRD status in case of change (other than deletion, which was handled separately)
if application.DeletionTimestamp.IsZero() {
log.Trace().Str(logging.ACTION, logging.UPDATE).Msg("Updating status for desired generation " + fmt.Sprint(application.GetGeneration()))
if err := utils.UpdateStatus(ctx, r.Client, application, observedStatus); err != nil {
return ctrl.Result{}, err
}
}
errorMsg := getErrorMessages(application)
if errorMsg != "" {
log.Warn().Str(logging.ACTION, logging.UPDATE).Msg("Reconcile failed with errors")
}
// trigger a new reconcile if required (the fybrikapplication is not ready)
if !isReady(application) {
return ctrl.Result{RequeueAfter: Interval * time.Second}, nil
}
return ctrl.Result{}, nil
}
func getBucketResourceRef(name string) *types.NamespacedName {
return &types.NamespacedName{Name: name, Namespace: utils.GetSystemNamespace()}
}
func (r *FybrikApplicationReconciler) checkReadiness(applicationContext ApplicationContext, status api.ObservedState) {
if applicationContext.Application.Status.AssetStates == nil {
initStatus(applicationContext.Application)
}
// TODO(shlomitk1): receive status per asset and update accordingly
// Temporary fix: all assets that are not in Deny state are updated based on the received status
for _, dataCtx := range applicationContext.Application.Spec.Data {
assetID := dataCtx.DataSetID
if applicationContext.Application.Status.AssetStates[assetID].Conditions[DenyConditionIndex].Status == v1.ConditionTrue {
// should not appear in the plotter status
continue
}
if status.Error != "" {
setErrorCondition(applicationContext, assetID, status.Error)
continue
}
if !status.Ready {
continue
}
// register assets if necessary once the ready state has been received
if dataCtx.Requirements.FlowParams.Catalog != "" {
if applicationContext.Application.Status.AssetStates[assetID].CatalogedAsset != "" {
// the asset has been already cataloged
continue
}
// mark the bucket as persistent and register the asset
provisionedBucketRef, found := applicationContext.Application.Status.ProvisionedStorage[assetID]
if !found {
message := "No copy has been created for the asset " + assetID + " required to be registered"
setErrorCondition(applicationContext, assetID, message)
continue
}
if err := r.Provision.SetPersistent(getBucketResourceRef(provisionedBucketRef.DatasetRef), true); err != nil {
setErrorCondition(applicationContext, assetID, err.Error())
continue
}
// register the asset: experimental feature
if newAssetID, err := r.RegisterAsset(dataCtx.Requirements.FlowParams.Catalog, &provisionedBucketRef,
applicationContext.Application); err == nil {
state := applicationContext.Application.Status.AssetStates[assetID]
state.CatalogedAsset = newAssetID
applicationContext.Application.Status.AssetStates[assetID] = state
} else {
// log an error and make a new attempt to register the asset
setErrorCondition(applicationContext, assetID, err.Error())
continue
}
}
setReadyCondition(applicationContext, assetID)
}
}
// reconcileFinalizers reconciles finalizers for FybrikApplication
func (r *FybrikApplicationReconciler) reconcileFinalizers(ctx context.Context, applicationContext ApplicationContext) error {
// finalizer
finalizerName := r.Name + ".finalizer"
hasFinalizer := ctrlutil.ContainsFinalizer(applicationContext.Application, finalizerName)
// If the object has a scheduled deletion time, delete it and all resources it has created
if !applicationContext.Application.DeletionTimestamp.IsZero() || (len(applicationContext.Application.Spec.Data) == 0) {
// The object is being deleted, or no datasets are defined
if hasFinalizer { // Finalizer was created when the object was created
// the finalizer is present - delete the allocated resources
if err := r.deleteExternalResources(applicationContext); err != nil {
return err
}
// remove the finalizer from the list and update it, because it needs to be deleted together with the object
ctrlutil.RemoveFinalizer(applicationContext.Application, finalizerName)
if err := utils.UpdateFinalizers(ctx, r.Client, applicationContext.Application); err != nil {
return err
}
}
return nil
}
// Make sure this CRD instance has a finalizer
if !hasFinalizer {
ctrlutil.AddFinalizer(applicationContext.Application, finalizerName)
if err := utils.UpdateFinalizers(ctx, r.Client, applicationContext.Application); err != nil {
return err
}
}
return nil
}
func (r *FybrikApplicationReconciler) deleteExternalResources(applicationContext ApplicationContext) error {
// clear provisioned storage
// References to buckets (Dataset resources) are deleted. Buckets that are persistent will not be removed upon Dataset deletion.
var deletedKeys []string
var errMsgs []string
for datasetID, datasetDetails := range applicationContext.Application.Status.ProvisionedStorage {
if err := r.Provision.DeleteDataset(getBucketResourceRef(datasetDetails.DatasetRef)); err != nil {
errMsgs = append(errMsgs, err.Error())
} else {
deletedKeys = append(deletedKeys, datasetID)
}
}
for _, datasetID := range deletedKeys {
delete(applicationContext.Application.Status.ProvisionedStorage, datasetID)
}
if len(errMsgs) != 0 {
return errors.New(strings.Join(errMsgs, ";"))
}
// delete the generated resource
if applicationContext.Application.Status.Generated == nil {
return nil
}
applicationContext.Log.Trace().Str(logging.ACTION, logging.DELETE).
Msgf("Reconcile: FybrikApplication is deleting the generated %s", applicationContext.Application.Status.Generated.Kind)
if err := r.ResourceInterface.DeleteResource(applicationContext.Application.Status.Generated); err != nil {
return err
}
applicationContext.Application.Status.Generated = nil
return nil
}
// setVirtualEndpoints populates the endpoints in the status of the fybrikapplication
func setVirtualEndpoints(application *api.FybrikApplication, flows []api.Flow) {
endpointMap := make(map[string]taxonomy.Connection)
for _, flow := range flows {
// sanity check
if len(flow.SubFlows) == 0 {
continue
}
subflow := flow.SubFlows[len(flow.SubFlows)-1]
for _, sequentialSteps := range subflow.Steps {
// Check the last step in the sequential flow (this will expose the api)
lastStep := sequentialSteps[len(sequentialSteps)-1]
if lastStep.Parameters.API != nil {
endpointMap[flow.AssetID] = lastStep.Parameters.API.Connection
}
}
}
// populate endpoints in application status
for _, asset := range application.Spec.Data {
state := application.Status.AssetStates[asset.DataSetID]
state.Endpoint = endpointMap[asset.DataSetID]
application.Status.AssetStates[asset.DataSetID] = state
}
}
// reconcile receives either FybrikApplication CRD
// or a status update from the generated resource
func (r *FybrikApplicationReconciler) reconcile(applicationContext ApplicationContext) (ctrl.Result, error) {
// Log the request received - i.e. the fybrikapplication.spec
applicationContext.Log.Trace().Msg("*** reconcile ***")
// Data User created or updated the FybrikApplication
// clear status
initStatus(applicationContext.Application)
if applicationContext.Application.Status.ProvisionedStorage == nil {
applicationContext.Application.Status.ProvisionedStorage = make(map[string]api.DatasetDetails)
}
if len(applicationContext.Application.Spec.Data) == 0 {
applicationContext.Log.Info().Msg("No plotter will be generated since no datasets are specified")
return ctrl.Result{}, nil
}
// create a list of requirements for creating a data flow (actions, interface to app, data format) per a single data set
// workload cluster is common for all datasets in the given application
workloadCluster, err := r.GetWorkloadCluster(applicationContext)
if err != nil {
// fatal
applicationContext.Log.Info().Err(err).Bool(logging.FORUSER, true).Bool(logging.AUDIT, true).
Str(logging.ACTION, logging.CREATE).Msg("Could not determine in which cluster the workload runs")
return ctrl.Result{}, err
}
env, err := r.Environment()
if err != nil {
return ctrl.Result{}, err
}
var requirements []DataInfo
for _, dataset := range applicationContext.Application.Spec.Data {
req := DataInfo{
Context: dataset.DeepCopy(),
DataDetails: &datacatalog.GetAssetResponse{},
StorageRequirements: make(map[taxonomy.ProcessingLocation][]taxonomy.Action),
}
if err = r.constructDataInfo(&req, applicationContext, workloadCluster, env); err != nil {
AnalyzeError(applicationContext, req.Context.DataSetID, err)
continue
}
requirements = append(requirements, req)
}
// check if can proceed
if len(requirements) == 0 {
return ctrl.Result{}, nil
}
provisionedStorage, plotterSpec, err := r.buildSolution(applicationContext, env, requirements)
if err != nil {
applicationContext.Log.Error().Err(err).Bool(logging.FORUSER, true).Bool(logging.AUDIT, true).Msg("Plotter construction failed")
}
// check if can proceed
if err != nil || getErrorMessages(applicationContext.Application) != "" {
return ctrl.Result{}, err
}
// clean irrelevant buckets and check that the provisioned storage is ready
storageReady, allocationErr := r.updateProvisionedStorageStatus(applicationContext, provisionedStorage)
if !storageReady {
return ctrl.Result{RequeueAfter: 2 * time.Second}, allocationErr
}
setVirtualEndpoints(applicationContext.Application, plotterSpec.Flows)
ownerRef := &api.ResourceReference{Name: applicationContext.Application.Name, Namespace: applicationContext.Application.Namespace,
AppVersion: applicationContext.Application.GetGeneration()}
resourceRef := r.ResourceInterface.CreateResourceReference(ownerRef)
if err := r.ResourceInterface.CreateOrUpdateResource(ownerRef, resourceRef, plotterSpec,
applicationContext.Application.Labels, applicationContext.UUID); err != nil {
applicationContext.Log.Error().Err(err).Str(logging.ACTION, logging.CREATE).Msgf("Error creating %s", resourceRef.Kind)
if err.Error() == api.InvalidClusterConfiguration {
applicationContext.Application.Status.ErrorMessage = err.Error()
return ctrl.Result{}, nil
}
return ctrl.Result{}, err
}
applicationContext.Application.Status.Generated = resourceRef
applicationContext.Log.Trace().Str(logging.ACTION, logging.CREATE).Msgf("Created %s successfully!", resourceRef.Kind)
return ctrl.Result{}, nil
}
func (r *FybrikApplicationReconciler) Environment() (*Environment, error) {
// get deployed modules
moduleMap, err := r.GetAllModules()
if err != nil {
r.Log.Error().Err(err).Msg("Error while listing modules")
return nil, err
}
r.Log.Info().Msg("Listing modules")
for m := range moduleMap {
r.Log.Info().Msgf("Module: %s", m)
}
accounts, err := r.getStorageAccounts()
if err != nil {
r.Log.Error().Err(err).Msg("Error while listing storage accounts")
return nil, err
}
// get available clusters
clusters, err := r.ClusterManager.GetClusters()
if err != nil {
return nil, err
}
return &Environment{
Modules: moduleMap,
Clusters: clusters,
StorageAccounts: accounts,
AttributeManager: r.Infrastructure,
}, nil
}
// CreateDataRequest generates a new DataRequest object for a specific asset based on FybrikApplication and asset metadata
func CreateDataRequest(application *api.FybrikApplication, dataCtx *api.DataContext,
assetMetadata *datacatalog.ResourceMetadata) adminconfig.DataRequest {
var flow taxonomy.DataFlow
// If a workload selector is provided but no flow, assume read - for backward compatibility
if (application.Spec.Selector.WorkloadSelector.Size() > 0) && (dataCtx.Flow == "") {
flow = taxonomy.ReadFlow
} else {
flow = dataCtx.Flow
}
return adminconfig.DataRequest{
DatasetID: dataCtx.DataSetID,
Interface: dataCtx.Requirements.Interface,
Usage: flow,
Metadata: assetMetadata,
}
}
func (r *FybrikApplicationReconciler) ValidateAssetResponse(response *datacatalog.GetAssetResponse, taxonomyFile, datasetID string) error {
var allErrs []*field.Error
// Convert the GetAssetResponse Go struct to JSON
responseJSON, err := json.Marshal(response)
if err != nil {
return err
}
r.Log.Info().Msg("responseJSON:" + string(responseJSON))
// Validate the asset response against the taxonomy
allErrs, err = validate.TaxonomyCheck(responseJSON, taxonomyFile)
if err != nil {
return err
}
// Return any error
if len(allErrs) == 0 {
return nil
}
return apierrors.NewInvalid(
schema.GroupKind{Group: "app.fybrik.io", Kind: "DataCatalog-AssetResponse"},
datasetID, allErrs)
}
func (r *FybrikApplicationReconciler) constructDataInfo(req *DataInfo, appContext ApplicationContext,
workloadCluster multicluster.Cluster, env *Environment) error {
// Call the DataCatalog service to get info about the dataset
input := appContext.Application
log := appContext.Log.With().Str(logging.DATASETID, req.Context.DataSetID).Logger()
var err error
if !req.Context.Requirements.FlowParams.IsNewDataSet {
var credentialPath string
if input.Spec.SecretRef != "" {
if !utils.IsVaultEnabled() {
log.Error().Str("SecretRef", input.Spec.SecretRef).Msg("SecretRef defined [%s], but vault is disabled")
} else {
credentialPath = utils.GetVaultAddress() + vault.PathForReadingKubeSecret(input.Namespace, input.Spec.SecretRef)
}
}
var response *datacatalog.GetAssetResponse
request := datacatalog.GetAssetRequest{
AssetID: taxonomy.AssetID(req.Context.DataSetID),
OperationType: datacatalog.READ}
if response, err = r.DataCatalog.GetAssetInfo(&request, credentialPath); err != nil {
log.Error().Err(err).Msg("failed to receive the catalog connector response")
return err
}
err = r.ValidateAssetResponse(response, DataCatalogTaxonomy, req.Context.DataSetID)
if err != nil {
log.Error().Err(err).Msg("failed to validate the catalog connector response")
return err
}
logging.LogStructure("Catalog connector response", response, &log, zerolog.DebugLevel, false, false)
response.DeepCopyInto(req.DataDetails)
}
configEvaluatorInput := &adminconfig.EvaluatorInput{}
configEvaluatorInput.Workload.UUID = utils.GetFybrikApplicationUUID(input)
input.Spec.AppInfo.DeepCopyInto(&configEvaluatorInput.Workload.Properties)
configEvaluatorInput.Workload.Cluster = workloadCluster
configEvaluatorInput.Request = CreateDataRequest(input, req.Context, &req.DataDetails.ResourceMetadata)
// Governance actions
err = r.checkGovernanceActions(configEvaluatorInput, req, appContext, env)
if err != nil {
return err
}
configDecisions, err := r.ConfigEvaluator.Evaluate(configEvaluatorInput)
if err != nil {
appContext.Log.Error().Err(err).Msg("Error evaluating config policies")
return err
}
logging.LogStructure("Config Policy Decisions", configDecisions, appContext.Log, zerolog.DebugLevel, false, false)
req.WorkloadCluster = configEvaluatorInput.Workload.Cluster
req.Configuration = configDecisions
return nil
}
func (r *FybrikApplicationReconciler) checkGovernanceActions(configEvaluatorInput *adminconfig.EvaluatorInput,
req *DataInfo, appContext ApplicationContext, env *Environment) error {
var err error
switch configEvaluatorInput.Request.Usage {
case taxonomy.WriteFlow:
if !req.Context.Requirements.FlowParams.IsNewDataSet {
// update an existing dataset
// query the policy manager whether the operation is allowed
reqAction := policymanager.RequestAction{
ActionType: configEvaluatorInput.Request.Usage,
Destination: req.DataDetails.ResourceMetadata.Geography,
ProcessingLocation: taxonomy.ProcessingLocation(configEvaluatorInput.Workload.Cluster.Metadata.Region),
}
req.Actions, err = LookupPolicyDecisions(req.Context.DataSetID, r.PolicyManager, appContext, &reqAction)
}
case taxonomy.ReadFlow, taxonomy.DeleteFlow:
reqAction := policymanager.RequestAction{
ActionType: configEvaluatorInput.Request.Usage,
Destination: configEvaluatorInput.Workload.Cluster.Metadata.Region,
ProcessingLocation: taxonomy.ProcessingLocation(configEvaluatorInput.Workload.Cluster.Metadata.Region),
}
req.Actions, err = LookupPolicyDecisions(req.Context.DataSetID, r.PolicyManager, appContext, &reqAction)
}
if err != nil {
return err
}
// query the policy manager whether WRITE operation is allowed
// not relevant for new datasets
if req.Context.Requirements.FlowParams.IsNewDataSet {
return nil
}
for accountInd := range env.StorageAccounts {
region := env.StorageAccounts[accountInd].Spec.Region
reqAction := policymanager.RequestAction{
ActionType: taxonomy.WriteFlow,
Destination: string(region),
ProcessingLocation: region,
}
actions, err := LookupPolicyDecisions(req.Context.DataSetID, r.PolicyManager, appContext, &reqAction)
if err == nil {
req.StorageRequirements[region] = actions
} else if err.Error() != api.WriteNotAllowed {
return err
}
}
return nil
}
// GetWorkloadCluster returns a workload cluster
// If no cluster has been specified for a workload, a local cluster is assumed.
func (r *FybrikApplicationReconciler) GetWorkloadCluster(appContext ApplicationContext) (multicluster.Cluster, error) {
clusterName := appContext.Application.Spec.Selector.ClusterName
if clusterName == "" {
// if no workload selector is specified, this is not a read scenario - skip cluster detection
if appContext.Application.Spec.Selector.WorkloadSelector.Size() == 0 {
return multicluster.Cluster{}, nil
}
// the workload runs in a local cluster
appContext.Log.Warn().Err(errors.New("selector.clusterName field is not specified")).
Str(logging.ACTION, logging.CREATE).Msg("No workload cluster indicated, so a local cluster is assumed")
localClusterManager, err := local.NewClusterManager(r.Client, utils.GetSystemNamespace())
if err != nil {
return multicluster.Cluster{}, err
}
clusters, err := localClusterManager.GetClusters()
if err != nil || len(clusters) != 1 {
return multicluster.Cluster{}, err
}
return clusters[0], nil
}
// find the cluster by its name as it is specified in FybrikApplication workload selector
clusters, err := r.ClusterManager.GetClusters()
if err != nil {
return multicluster.Cluster{}, err
}
for _, cluster := range clusters {
if cluster.Name == clusterName {
return cluster, nil
}
}
return multicluster.Cluster{}, errors.New("Cluster " + clusterName + " is not available")
}
// NewFybrikApplicationReconciler creates a new reconciler for FybrikApplications
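// Typical wiring from a manager entry point (a sketch; the connector,
// provisioner and evaluator arguments are placeholders for concrete instances):
//
//    r := NewFybrikApplicationReconciler(mgr, "FybrikApplication",
//        policyManager, dataCatalog, clusterLister, provisioner, evaluator, attrManager)
//    if err := r.SetupWithManager(mgr); err != nil {
//        os.Exit(1)
//    }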
func NewFybrikApplicationReconciler(mgr ctrl.Manager, name string,
policyManager pmclient.PolicyManager, catalog dcclient.DataCatalog, cm multicluster.ClusterLister,
provision storage.ProvisionInterface, evaluator adminconfig.EvaluatorInterface,
attributeManager *infrastructure.AttributeManager) *FybrikApplicationReconciler {
log := logging.LogInit(logging.CONTROLLER, name)
return &FybrikApplicationReconciler{
Client: mgr.GetClient(),
Name: name,
Log: log,
Scheme: mgr.GetScheme(),
PolicyManager: policyManager,
ResourceInterface: NewPlotterInterface(mgr.GetClient()),
ClusterManager: cm,
Provision: provision,
DataCatalog: catalog,
ConfigEvaluator: evaluator,
Infrastructure: attributeManager,
}
}
// SetupWithManager registers FybrikApplication controller
func (r *FybrikApplicationReconciler) SetupWithManager(mgr ctrl.Manager) error {
mapFn := func(a client.Object) []reconcile.Request {
labels := a.GetLabels()
if labels == nil {
return []reconcile.Request{}
}
if !a.GetDeletionTimestamp().IsZero() {
// the owned resource is deleted - no updates should be sent
return []reconcile.Request{}
}
namespace, foundNamespace := labels[api.ApplicationNamespaceLabel]
name, foundName := labels[api.ApplicationNameLabel]
if !foundNamespace || !foundName {
return []reconcile.Request{}
}
return []reconcile.Request{
{NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
}},
}
}
numReconciles := environment.GetEnvAsInt(controllers.ApplicationConcurrentReconcilesConfiguration,
controllers.DefaultApplicationConcurrentReconciles)
return ctrl.NewControllerManagedBy(mgr).
WithOptions(controller.Options{MaxConcurrentReconciles: numReconciles}).
For(&api.FybrikApplication{}).
Watches(&source.Kind{
Type: &api.Plotter{},
}, handler.EnqueueRequestsFromMapFunc(mapFn)).Complete(r)
}
// AnalyzeError analyzes whether the given error is fatal, or whether a retry attempt can be made.
// Reasons for a retry can be either communication problems with external services, or
// failures of kubernetes to perform some action on a resource.
// A retry is achieved by returning an error to the reconcile method.
func AnalyzeError(appContext ApplicationContext, assetID string, err error) {
if err == nil {
return
}
switch err.Error() {
case api.InvalidAssetID, api.ReadAccessDenied, api.CopyNotAllowed, api.WriteNotAllowed, api.InvalidAssetDataStore:
setDenyCondition(appContext, assetID, err.Error())
default:
setErrorCondition(appContext, assetID, err.Error())
}
}
func ownerLabels(id types.NamespacedName) map[string]string {
return map[string]string{
api.ApplicationNamespaceLabel: id.Namespace,
api.ApplicationNameLabel: id.Name,
}
}
// GetAllModules returns all CRDs of the kind FybrikModule mapped by their name
func (r *FybrikApplicationReconciler) GetAllModules() (map[string]*api.FybrikModule, error) {
ctx := context.Background()
moduleMap := make(map[string]*api.FybrikModule)
var moduleList api.FybrikModuleList
if err := r.List(ctx, &moduleList, client.InNamespace(utils.GetSystemNamespace())); err != nil {
return moduleMap, err
}
for ind := range moduleList.Items {
moduleMap[moduleList.Items[ind].Name] = &moduleList.Items[ind]
}
return moduleMap, nil
}
// getStorageAccounts returns all available storage accounts
func (r *FybrikApplicationReconciler) getStorageAccounts() ([]*api.FybrikStorageAccount, error) {
var accountList api.FybrikStorageAccountList
if err := r.List(context.Background(), &accountList, client.InNamespace(utils.GetSystemNamespace())); err != nil {
return nil, err
}
accounts := []*api.FybrikStorageAccount{}
for i := range accountList.Items {
accounts = append(accounts, accountList.Items[i].DeepCopy())
}
return accounts, nil
}
func (r *FybrikApplicationReconciler) updateProvisionedStorageStatus(applicationContext ApplicationContext,
provisionedStorage map[string]NewAssetInfo) (bool, error) {
// update allocated storage in the status
// clean irrelevant buckets
for datasetID, details := range applicationContext.Application.Status.ProvisionedStorage {
if _, found := provisionedStorage[datasetID]; !found {
_ = r.Provision.DeleteDataset(getBucketResourceRef(details.DatasetRef))
delete(applicationContext.Application.Status.ProvisionedStorage, datasetID)
}
}
// add or update new buckets
for datasetID, info := range provisionedStorage {
applicationContext.Application.Status.ProvisionedStorage[datasetID] = api.DatasetDetails{
DatasetRef: info.Storage.Name,
SecretRef: info.Storage.SecretRef.Name,
}
}
// check that the buckets have been created successfully using Dataset status
for id, details := range applicationContext.Application.Status.ProvisionedStorage {
res, err := r.Provision.GetDatasetStatus(getBucketResourceRef(details.DatasetRef))
if err != nil {
return false, nil
}
if !res.Provisioned {
applicationContext.Log.Warn().Err(errors.New(res.ErrorMsg)).Str(logging.ACTION, logging.CREATE).
Str(logging.DATASETID, id).Msg("No bucket has been provisioned")
if res.ErrorMsg != "" {
return false, errors.New(res.ErrorMsg)
}
return false, nil
}
}
return true, nil
}
func (r *FybrikApplicationReconciler) buildSolution(applicationContext ApplicationContext, env *Environment,
requirements []DataInfo) (map[string]NewAssetInfo, *api.PlotterSpec, error) {
plotterGen := &PlotterGenerator{
Client: r.Client,
Log: applicationContext.Log,
Owner: client.ObjectKeyFromObject(applicationContext.Application),
Provision: r.Provision,
ProvisionedStorage: make(map[string]NewAssetInfo),
}
plotterSpec := &api.PlotterSpec{
Selector: applicationContext.Application.Spec.Selector,
AppInfo: applicationContext.Application.Spec.AppInfo,
Assets: map[string]api.AssetDetails{},
Flows: []api.Flow{},
ModulesNamespace: utils.GetDefaultModulesNamespace(),
Templates: map[string]api.Template{},
}
for ind := range requirements {
path, err := solve(env, &requirements[ind], applicationContext.Log)
if err != nil {
setErrorCondition(applicationContext, requirements[ind].Context.DataSetID, err.Error())
continue
}
// If the flag IsNewDataSet is true then a new asset must be allocated
if requirements[ind].Context.Requirements.FlowParams.IsNewDataSet {
err = plotterGen.handleNewAsset(&requirements[ind], &path)
if err != nil {
setErrorCondition(applicationContext, requirements[ind].Context.DataSetID, err.Error())
return plotterGen.ProvisionedStorage, plotterSpec, err
}
}
err = plotterGen.AddFlowInfoForAsset(&requirements[ind], applicationContext.Application, &path, plotterSpec)
if err != nil {
setErrorCondition(applicationContext, requirements[ind].Context.DataSetID, err.Error())
return plotterGen.ProvisionedStorage, plotterSpec, err
}
}
return plotterGen.ProvisionedStorage, plotterSpec, nil
}
| [
"\"ENABLE_WEBHOOKS\""
]
| []
| [
"ENABLE_WEBHOOKS"
]
| [] | ["ENABLE_WEBHOOKS"] | go | 1 | 0 | |
unomaha_utils/util.py | import logging
logging.basicConfig(level=logging.INFO)
def get_term_from_date(date):
term_ranges = {('aug', 'dec'): "Fall",
('jan', "apr"): "Spring",
('jan', "may"): "Spring",
('may', "jun"): "Summer",
('may', 'jul'): "Summer",
('may', 'aug'): "Summer",
('jul', 'aug'): "Summer",
('jun', 'aug'): "Summer"
}
term = "Unknown"
for months, term_name in term_ranges.items():
if months[0] in date.lower() and months[1] in date.lower():
term = term_name
year = date.split("-")[0].split(",")[-1].strip()
if term == "Unknown":
logging.warning("Unknown term, date is: %s", date)
return "{} {}".format(term, year), term != "Unknown"
def get_term_name(colleges):
for _, courses in colleges.items():
for _, course_info in courses.items():
for _, section_info in course_info['sections'].items():
potential_term_date = section_info.get('Date', '')
term_str, is_good = get_term_from_date(potential_term_date)
if is_good:
return term_str
def get_term_mapping(term_data):
mapping = {}
for term_key, colleges in term_data.items():
term_name = get_term_name(colleges)
mapping[term_key] = term_name
return mapping
| []
| []
| []
| [] | [] | python | null | null | null |
Lib/test/test_compileall.py | import sys
import compileall
import importlib.util
import test.test_importlib.util
import os
import pathlib
import py_compile
import shutil
import struct
import tempfile
import time
import unittest
import io
from unittest import mock, skipUnless
try:
from concurrent.futures import ProcessPoolExecutor
_have_multiprocessing = True
except ImportError:
_have_multiprocessing = False
from test import support
from test.support import script_helper
from .test_py_compile import without_source_date_epoch
from .test_py_compile import SourceDateEpochTestMeta
class CompileallTestsBase:
def setUp(self):
self.directory = tempfile.mkdtemp()
self.source_path = os.path.join(self.directory, '_test.py')
self.bc_path = importlib.util.cache_from_source(self.source_path)
with open(self.source_path, 'w') as file:
file.write('x = 123\n')
self.source_path2 = os.path.join(self.directory, '_test2.py')
self.bc_path2 = importlib.util.cache_from_source(self.source_path2)
shutil.copyfile(self.source_path, self.source_path2)
self.subdirectory = os.path.join(self.directory, '_subdir')
os.mkdir(self.subdirectory)
self.source_path3 = os.path.join(self.subdirectory, '_test3.py')
shutil.copyfile(self.source_path, self.source_path3)
def tearDown(self):
shutil.rmtree(self.directory)
def add_bad_source_file(self):
self.bad_source_path = os.path.join(self.directory, '_test_bad.py')
with open(self.bad_source_path, 'w') as file:
file.write('x (\n')
def timestamp_metadata(self):
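# The 12 bytes read here are the pyc header prefix: a 4-byte magic number,
# a 4-byte flags field (0 for timestamp-based invalidation, per PEP 552),
# and the 4-byte source mtime.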
with open(self.bc_path, 'rb') as file:
data = file.read(12)
mtime = int(os.stat(self.source_path).st_mtime)
compare = struct.pack('<4sll', importlib.util.MAGIC_NUMBER, 0, mtime)
return data, compare
def recreation_check(self, metadata):
"""Check that compileall recreates bytecode when the new metadata is
used."""
if os.environ.get('SOURCE_DATE_EPOCH'):
raise unittest.SkipTest('SOURCE_DATE_EPOCH is set')
py_compile.compile(self.source_path)
self.assertEqual(*self.timestamp_metadata())
with open(self.bc_path, 'rb') as file:
bc = file.read()[len(metadata):]
with open(self.bc_path, 'wb') as file:
file.write(metadata)
file.write(bc)
self.assertNotEqual(*self.timestamp_metadata())
compileall.compile_dir(self.directory, force=False, quiet=True)
        self.assertEqual(*self.timestamp_metadata())
def test_mtime(self):
# Test a change in mtime leads to a new .pyc.
self.recreation_check(struct.pack('<4sll', importlib.util.MAGIC_NUMBER,
0, 1))
def test_magic_number(self):
        # Test a change in magic number leads to a new .pyc.
self.recreation_check(b'\0\0\0\0')
def test_compile_files(self):
# Test compiling a single file, and complete directory
for fn in (self.bc_path, self.bc_path2):
try:
os.unlink(fn)
            except OSError:
pass
self.assertTrue(compileall.compile_file(self.source_path,
force=False, quiet=True))
self.assertTrue(os.path.isfile(self.bc_path) and
not os.path.isfile(self.bc_path2))
os.unlink(self.bc_path)
self.assertTrue(compileall.compile_dir(self.directory, force=False,
quiet=True))
self.assertTrue(os.path.isfile(self.bc_path) and
os.path.isfile(self.bc_path2))
os.unlink(self.bc_path)
os.unlink(self.bc_path2)
# Test against bad files
self.add_bad_source_file()
self.assertFalse(compileall.compile_file(self.bad_source_path,
force=False, quiet=2))
self.assertFalse(compileall.compile_dir(self.directory,
force=False, quiet=2))
def test_compile_file_pathlike(self):
self.assertFalse(os.path.isfile(self.bc_path))
# we should also test the output
with support.captured_stdout() as stdout:
self.assertTrue(compileall.compile_file(pathlib.Path(self.source_path)))
self.assertRegex(stdout.getvalue(), r'Compiling ([^WindowsPath|PosixPath].*)')
self.assertTrue(os.path.isfile(self.bc_path))
def test_compile_file_pathlike_ddir(self):
self.assertFalse(os.path.isfile(self.bc_path))
self.assertTrue(compileall.compile_file(pathlib.Path(self.source_path),
ddir=pathlib.Path('ddir_path'),
quiet=2))
self.assertTrue(os.path.isfile(self.bc_path))
def test_compile_path(self):
with test.test_importlib.util.import_state(path=[self.directory]):
self.assertTrue(compileall.compile_path(quiet=2))
with test.test_importlib.util.import_state(path=[self.directory]):
self.add_bad_source_file()
self.assertFalse(compileall.compile_path(skip_curdir=False,
force=True, quiet=2))
def test_no_pycache_in_non_package(self):
# Bug 8563 reported that __pycache__ directories got created by
# compile_file() for non-.py files.
data_dir = os.path.join(self.directory, 'data')
data_file = os.path.join(data_dir, 'file')
os.mkdir(data_dir)
# touch data/file
with open(data_file, 'w'):
pass
compileall.compile_file(data_file)
self.assertFalse(os.path.exists(os.path.join(data_dir, '__pycache__')))
def test_optimize(self):
# make sure compiling with different optimization settings than the
# interpreter's creates the correct file names
optimize, opt = (1, 1) if __debug__ else (0, '')
compileall.compile_dir(self.directory, quiet=True, optimize=optimize)
cached = importlib.util.cache_from_source(self.source_path,
optimization=opt)
self.assertTrue(os.path.isfile(cached))
cached2 = importlib.util.cache_from_source(self.source_path2,
optimization=opt)
self.assertTrue(os.path.isfile(cached2))
cached3 = importlib.util.cache_from_source(self.source_path3,
optimization=opt)
self.assertTrue(os.path.isfile(cached3))
def test_compile_dir_pathlike(self):
self.assertFalse(os.path.isfile(self.bc_path))
with support.captured_stdout() as stdout:
compileall.compile_dir(pathlib.Path(self.directory))
line = stdout.getvalue().splitlines()[0]
self.assertRegex(line, r'Listing ([^WindowsPath|PosixPath].*)')
self.assertTrue(os.path.isfile(self.bc_path))
@mock.patch('concurrent.futures.ProcessPoolExecutor')
def test_compile_pool_called(self, pool_mock):
compileall.compile_dir(self.directory, quiet=True, workers=5)
self.assertTrue(pool_mock.called)
def test_compile_workers_non_positive(self):
with self.assertRaisesRegex(ValueError,
"workers must be greater or equal to 0"):
compileall.compile_dir(self.directory, workers=-1)
@mock.patch('concurrent.futures.ProcessPoolExecutor')
def test_compile_workers_cpu_count(self, pool_mock):
compileall.compile_dir(self.directory, quiet=True, workers=0)
self.assertEqual(pool_mock.call_args[1]['max_workers'], None)
@mock.patch('concurrent.futures.ProcessPoolExecutor')
@mock.patch('compileall.compile_file')
def test_compile_one_worker(self, compile_file_mock, pool_mock):
compileall.compile_dir(self.directory, quiet=True)
self.assertFalse(pool_mock.called)
self.assertTrue(compile_file_mock.called)
@mock.patch('concurrent.futures.ProcessPoolExecutor', new=None)
@mock.patch('compileall.compile_file')
def test_compile_missing_multiprocessing(self, compile_file_mock):
compileall.compile_dir(self.directory, quiet=True, workers=5)
self.assertTrue(compile_file_mock.called)
class CompileallTestsWithSourceEpoch(CompileallTestsBase,
unittest.TestCase,
metaclass=SourceDateEpochTestMeta,
source_date_epoch=True):
pass
class CompileallTestsWithoutSourceEpoch(CompileallTestsBase,
unittest.TestCase,
metaclass=SourceDateEpochTestMeta,
source_date_epoch=False):
pass
class EncodingTest(unittest.TestCase):
"""Issue 6716: compileall should escape source code when printing errors
to stdout."""
def setUp(self):
self.directory = tempfile.mkdtemp()
self.source_path = os.path.join(self.directory, '_test.py')
with open(self.source_path, 'w', encoding='utf-8') as file:
file.write('# -*- coding: utf-8 -*-\n')
file.write('print u"\u20ac"\n')
def tearDown(self):
shutil.rmtree(self.directory)
def test_error(self):
try:
orig_stdout = sys.stdout
sys.stdout = io.TextIOWrapper(io.BytesIO(),encoding='ascii')
compileall.compile_dir(self.directory)
finally:
sys.stdout = orig_stdout
class CommandLineTestsBase:
"""Test compileall's CLI."""
@classmethod
def setUpClass(cls):
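        # Probe every directory on sys.path by creating and removing a file in
        # its __pycache__; if any entry is not writable, tests that compile
        # sys.path are skipped via _skip_if_sys_path_not_writable().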
for path in filter(os.path.isdir, sys.path):
directory_created = False
directory = pathlib.Path(path) / '__pycache__'
path = directory / 'test.try'
try:
if not directory.is_dir():
directory.mkdir()
directory_created = True
with path.open('w') as file:
file.write('# for test_compileall')
except OSError:
sys_path_writable = False
break
finally:
support.unlink(str(path))
if directory_created:
directory.rmdir()
else:
sys_path_writable = True
        # cls._sys_path_writable = sys_path_writable
        cls._sys_path_writable = False  # embedded import does not support this.
def _skip_if_sys_path_not_writable(self):
if not self._sys_path_writable:
raise unittest.SkipTest('not all entries on sys.path are writable')
def _get_run_args(self, args):
return [*support.optim_args_from_interpreter_flags(),
'-S', '-m', 'compileall',
*args]
def assertRunOK(self, *args, **env_vars):
rc, out, err = script_helper.assert_python_ok(
*self._get_run_args(args), **env_vars)
self.assertEqual(b'', err)
return out
def assertRunNotOK(self, *args, **env_vars):
rc, out, err = script_helper.assert_python_failure(
*self._get_run_args(args), **env_vars)
return rc, out, err
def assertCompiled(self, fn):
path = importlib.util.cache_from_source(fn)
self.assertTrue(os.path.exists(path))
def assertNotCompiled(self, fn):
path = importlib.util.cache_from_source(fn)
self.assertFalse(os.path.exists(path))
def setUp(self):
self.directory = tempfile.mkdtemp()
self.addCleanup(support.rmtree, self.directory)
self.pkgdir = os.path.join(self.directory, 'foo')
os.mkdir(self.pkgdir)
self.pkgdir_cachedir = os.path.join(self.pkgdir, '__pycache__')
# Create the __init__.py and a package module.
self.initfn = script_helper.make_script(self.pkgdir, '__init__', '')
self.barfn = script_helper.make_script(self.pkgdir, 'bar', '')
def test_no_args_compiles_path(self):
# Note that -l is implied for the no args case.
self._skip_if_sys_path_not_writable()
bazfn = script_helper.make_script(self.directory, 'baz', '')
self.assertRunOK(PYTHONPATH=self.directory)
self.assertCompiled(bazfn)
self.assertNotCompiled(self.initfn)
self.assertNotCompiled(self.barfn)
@without_source_date_epoch # timestamp invalidation test
def test_no_args_respects_force_flag(self):
self._skip_if_sys_path_not_writable()
bazfn = script_helper.make_script(self.directory, 'baz', '')
self.assertRunOK(PYTHONPATH=self.directory)
pycpath = importlib.util.cache_from_source(bazfn)
# Set atime/mtime backward to avoid file timestamp resolution issues
os.utime(pycpath, (time.time()-60,)*2)
mtime = os.stat(pycpath).st_mtime
# Without force, no recompilation
self.assertRunOK(PYTHONPATH=self.directory)
mtime2 = os.stat(pycpath).st_mtime
self.assertEqual(mtime, mtime2)
# Now force it.
self.assertRunOK('-f', PYTHONPATH=self.directory)
mtime2 = os.stat(pycpath).st_mtime
self.assertNotEqual(mtime, mtime2)
def test_no_args_respects_quiet_flag(self):
self._skip_if_sys_path_not_writable()
script_helper.make_script(self.directory, 'baz', '')
noisy = self.assertRunOK(PYTHONPATH=self.directory)
self.assertIn(b'Listing ', noisy)
quiet = self.assertRunOK('-q', PYTHONPATH=self.directory)
self.assertNotIn(b'Listing ', quiet)
# Ensure that the default behavior of compileall's CLI is to create
# PEP 3147/PEP 488 pyc files.
for name, ext, switch in [
('normal', 'pyc', []),
('optimize', 'opt-1.pyc', ['-O']),
('doubleoptimize', 'opt-2.pyc', ['-OO']),
]:
def f(self, ext=ext, switch=switch):
script_helper.assert_python_ok(*(switch +
['-m', 'compileall', '-q', self.pkgdir]))
# Verify the __pycache__ directory contents.
self.assertTrue(os.path.exists(self.pkgdir_cachedir))
expected = sorted(base.format(sys.implementation.cache_tag, ext)
for base in ('__init__.{}.{}', 'bar.{}.{}'))
self.assertEqual(sorted(os.listdir(self.pkgdir_cachedir)), expected)
# Make sure there are no .pyc files in the source directory.
self.assertFalse([fn for fn in os.listdir(self.pkgdir)
if fn.endswith(ext)])
locals()['test_pep3147_paths_' + name] = f
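    # Binding f under a distinct per-variant name in the class body turns the
    # loop above into three separate test methods, one per optimization level.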
def test_legacy_paths(self):
# Ensure that with the proper switch, compileall leaves legacy
# pyc files, and no __pycache__ directory.
self.assertRunOK('-b', '-q', self.pkgdir)
# Verify the __pycache__ directory contents.
self.assertFalse(os.path.exists(self.pkgdir_cachedir))
expected = sorted(['__init__.py', '__init__.pyc', 'bar.py',
'bar.pyc'])
self.assertEqual(sorted(os.listdir(self.pkgdir)), expected)
def test_multiple_runs(self):
# Bug 8527 reported that multiple calls produced empty
# __pycache__/__pycache__ directories.
self.assertRunOK('-q', self.pkgdir)
# Verify the __pycache__ directory contents.
self.assertTrue(os.path.exists(self.pkgdir_cachedir))
cachecachedir = os.path.join(self.pkgdir_cachedir, '__pycache__')
self.assertFalse(os.path.exists(cachecachedir))
# Call compileall again.
self.assertRunOK('-q', self.pkgdir)
self.assertTrue(os.path.exists(self.pkgdir_cachedir))
self.assertFalse(os.path.exists(cachecachedir))
@without_source_date_epoch # timestamp invalidation test
def test_force(self):
self.assertRunOK('-q', self.pkgdir)
pycpath = importlib.util.cache_from_source(self.barfn)
# set atime/mtime backward to avoid file timestamp resolution issues
os.utime(pycpath, (time.time()-60,)*2)
mtime = os.stat(pycpath).st_mtime
# without force, no recompilation
self.assertRunOK('-q', self.pkgdir)
mtime2 = os.stat(pycpath).st_mtime
self.assertEqual(mtime, mtime2)
# now force it.
self.assertRunOK('-q', '-f', self.pkgdir)
mtime2 = os.stat(pycpath).st_mtime
self.assertNotEqual(mtime, mtime2)
def test_recursion_control(self):
subpackage = os.path.join(self.pkgdir, 'spam')
os.mkdir(subpackage)
subinitfn = script_helper.make_script(subpackage, '__init__', '')
hamfn = script_helper.make_script(subpackage, 'ham', '')
self.assertRunOK('-q', '-l', self.pkgdir)
self.assertNotCompiled(subinitfn)
self.assertFalse(os.path.exists(os.path.join(subpackage, '__pycache__')))
self.assertRunOK('-q', self.pkgdir)
self.assertCompiled(subinitfn)
self.assertCompiled(hamfn)
def test_recursion_limit(self):
subpackage = os.path.join(self.pkgdir, 'spam')
subpackage2 = os.path.join(subpackage, 'ham')
subpackage3 = os.path.join(subpackage2, 'eggs')
for pkg in (subpackage, subpackage2, subpackage3):
script_helper.make_pkg(pkg)
subinitfn = os.path.join(subpackage, '__init__.py')
hamfn = script_helper.make_script(subpackage, 'ham', '')
spamfn = script_helper.make_script(subpackage2, 'spam', '')
eggfn = script_helper.make_script(subpackage3, 'egg', '')
self.assertRunOK('-q', '-r 0', self.pkgdir)
self.assertNotCompiled(subinitfn)
self.assertFalse(
os.path.exists(os.path.join(subpackage, '__pycache__')))
self.assertRunOK('-q', '-r 1', self.pkgdir)
self.assertCompiled(subinitfn)
self.assertCompiled(hamfn)
self.assertNotCompiled(spamfn)
self.assertRunOK('-q', '-r 2', self.pkgdir)
self.assertCompiled(subinitfn)
self.assertCompiled(hamfn)
self.assertCompiled(spamfn)
self.assertNotCompiled(eggfn)
self.assertRunOK('-q', '-r 5', self.pkgdir)
self.assertCompiled(subinitfn)
self.assertCompiled(hamfn)
self.assertCompiled(spamfn)
self.assertCompiled(eggfn)
def test_quiet(self):
noisy = self.assertRunOK(self.pkgdir)
quiet = self.assertRunOK('-q', self.pkgdir)
self.assertNotEqual(b'', noisy)
self.assertEqual(b'', quiet)
def test_silent(self):
script_helper.make_script(self.pkgdir, 'crunchyfrog', 'bad(syntax')
_, quiet, _ = self.assertRunNotOK('-q', self.pkgdir)
_, silent, _ = self.assertRunNotOK('-qq', self.pkgdir)
self.assertNotEqual(b'', quiet)
self.assertEqual(b'', silent)
def test_regexp(self):
self.assertRunOK('-q', '-x', r'ba[^\\/]*$', self.pkgdir)
self.assertNotCompiled(self.barfn)
self.assertCompiled(self.initfn)
def test_multiple_dirs(self):
pkgdir2 = os.path.join(self.directory, 'foo2')
os.mkdir(pkgdir2)
init2fn = script_helper.make_script(pkgdir2, '__init__', '')
bar2fn = script_helper.make_script(pkgdir2, 'bar2', '')
self.assertRunOK('-q', self.pkgdir, pkgdir2)
self.assertCompiled(self.initfn)
self.assertCompiled(self.barfn)
self.assertCompiled(init2fn)
self.assertCompiled(bar2fn)
def test_d_compile_error(self):
script_helper.make_script(self.pkgdir, 'crunchyfrog', 'bad(syntax')
rc, out, err = self.assertRunNotOK('-q', '-d', 'dinsdale', self.pkgdir)
self.assertRegex(out, b'File "dinsdale')
def test_d_runtime_error(self):
bazfn = script_helper.make_script(self.pkgdir, 'baz', 'raise Exception')
self.assertRunOK('-q', '-d', 'dinsdale', self.pkgdir)
fn = script_helper.make_script(self.pkgdir, 'bing', 'import baz')
pyc = importlib.util.cache_from_source(bazfn)
os.rename(pyc, os.path.join(self.pkgdir, 'baz.pyc'))
os.remove(bazfn)
rc, out, err = script_helper.assert_python_failure(fn, __isolated=False)
self.assertRegex(err, b'File "dinsdale')
def test_include_bad_file(self):
rc, out, err = self.assertRunNotOK(
'-i', os.path.join(self.directory, 'nosuchfile'), self.pkgdir)
self.assertRegex(out, b'rror.*nosuchfile')
self.assertNotRegex(err, b'Traceback')
self.assertFalse(os.path.exists(importlib.util.cache_from_source(
self.pkgdir_cachedir)))
def test_include_file_with_arg(self):
f1 = script_helper.make_script(self.pkgdir, 'f1', '')
f2 = script_helper.make_script(self.pkgdir, 'f2', '')
f3 = script_helper.make_script(self.pkgdir, 'f3', '')
f4 = script_helper.make_script(self.pkgdir, 'f4', '')
with open(os.path.join(self.directory, 'l1'), 'w') as l1:
l1.write(os.path.join(self.pkgdir, 'f1.py')+os.linesep)
l1.write(os.path.join(self.pkgdir, 'f2.py')+os.linesep)
self.assertRunOK('-i', os.path.join(self.directory, 'l1'), f4)
self.assertCompiled(f1)
self.assertCompiled(f2)
self.assertNotCompiled(f3)
self.assertCompiled(f4)
def test_include_file_no_arg(self):
f1 = script_helper.make_script(self.pkgdir, 'f1', '')
f2 = script_helper.make_script(self.pkgdir, 'f2', '')
f3 = script_helper.make_script(self.pkgdir, 'f3', '')
f4 = script_helper.make_script(self.pkgdir, 'f4', '')
with open(os.path.join(self.directory, 'l1'), 'w') as l1:
l1.write(os.path.join(self.pkgdir, 'f2.py')+os.linesep)
self.assertRunOK('-i', os.path.join(self.directory, 'l1'))
self.assertNotCompiled(f1)
self.assertCompiled(f2)
self.assertNotCompiled(f3)
self.assertNotCompiled(f4)
def test_include_on_stdin(self):
f1 = script_helper.make_script(self.pkgdir, 'f1', '')
f2 = script_helper.make_script(self.pkgdir, 'f2', '')
f3 = script_helper.make_script(self.pkgdir, 'f3', '')
f4 = script_helper.make_script(self.pkgdir, 'f4', '')
p = script_helper.spawn_python(*(self._get_run_args(()) + ['-i', '-']))
p.stdin.write((f3+os.linesep).encode('ascii'))
script_helper.kill_python(p)
self.assertNotCompiled(f1)
self.assertNotCompiled(f2)
self.assertCompiled(f3)
self.assertNotCompiled(f4)
def test_compiles_as_much_as_possible(self):
bingfn = script_helper.make_script(self.pkgdir, 'bing', 'syntax(error')
rc, out, err = self.assertRunNotOK('nosuchfile', self.initfn,
bingfn, self.barfn)
self.assertRegex(out, b'rror')
self.assertNotCompiled(bingfn)
self.assertCompiled(self.initfn)
self.assertCompiled(self.barfn)
def test_invalid_arg_produces_message(self):
out = self.assertRunOK('badfilename')
self.assertRegex(out, b"Can't list 'badfilename'")
def test_pyc_invalidation_mode(self):
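        # PEP 552 hash-based pycs: bit 0 of the flags word marks the pyc as
        # hash-based and bit 1 ("check_source") requests re-validation, so
        # checked-hash pycs carry flags 0b11 and unchecked-hash pycs 0b01.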
script_helper.make_script(self.pkgdir, 'f1', '')
pyc = importlib.util.cache_from_source(
os.path.join(self.pkgdir, 'f1.py'))
self.assertRunOK('--invalidation-mode=checked-hash', self.pkgdir)
with open(pyc, 'rb') as fp:
data = fp.read()
self.assertEqual(int.from_bytes(data[4:8], 'little'), 0b11)
self.assertRunOK('--invalidation-mode=unchecked-hash', self.pkgdir)
with open(pyc, 'rb') as fp:
data = fp.read()
self.assertEqual(int.from_bytes(data[4:8], 'little'), 0b01)
@skipUnless(_have_multiprocessing, "requires multiprocessing")
def test_workers(self):
bar2fn = script_helper.make_script(self.directory, 'bar2', '')
files = []
for suffix in range(5):
pkgdir = os.path.join(self.directory, 'foo{}'.format(suffix))
os.mkdir(pkgdir)
fn = script_helper.make_script(pkgdir, '__init__', '')
files.append(script_helper.make_script(pkgdir, 'bar2', ''))
self.assertRunOK(self.directory, '-j', '0')
self.assertCompiled(bar2fn)
for file in files:
self.assertCompiled(file)
@mock.patch('compileall.compile_dir')
def test_workers_available_cores(self, compile_dir):
with mock.patch("sys.argv",
new=[sys.executable, self.directory, "-j0"]):
compileall.main()
self.assertTrue(compile_dir.called)
self.assertEqual(compile_dir.call_args[-1]['workers'], 0)
class CommandLineTestsWithSourceEpoch(CommandLineTestsBase,
                                      unittest.TestCase,
                                      metaclass=SourceDateEpochTestMeta,
                                      source_date_epoch=True):
    pass
class CommandLineTestsNoSourceEpoch(CommandLineTestsBase,
                                    unittest.TestCase,
                                    metaclass=SourceDateEpochTestMeta,
                                    source_date_epoch=False):
    pass
if __name__ == "__main__":
unittest.main()
| []
| []
| [
"SOURCE_DATE_EPOCH"
]
| [] | ["SOURCE_DATE_EPOCH"] | python | 1 | 0 | |
src/test/org/apache/hadoop/chukwa/database/TestDatabaseAggregator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.chukwa.database;
import junit.framework.TestCase;
import java.util.Calendar;
import org.apache.hadoop.chukwa.database.Macro;
import org.apache.hadoop.chukwa.util.DatabaseWriter;
import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
import org.apache.hadoop.chukwa.util.ExceptionUtil;
import org.apache.hadoop.chukwa.database.Aggregator;
import org.apache.hadoop.chukwa.database.TableCreator;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
public class TestDatabaseAggregator extends TestCase {
long[] timeWindow = {7, 30, 91, 365, 3650};
String[] tables = {"system_metrics","disk","cluster_system_metrics","cluster_disk","mr_job","mr_task","dfs_namenode","dfs_datanode","dfs_fsnamesystem","dfs_throughput","hadoop_jvm","hadoop_mapred","hdfs_usage"};
String cluster = "demo";
long current = Calendar.getInstance().getTimeInMillis();
public void setUp() {
System.setProperty("CLUSTER","demo");
DatabaseWriter db = new DatabaseWriter(cluster);
String buffer = "";
File aFile = new File(System.getenv("CHUKWA_CONF_DIR")
+ File.separator + "database_create_tables.sql");
buffer = readFile(aFile);
String tables[] = buffer.split(";");
for(String table : tables) {
if(table.length()>5) {
db.execute(table);
}
}
db.close();
for(int i=0;i<timeWindow.length;i++) {
TableCreator tc = new TableCreator();
long start = current;
long end = current + (timeWindow[i]*1440*60*1000);
tc.createTables(start, end);
}
}
public void tearDown() {
DatabaseWriter db = null;
try {
db = new DatabaseWriter(cluster);
ResultSet rs = db.query("show tables");
ArrayList<String> list = new ArrayList<String>();
while(rs.next()) {
String table = rs.getString(1);
list.add(table);
}
for(String table : list) {
db.execute("drop table "+table);
}
} catch(Throwable ex) {
} finally {
if(db!=null) {
db.close();
}
}
}
public void verifyTable(String table) {
ChukwaConfiguration cc = new ChukwaConfiguration();
String query = "select * from ["+table+"];";
Macro mp = new Macro(current,query);
query = mp.toString();
try {
DatabaseWriter db = new DatabaseWriter(cluster);
ResultSet rs = db.query(query);
while(rs.next()) {
int i = 1;
String value = rs.getString(i);
}
db.close();
} catch(SQLException ex) {
fail("SQL Exception: "+ExceptionUtil.getStackTrace(ex));
}
}
public String readFile(File aFile) {
StringBuffer contents = new StringBuffer();
try {
BufferedReader input = new BufferedReader(new FileReader(aFile));
try {
String line = null; // not declared within while loop
while ((line = input.readLine()) != null) {
contents.append(line);
contents.append(System.getProperty("line.separator"));
}
} finally {
input.close();
}
} catch (IOException ex) {
ex.printStackTrace();
}
return contents.toString();
}
public void testAggregator() {
Aggregator dba = new Aggregator();
DatabaseWriter db = new DatabaseWriter(cluster);
dba.setWriter(db);
String queries = Aggregator.getContents(new File(System
.getenv("CHUKWA_CONF_DIR")
+ File.separator + "aggregator.sql"));
String[] query = queries.split("\n");
for (int i = 0; i < query.length; i++) {
if(query[i].indexOf("#")==-1) {
try {
dba.process(query[i]);
assertTrue("Completed query: "+query[i],true);
} catch(Throwable ex) {
fail("Exception: "+ExceptionUtil.getStackTrace(ex));
}
}
}
db.close();
}
}
| [
"\"CHUKWA_CONF_DIR\""
]
| []
| [
"CHUKWA_CONF_DIR"
]
| [] | ["CHUKWA_CONF_DIR"] | java | 1 | 0 | |
cmd/kube-vip.go | package cmd
import (
"context"
"fmt"
"net/http"
"os"
"time"
"github.com/kube-vip/kube-vip/pkg/kubevip"
"github.com/kube-vip/kube-vip/pkg/manager"
"github.com/kube-vip/kube-vip/pkg/packet"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// Path to the configuration file
var configPath string
// Namespace within the Kubernetes cluster
var namespace string
// Disable the Virtual IP (bind to the existing network stack)
var disableVIP bool
// Run in control-plane mode (manage the VIP for the Kubernetes control plane)
var controlPlane bool
// Advertise services via ARP when running as a load balancer (within a pod / kubernetes)
var serviceArp bool
// ConfigMap name within a Kubernetes cluster
var configMap string
// Configure the level of logging
var logLevel uint32
// Provider Config
var providerConfig string
// Release - this struct contains the release information populated when building kube-vip
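// (the values are typically injected at build time, e.g. via -ldflags "-X ...")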
var Release struct {
Version string
Build string
}
// Structs used via the various subcommands
var initConfig kubevip.Config
var initLoadBalancer kubevip.LoadBalancer
// Points to a kubernetes configuration file
var kubeConfigPath string
var kubeVipCmd = &cobra.Command{
Use: "kube-vip",
Short: "This is a server for providing a Virtual IP and load-balancer for the Kubernetes control-plane",
}
func init() {
localpeer, err := autoGenLocalPeer()
if err != nil {
log.Fatalln(err)
}
initConfig.LocalPeer = *localpeer
//initConfig.Peers = append(initConfig.Peers, *localpeer)
kubeVipCmd.PersistentFlags().StringVar(&initConfig.Interface, "interface", "", "Name of the interface to bind to")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.ServicesInterface, "serviceInterface", "", "Name of the interface to bind to (for services)")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.VIP, "vip", "", "The Virtual IP address")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.Address, "address", "", "an address (IP or DNS name) to use as a VIP")
kubeVipCmd.PersistentFlags().IntVar(&initConfig.Port, "port", 6443, "listen port for the VIP")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.VIPCIDR, "cidr", "32", "The CIDR range for the virtual IP address")
	kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableARP, "arp", false, "Enable ARP for VIP changes")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.Annotations, "annotations", "", "Set Node annotations prefix for parsing")
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.DDNS, "ddns", false, "use Dynamic DNS + DHCP to allocate VIP for address")
// Clustering type (leaderElection)
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableLeaderElection, "leaderElection", false, "Use the Kubernetes leader election mechanism for clustering")
kubeVipCmd.PersistentFlags().IntVar(&initConfig.LeaseDuration, "leaseDuration", 5, "Length of time a Kubernetes leader lease can be held for")
kubeVipCmd.PersistentFlags().IntVar(&initConfig.RenewDeadline, "leaseRenewDuration", 3, "Length of time a Kubernetes leader can attempt to renew its lease")
kubeVipCmd.PersistentFlags().IntVar(&initConfig.RetryPeriod, "leaseRetry", 1, "Number of times the host will retry to hold a lease")
// Clustering type (raft)
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.StartAsLeader, "startAsLeader", false, "Start this instance as the cluster leader")
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.AddPeersAsBackends, "addPeersToLB", true, "Add raft peers to the load-balancer")
// Packet flags
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableMetal, "metal", false, "This will use the Equinix Metal API (requires the token ENV) to update the EIP <-> VIP")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.MetalAPIKey, "metalKey", "", "The API token for authenticating with the Equinix Metal API")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.MetalProject, "metalProject", "", "The name of project already created within Equinix Metal")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.MetalProjectID, "metalProjectID", "", "The ID of project already created within Equinix Metal")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.ProviderConfig, "provider-config", "", "The path to a provider configuration")
// Load Balancer flags
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableLoadBalancer, "lbEnable", false, "Enable a load-balancer on the VIP")
kubeVipCmd.PersistentFlags().BoolVar(&initLoadBalancer.BindToVip, "lbBindToVip", true, "Bind example load balancer to VIP")
kubeVipCmd.PersistentFlags().StringVar(&initLoadBalancer.Type, "lbType", "tcp", "Type of load balancer instance (TCP/HTTP)")
kubeVipCmd.PersistentFlags().StringVar(&initLoadBalancer.Name, "lbName", "Kubeadm Load Balancer", "The name of a load balancer instance")
kubeVipCmd.PersistentFlags().IntVar(&initLoadBalancer.Port, "lbPort", 6443, "Port that load balancer will expose on")
kubeVipCmd.PersistentFlags().IntVar(&initLoadBalancer.BackendPort, "lbBackEndPort", 6444, "A port that all backends may be using (optional)")
// BGP flags
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableBGP, "bgp", false, "This will enable BGP support within kube-vip")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPConfig.RouterID, "bgpRouterID", "", "The routerID for the bgp server")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPConfig.SourceIF, "sourceIF", "", "The source interface for bgp peering (not to be used with sourceIP)")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPConfig.SourceIP, "sourceIP", "", "The source address for bgp peering (not to be used with sourceIF)")
kubeVipCmd.PersistentFlags().Uint32Var(&initConfig.BGPConfig.AS, "localAS", 65000, "The local AS number for the bgp server")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPPeerConfig.Address, "peerAddress", "", "The address of a BGP peer")
kubeVipCmd.PersistentFlags().Uint32Var(&initConfig.BGPPeerConfig.AS, "peerAS", 65000, "The AS number for a BGP peer")
kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPPeerConfig.Password, "peerPass", "", "The md5 password for a BGP peer")
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.BGPPeerConfig.MultiHop, "multihop", false, "This will enable BGP multihop support")
	kubeVipCmd.PersistentFlags().StringSliceVar(&initConfig.BGPPeers, "bgppeers", []string{}, "Comma-separated BGP peers, format: address:as:password:multihop")
// Control plane specific flags
kubeVipCmd.PersistentFlags().StringVarP(&initConfig.Namespace, "namespace", "n", "kube-system", "The configuration map defined within the cluster")
// Manage logging
kubeVipCmd.PersistentFlags().Uint32Var(&logLevel, "log", 4, "Set the level of logging")
// Service flags
kubeVipService.Flags().StringVarP(&configMap, "configMap", "c", "plndr", "The configuration map defined within the cluster")
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableControlPane, "controlplane", false, "Enable HA for control plane, hybrid mode")
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableServices, "services", false, "Enable Kubernetes services, hybrid mode")
// Prometheus HTTP Server
	kubeVipCmd.PersistentFlags().StringVar(&initConfig.PrometheusHTTPServer, "promethuesHTTPServer", ":2112", "Host and port used to expose Prometheus metrics via an HTTP server")
kubeVipCmd.AddCommand(kubeKubeadm)
kubeVipCmd.AddCommand(kubeManifest)
kubeVipCmd.AddCommand(kubeVipManager)
kubeVipCmd.AddCommand(kubeVipSample)
kubeVipCmd.AddCommand(kubeVipService)
kubeVipCmd.AddCommand(kubeVipStart)
kubeVipCmd.AddCommand(kubeVipVersion)
// Sample commands
kubeVipSample.AddCommand(kubeVipSampleConfig)
kubeVipSample.AddCommand(kubeVipSampleManifest)
}
// Execute - starts the command parsing process
func Execute() {
if err := kubeVipCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
var kubeVipVersion = &cobra.Command{
Use: "version",
Short: "Version and Release information about the Kubernetes Virtual IP Server",
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("Kube-VIP Release Information\n")
fmt.Printf("Version: %s\n", Release.Version)
fmt.Printf("Build: %s\n", Release.Build)
},
}
var kubeVipSample = &cobra.Command{
Use: "sample",
Short: "Generate a Sample configuration",
Run: func(cmd *cobra.Command, args []string) {
cmd.Help()
},
}
var kubeVipService = &cobra.Command{
Use: "service",
Short: "Start the Virtual IP / Load balancer as a service within a Kubernetes cluster",
Run: func(cmd *cobra.Command, args []string) {
// Set the logging level for all subsequent functions
log.SetLevel(log.Level(logLevel))
// parse environment variables, these will overwrite anything loaded or flags
err := kubevip.ParseEnvironment(&initConfig)
if err != nil {
log.Fatalln(err)
}
		// Use environment variables as an option to make the manifest clearer
envConfigMap := os.Getenv("vip_configmap")
if envConfigMap != "" {
configMap = envConfigMap
}
// Define the new service manager
mgr, err := manager.New(configMap, &initConfig)
if err != nil {
log.Fatalf("%v", err)
}
// Start the service manager, this will watch the config Map and construct kube-vip services for it
err = mgr.Start()
if err != nil {
log.Fatalf("%v", err)
}
},
}
var kubeVipManager = &cobra.Command{
Use: "manager",
Short: "Start the kube-vip manager",
Run: func(cmd *cobra.Command, args []string) {
// Set the logging level for all subsequent functions
log.SetLevel(log.Level(logLevel))
go servePrometheusHTTPServer(cmd.Context(), PrometheusHTTPServerConfig{
Addr: initConfig.PrometheusHTTPServer,
})
// parse environment variables, these will overwrite anything loaded or flags
err := kubevip.ParseEnvironment(&initConfig)
if err != nil {
log.Fatalln(err)
}
		// Use environment variables as an option to make the manifest clearer
envConfigMap := os.Getenv("vip_configmap")
if envConfigMap != "" {
configMap = envConfigMap
}
// If Packet is enabled and there is a provider configuration passed
if initConfig.EnableMetal {
if providerConfig != "" {
providerAPI, providerProject, err := packet.GetPacketConfig(providerConfig)
if err != nil {
log.Fatalf("%v", err)
}
initConfig.MetalAPIKey = providerAPI
initConfig.MetalProject = providerProject
}
}
// Define the new service manager
mgr, err := manager.New(configMap, &initConfig)
if err != nil {
log.Fatalf("%v", err)
}
prometheus.MustRegister(mgr.PrometheusCollector()...)
// Start the service manager, this will watch the config Map and construct kube-vip services for it
err = mgr.Start()
if err != nil {
log.Fatalf("%v", err)
}
},
}
type PrometheusHTTPServerConfig struct {
// Addr sets the http server address used to expose the metric endpoint
Addr string
}
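// servePrometheusHTTPServer serves /metrics until the supplied context is
// cancelled, then drains in-flight requests for up to five seconds via
// http.Server.Shutdown, treating ErrServerClosed as a clean exit.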
func servePrometheusHTTPServer(ctx context.Context, config PrometheusHTTPServerConfig) {
var err error
mux := http.NewServeMux()
mux.Handle("/metrics", promhttp.Handler())
srv := &http.Server{
Addr: config.Addr,
Handler: mux,
}
go func() {
if err = srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
log.Fatalf("listen:%+s\n", err)
}
}()
log.Printf("server started")
<-ctx.Done()
log.Printf("server stopped")
ctxShutDown, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer func() {
cancel()
}()
if err = srv.Shutdown(ctxShutDown); err != nil {
log.Fatalf("server Shutdown Failed:%+s", err)
}
if err == http.ErrServerClosed {
err = nil
}
}
| [
"\"vip_configmap\"",
"\"vip_configmap\""
]
| []
| [
"vip_configmap"
]
| [] | ["vip_configmap"] | go | 1 | 0 | |
systemtest/src/main/java/io/strimzi/systemtest/utils/StUtils.java | /*
* Copyright Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.systemtest.utils;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.dataformat.yaml.YAMLMapper;
import com.jayway.jsonpath.JsonPath;
import io.fabric8.kubernetes.api.model.ConfigMap;
import io.strimzi.api.kafka.model.ContainerEnvVar;
import io.strimzi.api.kafka.model.ContainerEnvVarBuilder;
import io.strimzi.systemtest.Environment;
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static io.strimzi.systemtest.resources.ResourceManager.cmdKubeClient;
import static io.strimzi.test.k8s.KubeClusterResource.kubeClient;
public class StUtils {
private static final Logger LOGGER = LogManager.getLogger(StUtils.class);
private static final Pattern KAFKA_COMPONENT_PATTERN = Pattern.compile("([^-|^_]*?)(?<kafka>[-|_]kafka[-|_])(?<version>.*)$");
private static final Pattern IMAGE_PATTERN_FULL_PATH = Pattern.compile("^(?<registry>[^/]*)/(?<org>[^/]*)/(?<image>[^:]*):(?<tag>.*)$");
private static final Pattern IMAGE_PATTERN = Pattern.compile("^(?<org>[^/]*)/(?<image>[^:]*):(?<tag>.*)$");
private static final Pattern VERSION_IMAGE_PATTERN = Pattern.compile("(?<version>[0-9.]+)=(?<image>[^\\s]*)");
private static final Pattern BETWEEN_JSON_OBJECTS_PATTERN = Pattern.compile("}[\\n\\r]+\\{");
private static final Pattern ALL_BEFORE_JSON_PATTERN = Pattern.compile("(.*\\s)}, \\{", Pattern.DOTALL);
private StUtils() { }
/**
     * Method for checking whether a test is allowed in a specific testing environment
     * @param envVariableForCheck environment variable that identifies the specific environment
     * @return true if test is allowed, false if not
*/
public static boolean isAllowOnCurrentEnvironment(String envVariableForCheck) {
return System.getenv().get(envVariableForCheck) == null;
}
/**
     * Rewrites a docker image reference to use the proper docker registry, docker org and docker tag.
* @param image Image that needs to be changed
* @return Updated docker image with a proper registry, org, tag
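     * <p>Example (hypothetical image name): {@code quay.io/strimzi/operator:latest}
     * would become {@code <registry>/<org>/operator:<tag>} using the configured
     * STRIMZI_REGISTRY, STRIMZI_ORG and STRIMZI_TAG overrides.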
*/
public static String changeOrgAndTag(String image) {
Matcher m = IMAGE_PATTERN_FULL_PATH.matcher(image);
if (m.find()) {
String registry = setImageProperties(m.group("registry"), Environment.STRIMZI_REGISTRY, Environment.STRIMZI_REGISTRY_DEFAULT);
String org = setImageProperties(m.group("org"), Environment.STRIMZI_ORG, Environment.STRIMZI_ORG_DEFAULT);
return registry + "/" + org + "/" + m.group("image") + ":" + buildTag(m.group("tag"));
}
m = IMAGE_PATTERN.matcher(image);
if (m.find()) {
String org = setImageProperties(m.group("org"), Environment.STRIMZI_ORG, Environment.STRIMZI_ORG_DEFAULT);
return Environment.STRIMZI_REGISTRY + "/" + org + "/" + m.group("image") + ":" + buildTag(m.group("tag"));
}
return image;
}
public static String changeOrgAndTagInImageMap(String imageMap) {
Matcher m = VERSION_IMAGE_PATTERN.matcher(imageMap);
StringBuffer sb = new StringBuffer();
while (m.find()) {
m.appendReplacement(sb, m.group("version") + "=" + changeOrgAndTag(m.group("image")));
}
m.appendTail(sb);
return sb.toString();
}
private static String setImageProperties(String current, String envVar, String defaultEnvVar) {
if (!envVar.equals(defaultEnvVar) && !current.equals(envVar)) {
return envVar;
}
return current;
}
private static String buildTag(String currentTag) {
if (!currentTag.equals(Environment.STRIMZI_TAG) && !Environment.STRIMZI_TAG_DEFAULT.equals(Environment.STRIMZI_TAG)) {
Matcher t = KAFKA_COMPONENT_PATTERN.matcher(currentTag);
if (t.find()) {
currentTag = Environment.STRIMZI_TAG + t.group("kafka") + t.group("version");
} else {
currentTag = Environment.STRIMZI_TAG;
}
}
return currentTag;
}
public static List<ContainerEnvVar> createContainerEnvVarsFromMap(Map<String, String> envVars) {
List<ContainerEnvVar> testEnvs = new ArrayList<>();
for (Map.Entry<String, String> entry : envVars.entrySet()) {
testEnvs.add(new ContainerEnvVarBuilder()
.withName(entry.getKey())
.withValue(entry.getValue()).build());
}
return testEnvs;
}
public static String checkEnvVarInPod(String podName, String envVarName) {
return kubeClient().getPod(podName).getSpec().getContainers().get(0).getEnv()
.stream().filter(envVar -> envVar.getName().equals(envVarName)).findFirst().get().getValue();
}
/**
* Translate key/value pairs formatted like properties into a Map
* @param keyValuePairs Pairs in key=value format; pairs are separated by newlines
     * @return The map of key/values
*/
@SuppressWarnings("unchecked")
public static Map<String, Object> loadProperties(String keyValuePairs) {
try {
Properties actual = new Properties();
actual.load(new StringReader(keyValuePairs));
return (Map) actual;
} catch (IOException e) {
throw new AssertionError("Invalid Properties definition", e);
}
}
/**
* Get a Map of properties from an environment variable in json.
* @param containerIndex name of the container
* @param json The json from which to extract properties
* @param envVar The environment variable name
* @return The properties which the variable contains
*/
public static Map<String, Object> getPropertiesFromJson(int containerIndex, String json, String envVar) {
List<String> array = JsonPath.parse(json).read(globalVariableJsonPathBuilder(containerIndex, envVar));
return StUtils.loadProperties(array.get(0));
}
/**
     * Get a jsonPath which can be used to extract environment variables from a spec
* @param containerIndex index of the container
* @param envVar The environment variable name
* @return The json path
*/
public static String globalVariableJsonPathBuilder(int containerIndex, String envVar) {
return "$.spec.containers[" + containerIndex + "].env[?(@.name=='" + envVar + "')].value";
}
public static Properties stringToProperties(String str) {
Properties result = new Properties();
List<String> list = getLinesWithoutCommentsAndEmptyLines(str);
for (String line: list) {
String[] split = line.split("=");
result.put(split[0], split.length == 1 ? "" : split[1]);
}
return result;
}
public static Properties configMap2Properties(ConfigMap cm) {
return stringToProperties(cm.getData().get("server.config"));
}
public static List<String> getLinesWithoutCommentsAndEmptyLines(String config) {
List<String> allLines = Arrays.asList(config.split("\\r?\\n"));
List<String> validLines = new ArrayList<>();
for (String line : allLines) {
if (!line.replace(" ", "").startsWith("#") && !line.isEmpty()) {
validLines.add(line.replace(" ", ""));
}
}
return validLines;
}
public static JsonArray expectedServiceDiscoveryInfo(int port, String protocol, String auth) {
JsonObject jsonObject = new JsonObject();
jsonObject.put("port", port);
jsonObject.put("tls", port == 9093);
jsonObject.put("protocol", protocol);
jsonObject.put("auth", auth);
JsonArray jsonArray = new JsonArray();
jsonArray.add(jsonObject);
return jsonArray;
}
public static JsonArray expectedServiceDiscoveryInfo(String plainAuth, String tlsAuth) {
JsonArray jsonArray = new JsonArray();
jsonArray.add(expectedServiceDiscoveryInfo(9092, "kafka", plainAuth).getValue(0));
jsonArray.add(expectedServiceDiscoveryInfo(9093, "kafka", tlsAuth).getValue(0));
return jsonArray;
}
/**
* Method for checking if JSON format logging is set for the {@code pods}
* Steps:
* 1. get log from pod
* 2. find every occurrence of `}\n{` which will be replaced with `}, {` - by {@link #BETWEEN_JSON_OBJECTS_PATTERN}
* 3. replace everything from beginning to the first proper JSON object with `{`- by {@link #ALL_BEFORE_JSON_PATTERN}
* 4. also add `[` to beginning and `]` to the end of String to create proper JsonArray
* 5. try to parse the JsonArray
* @param pods snapshot of pods to be checked
* @param containerName name of container from which to take the log
* @return if JSON format was set up or not
*/
public static boolean checkLogForJSONFormat(Map<String, String> pods, String containerName) {
boolean isJSON = false;
        // this only limits the number of records fetched - Kafka logs one record per line, operators one record per ~11 lines
String tail = "--tail=" + (containerName.contains("operator") ? "50" : "10");
for (String podName : pods.keySet()) {
String log = cmdKubeClient().execInCurrentNamespace(false, "logs", podName, "-c", containerName, tail).out();
Matcher matcher = BETWEEN_JSON_OBJECTS_PATTERN.matcher(log);
log = matcher.replaceAll("}, \\{");
matcher = ALL_BEFORE_JSON_PATTERN.matcher(log);
log = "[" + matcher.replaceFirst("{") + "]";
try {
new JsonArray(log);
LOGGER.info("JSON format logging successfully set for {} - {}", podName, containerName);
isJSON = true;
} catch (Exception e) {
LOGGER.info(log);
LOGGER.info("Failed to set JSON format logging for {} - {}", podName, containerName);
isJSON = false;
break;
}
}
return isJSON;
}
/**
     * Method for checking whether a test is allowed on the current Kubernetes version
     * @param maxKubernetesVersion maximum Kubernetes version on which the test may run, or "latest"
* @return true if test is allowed, false if not
*/
public static boolean isAllowedOnCurrentK8sVersion(String maxKubernetesVersion) {
if (maxKubernetesVersion.equals("latest")) {
return true;
}
return Double.parseDouble(kubeClient().clusterKubernetesVersion()) < Double.parseDouble(maxKubernetesVersion);
}
/**
* Method which returns log from last {@code timeSince}
* @param podName name of pod to take a log from
* @param containerName name of container
* @param timeSince time from which the log should be taken - 3s, 5m, 2h -- back
* @return log from the pod
*/
public static String getLogFromPodByTime(String podName, String containerName, String timeSince) {
return cmdKubeClient().execInCurrentNamespace("logs", podName, "-c", containerName, "--since=" + timeSince).out();
}
/**
* Change Deployment configuration before applying it. We set different namespace, log level and image pull policy.
* It's mostly used for use cases where we use direct kubectl command instead of fabric8 calls to api.
* @param deploymentFile loaded Strimzi deployment file
* @param namespace namespace where Strimzi should be installed
* @return deployment file content as String
*/
public static String changeDeploymentNamespace(File deploymentFile, String namespace) {
YAMLMapper mapper = new YAMLMapper();
try {
JsonNode node = mapper.readTree(deploymentFile);
// Change the docker org of the images in the 060-deployment.yaml
ObjectNode containerNode = (ObjectNode) node.at("/spec/template/spec/containers").get(0);
for (JsonNode envVar : containerNode.get("env")) {
String varName = envVar.get("name").textValue();
if (varName.matches("STRIMZI_NAMESPACE")) {
                // Replace the downward-API valueFrom with an explicit namespace value
((ObjectNode) envVar).remove("valueFrom");
((ObjectNode) envVar).put("value", namespace);
}
if (varName.matches("STRIMZI_LOG_LEVEL")) {
((ObjectNode) envVar).put("value", Environment.STRIMZI_LOG_LEVEL);
}
}
// Change image pull policy
ObjectMapper objectMapper = new ObjectMapper();
            ObjectNode imagePullPolicyEnvVar = objectMapper.createObjectNode();
            imagePullPolicyEnvVar.put("name", "STRIMZI_IMAGE_PULL_POLICY");
            imagePullPolicyEnvVar.put("value", Environment.COMPONENTS_IMAGE_PULL_POLICY);
            ((ArrayNode) containerNode.get("env")).add(imagePullPolicyEnvVar);
return mapper.writeValueAsString(node);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
features/environment.py | import abc
import datetime
import os
import json
import shutil
import signal
import six
import subprocess
import sys
import tempfile
import threading
import time
import yaml
import patroni.psycopg as psycopg
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
@six.add_metaclass(abc.ABCMeta)
class AbstractController(object):
def __init__(self, context, name, work_directory, output_dir):
self._context = context
self._name = name
self._work_directory = work_directory
self._output_dir = output_dir
self._handle = None
self._log = None
def _has_started(self):
return self._handle and self._handle.pid and self._handle.poll() is None
def _is_running(self):
return self._has_started()
@abc.abstractmethod
def _is_accessible(self):
"""process is accessible for queries"""
@abc.abstractmethod
def _start(self):
"""start process"""
def start(self, max_wait_limit=5):
if self._is_running():
return True
self._log = open(os.path.join(self._output_dir, self._name + '.log'), 'a')
self._handle = self._start()
assert self._has_started(), "Process {0} is not running after being started".format(self._name)
max_wait_limit *= self._context.timeout_multiplier
for _ in range(max_wait_limit):
if self._is_accessible():
break
time.sleep(1)
else:
assert False,\
"{0} instance is not available for queries after {1} seconds".format(self._name, max_wait_limit)
def stop(self, kill=False, timeout=15, _=False):
term = False
start_time = time.time()
timeout *= self._context.timeout_multiplier
while self._handle and self._is_running():
if kill:
self._handle.kill()
elif not term:
self._handle.terminate()
term = True
time.sleep(1)
if not kill and time.time() - start_time > timeout:
kill = True
if self._log:
self._log.close()
def cancel_background(self):
pass
class PatroniController(AbstractController):
    """Starts and stops individual Patroni instances."""
    __PORT = 5360
    PATRONI_CONFIG = '{}.yml'
def __init__(self, context, name, work_directory, output_dir, custom_config=None):
super(PatroniController, self).__init__(context, 'patroni_' + name, work_directory, output_dir)
PatroniController.__PORT += 1
self._data_dir = os.path.join(work_directory, 'data', name)
self._connstring = None
if custom_config and 'watchdog' in custom_config:
self.watchdog = WatchdogMonitor(name, work_directory, output_dir)
custom_config['watchdog'] = {'driver': 'testing', 'device': self.watchdog.fifo_path, 'mode': 'required'}
else:
self.watchdog = None
self._scope = (custom_config or {}).get('scope', 'batman')
self._config = self._make_patroni_test_config(name, custom_config)
self._closables = []
self._conn = None
self._curs = None
def write_label(self, content):
with open(os.path.join(self._data_dir, 'label'), 'w') as f:
f.write(content)
def read_label(self, label):
try:
with open(os.path.join(self._data_dir, label), 'r') as f:
return f.read().strip()
except IOError:
return None
@staticmethod
def recursive_update(dst, src):
for k, v in src.items():
if k in dst and isinstance(dst[k], dict):
PatroniController.recursive_update(dst[k], v)
else:
dst[k] = v
def update_config(self, custom_config):
with open(self._config) as r:
config = yaml.safe_load(r)
self.recursive_update(config, custom_config)
with open(self._config, 'w') as w:
yaml.safe_dump(config, w, default_flow_style=False)
self._scope = config.get('scope', 'batman')
def add_tag_to_config(self, tag, value):
self.update_config({'tags': {tag: value}})
def _start(self):
if self.watchdog:
self.watchdog.start()
if isinstance(self._context.dcs_ctl, KubernetesController):
self._context.dcs_ctl.create_pod(self._name[8:], self._scope)
os.environ['PATRONI_KUBERNETES_POD_IP'] = '10.0.0.' + self._name[-1]
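        # Run Patroni under "coverage run -p" so every process started by the
        # behave suite writes its own coverage data file for later combining.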
return subprocess.Popen([sys.executable, '-m', 'coverage', 'run',
'--source=patroni', '-p', 'patroni.py', self._config],
stdout=self._log, stderr=subprocess.STDOUT, cwd=self._work_directory)
def stop(self, kill=False, timeout=15, postgres=False):
if postgres:
return subprocess.call(['pg_ctl', '-D', self._data_dir, 'stop', '-mi', '-w'])
super(PatroniController, self).stop(kill, timeout)
if isinstance(self._context.dcs_ctl, KubernetesController):
self._context.dcs_ctl.delete_pod(self._name[8:])
if self.watchdog:
self.watchdog.stop()
def _is_accessible(self):
cursor = self.query("SELECT 1", fail_ok=True)
if cursor is not None:
cursor.execute("SET synchronous_commit TO 'local'")
return True
def _make_patroni_test_config(self, name, custom_config):
patroni_config_name = self.PATRONI_CONFIG.format(name)
patroni_config_path = os.path.join(self._output_dir, patroni_config_name)
with open(patroni_config_name) as f:
config = yaml.safe_load(f)
config.pop('etcd', None)
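        # Give each instance its own Raft port: consume the current RAFT_PORT
        # and advance the environment variable for the next controller.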
raft_port = os.environ.get('RAFT_PORT')
if raft_port:
os.environ['RAFT_PORT'] = str(int(raft_port) + 1)
config['raft'] = {'data_dir': self._output_dir, 'self_addr': 'localhost:' + os.environ['RAFT_PORT']}
host = config['postgresql']['listen'].split(':')[0]
config['postgresql']['listen'] = config['postgresql']['connect_address'] = '{0}:{1}'.format(host, self.__PORT)
config['name'] = name
config['postgresql']['data_dir'] = self._data_dir
config['postgresql']['basebackup'] = [{'checkpoint': 'fast'}]
config['postgresql']['use_unix_socket'] = os.name != 'nt' # windows doesn't yet support unix-domain sockets
config['postgresql']['use_unix_socket_repl'] = os.name != 'nt' # windows doesn't yet support unix-domain sockets
config['postgresql']['pgpass'] = os.path.join(tempfile.gettempdir(), 'pgpass_' + name)
config['postgresql']['parameters'].update({
'logging_collector': 'on', 'log_destination': 'csvlog', 'log_directory': self._output_dir,
'log_filename': name + '.log', 'log_statement': 'all', 'log_min_messages': 'debug1',
'unix_socket_directories': self._data_dir})
if 'bootstrap' in config:
config['bootstrap']['post_bootstrap'] = 'psql -w -c "SELECT 1"'
if 'initdb' in config['bootstrap']:
config['bootstrap']['initdb'].extend([{'auth': 'md5'}, {'auth-host': 'md5'}])
if custom_config is not None:
self.recursive_update(config, custom_config)
self.recursive_update(config, {
'bootstrap': {'dcs': {'postgresql': {'parameters': {'wal_keep_segments': 100}}}}})
if config['postgresql'].get('callbacks', {}).get('on_role_change'):
config['postgresql']['callbacks']['on_role_change'] += ' ' + str(self.__PORT)
with open(patroni_config_path, 'w') as f:
yaml.safe_dump(config, f, default_flow_style=False)
user = config['postgresql'].get('authentication', config['postgresql']).get('superuser', {})
self._connkwargs = {k: user[n] for n, k in [('username', 'user'), ('password', 'password')] if n in user}
self._connkwargs.update({'host': host, 'port': self.__PORT, 'dbname': 'postgres'})
self._replication = config['postgresql'].get('authentication', config['postgresql']).get('replication', {})
self._replication.update({'host': host, 'port': self.__PORT, 'dbname': 'postgres'})
return patroni_config_path
def _connection(self):
if not self._conn or self._conn.closed != 0:
self._conn = psycopg.connect(**self._connkwargs)
self._conn.autocommit = True
return self._conn
def _cursor(self):
if not self._curs or self._curs.closed or self._curs.connection.closed != 0:
self._curs = self._connection().cursor()
return self._curs
def query(self, query, fail_ok=False):
try:
cursor = self._cursor()
cursor.execute(query)
return cursor
except psycopg.Error:
if not fail_ok:
raise
def check_role_has_changed_to(self, new_role, timeout=10):
bound_time = time.time() + timeout
recovery_status = new_role != 'primary'
while time.time() < bound_time:
cur = self.query("SELECT pg_is_in_recovery()", fail_ok=True)
if cur:
row = cur.fetchone()
if row and row[0] == recovery_status:
return True
time.sleep(1)
return False
def get_watchdog(self):
return self.watchdog
def _get_pid(self):
try:
pidfile = os.path.join(self._data_dir, 'postmaster.pid')
if not os.path.exists(pidfile):
return None
return int(open(pidfile).readline().strip())
except Exception:
return None
def patroni_hang(self, timeout):
hang = ProcessHang(self._handle.pid, timeout)
self._closables.append(hang)
hang.start()
def cancel_background(self):
for obj in self._closables:
obj.close()
self._closables = []
@property
def backup_source(self):
return 'postgres://{username}:{password}@{host}:{port}/{dbname}'.format(**self._replication)
def backup(self, dest=os.path.join('data', 'basebackup')):
subprocess.call(PatroniPoolController.BACKUP_SCRIPT + ['--walmethod=none',
'--datadir=' + os.path.join(self._work_directory, dest),
'--dbname=' + self.backup_source])
class ProcessHang(object):
"""A background thread implementing a cancelable process hang via SIGSTOP."""
def __init__(self, pid, timeout):
self._cancelled = threading.Event()
self._thread = threading.Thread(target=self.run)
self.pid = pid
self.timeout = timeout
def start(self):
self._thread.start()
def run(self):
os.kill(self.pid, signal.SIGSTOP)
try:
self._cancelled.wait(self.timeout)
finally:
os.kill(self.pid, signal.SIGCONT)
def close(self):
self._cancelled.set()
self._thread.join()
class AbstractDcsController(AbstractController):
_CLUSTER_NODE = '/service/{0}'
def __init__(self, context, mktemp=True):
work_directory = mktemp and tempfile.mkdtemp() or None
super(AbstractDcsController, self).__init__(context, self.name(), work_directory, context.pctl.output_dir)
def _is_accessible(self):
return self._is_running()
def stop(self, kill=False, timeout=15):
""" terminate process and wipe out the temp work directory, but only if we actually started it"""
super(AbstractDcsController, self).stop(kill=kill, timeout=timeout)
if self._work_directory:
shutil.rmtree(self._work_directory)
def path(self, key=None, scope='batman'):
return self._CLUSTER_NODE.format(scope) + (key and '/' + key or '')
@abc.abstractmethod
def query(self, key, scope='batman'):
""" query for a value of a given key """
@abc.abstractmethod
def cleanup_service_tree(self):
""" clean all contents stored in the tree used for the tests """
@classmethod
def get_subclasses(cls):
for subclass in cls.__subclasses__():
for subsubclass in subclass.get_subclasses():
yield subsubclass
yield subclass
@classmethod
def name(cls):
return cls.__name__[:-10].lower()
class ConsulController(AbstractDcsController):
def __init__(self, context):
super(ConsulController, self).__init__(context)
os.environ['PATRONI_CONSUL_HOST'] = 'localhost:8500'
os.environ['PATRONI_CONSUL_REGISTER_SERVICE'] = 'on'
self._config_file = None
import consul
self._client = consul.Consul()
def _start(self):
self._config_file = self._work_directory + '.json'
with open(self._config_file, 'wb') as f:
f.write(b'{"session_ttl_min":"5s","server":true,"bootstrap":true,"advertise_addr":"127.0.0.1"}')
return subprocess.Popen(['consul', 'agent', '-config-file', self._config_file, '-data-dir',
self._work_directory], stdout=self._log, stderr=subprocess.STDOUT)
def stop(self, kill=False, timeout=15):
super(ConsulController, self).stop(kill=kill, timeout=timeout)
if self._config_file:
os.unlink(self._config_file)
def _is_running(self):
try:
return bool(self._client.status.leader())
except Exception:
return False
def path(self, key=None, scope='batman'):
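        # Consul KV keys carry no leading slash, so drop the first character
        # of the generic '/service/...' path.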
return super(ConsulController, self).path(key, scope)[1:]
def query(self, key, scope='batman'):
_, value = self._client.kv.get(self.path(key, scope))
return value and value['Value'].decode('utf-8')
def cleanup_service_tree(self):
self._client.kv.delete(self.path(scope=''), recurse=True)
def start(self, max_wait_limit=15):
super(ConsulController, self).start(max_wait_limit)
class AbstractEtcdController(AbstractDcsController):
""" handles all etcd related tasks, used for the tests setup and cleanup """
def __init__(self, context, client_cls):
super(AbstractEtcdController, self).__init__(context)
self._client_cls = client_cls
def _start(self):
return subprocess.Popen(["etcd", "--debug", "--data-dir", self._work_directory],
stdout=self._log, stderr=subprocess.STDOUT)
def _is_running(self):
from patroni.dcs.etcd import DnsCachingResolver
# if etcd is running, but we didn't start it
try:
self._client = self._client_cls({'host': 'localhost', 'port': 2379, 'retry_timeout': 30,
'patronictl': 1}, DnsCachingResolver())
return True
except Exception:
return False
class EtcdController(AbstractEtcdController):
def __init__(self, context):
from patroni.dcs.etcd import EtcdClient
super(EtcdController, self).__init__(context, EtcdClient)
os.environ['PATRONI_ETCD_HOST'] = 'localhost:2379'
def query(self, key, scope='batman'):
import etcd
try:
return self._client.get(self.path(key, scope)).value
except etcd.EtcdKeyNotFound:
return None
def cleanup_service_tree(self):
import etcd
try:
self._client.delete(self.path(scope=''), recursive=True)
except (etcd.EtcdKeyNotFound, etcd.EtcdConnectionFailed):
return
except Exception as e:
assert False, "exception when cleaning up etcd contents: {0}".format(e)
class Etcd3Controller(AbstractEtcdController):
def __init__(self, context):
from patroni.dcs.etcd3 import Etcd3Client
super(Etcd3Controller, self).__init__(context, Etcd3Client)
os.environ['PATRONI_ETCD3_HOST'] = 'localhost:2379'
def query(self, key, scope='batman'):
import base64
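        # The etcd v3 JSON gateway base64-encodes values; decode the value of
        # the first matching key, otherwise fall through to an implicit None.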
response = self._client.range(self.path(key, scope))
for k in response.get('kvs', []):
return base64.b64decode(k['value']).decode('utf-8') if 'value' in k else None
def cleanup_service_tree(self):
try:
self._client.deleteprefix(self.path(scope=''))
except Exception as e:
assert False, "exception when cleaning up etcd contents: {0}".format(e)
class KubernetesController(AbstractDcsController):
def __init__(self, context):
super(KubernetesController, self).__init__(context)
self._namespace = 'default'
self._labels = {"application": "patroni"}
self._label_selector = ','.join('{0}={1}'.format(k, v) for k, v in self._labels.items())
os.environ['PATRONI_KUBERNETES_LABELS'] = json.dumps(self._labels)
os.environ['PATRONI_KUBERNETES_USE_ENDPOINTS'] = 'true'
os.environ['PATRONI_KUBERNETES_BYPASS_API_SERVICE'] = 'true'
from patroni.dcs.kubernetes import k8s_client, k8s_config
k8s_config.load_kube_config(context='local')
self._client = k8s_client
self._api = self._client.CoreV1Api()
def _start(self):
pass
def create_pod(self, name, scope):
labels = self._labels.copy()
labels['cluster-name'] = scope
metadata = self._client.V1ObjectMeta(namespace=self._namespace, name=name, labels=labels)
spec = self._client.V1PodSpec(containers=[self._client.V1Container(name=name, image='empty')])
body = self._client.V1Pod(metadata=metadata, spec=spec)
self._api.create_namespaced_pod(self._namespace, body)
def delete_pod(self, name):
try:
self._api.delete_namespaced_pod(name, self._namespace, body=self._client.V1DeleteOptions())
except Exception:
pass
while True:
try:
self._api.read_namespaced_pod(name, self._namespace)
except Exception:
break
def query(self, key, scope='batman'):
if key.startswith('members/'):
pod = self._api.read_namespaced_pod(key[8:], self._namespace)
return (pod.metadata.annotations or {}).get('status', '')
else:
try:
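                # Map DCS keys onto endpoint names: 'leader' lives on the scope
                # endpoint itself, 'history' and 'initialize' on '<scope>-config',
                # everything else on '<scope>-<key>'.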
ep = scope + {'leader': '', 'history': '-config', 'initialize': '-config'}.get(key, '-' + key)
e = self._api.read_namespaced_endpoints(ep, self._namespace)
if key != 'sync':
return e.metadata.annotations[key]
else:
return json.dumps(e.metadata.annotations)
except Exception:
return None
def cleanup_service_tree(self):
try:
self._api.delete_collection_namespaced_pod(self._namespace, label_selector=self._label_selector)
except Exception:
pass
try:
self._api.delete_collection_namespaced_endpoints(self._namespace, label_selector=self._label_selector)
except Exception:
pass
while True:
result = self._api.list_namespaced_pod(self._namespace, label_selector=self._label_selector)
if len(result.items) < 1:
break
def _is_running(self):
return True
class ZooKeeperController(AbstractDcsController):
""" handles all zookeeper related tasks, used for the tests setup and cleanup """
def __init__(self, context, export_env=True):
super(ZooKeeperController, self).__init__(context, False)
if export_env:
os.environ['PATRONI_ZOOKEEPER_HOSTS'] = "'localhost:2181'"
import kazoo.client
self._client = kazoo.client.KazooClient()
def _start(self):
pass # TODO: implement later
def query(self, key, scope='batman'):
import kazoo.exceptions
try:
return self._client.get(self.path(key, scope))[0].decode('utf-8')
except kazoo.exceptions.NoNodeError:
return None
def cleanup_service_tree(self):
import kazoo.exceptions
try:
self._client.delete(self.path(scope=''), recursive=True)
except (kazoo.exceptions.NoNodeError):
return
except Exception as e:
assert False, "exception when cleaning up zookeeper contents: {0}".format(e)
def _is_running(self):
# if zookeeper is running, but we didn't start it
if self._client.connected:
return True
try:
return self._client.start(1) or True
except Exception:
return False
class MockExhibitor(BaseHTTPRequestHandler):
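    # Minimal stand-in for the Exhibitor REST API: every GET reports a single
    # local ZooKeeper server listening on port 2181.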
def do_GET(self):
self.send_response(200)
self.end_headers()
self.wfile.write(b'{"servers":["127.0.0.1"],"port":2181}')
def log_message(self, fmt, *args):
pass
class ExhibitorController(ZooKeeperController):
def __init__(self, context):
super(ExhibitorController, self).__init__(context, False)
port = 8181
exhibitor = HTTPServer(('', port), MockExhibitor)
exhibitor.daemon_thread = True
exhibitor_thread = threading.Thread(target=exhibitor.serve_forever)
exhibitor_thread.daemon = True
exhibitor_thread.start()
os.environ.update({'PATRONI_EXHIBITOR_HOSTS': 'localhost', 'PATRONI_EXHIBITOR_PORT': str(port)})
class RaftController(AbstractDcsController):
CONTROLLER_ADDR = 'localhost:1234'
PASSWORD = '12345'
def __init__(self, context):
super(RaftController, self).__init__(context)
os.environ.update(PATRONI_RAFT_PARTNER_ADDRS="'" + self.CONTROLLER_ADDR + "'",
PATRONI_RAFT_PASSWORD=self.PASSWORD, RAFT_PORT='1234')
self._raft = None
def _start(self):
env = os.environ.copy()
del env['PATRONI_RAFT_PARTNER_ADDRS']
env['PATRONI_RAFT_SELF_ADDR'] = self.CONTROLLER_ADDR
env['PATRONI_RAFT_DATA_DIR'] = self._work_directory
return subprocess.Popen([sys.executable, '-m', 'coverage', 'run',
'--source=patroni', '-p', 'patroni_raft_controller.py'],
stdout=self._log, stderr=subprocess.STDOUT, env=env)
def query(self, key, scope='batman'):
ret = self._raft.get(self.path(key, scope))
return ret and ret['value']
def set(self, key, value):
self._raft.set(self.path(key), value)
def cleanup_service_tree(self):
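        # Wipe the raft state the hard way: destroy the existing client,
        # restart the controller on a recreated data directory, then attach a
        # fresh KVStoreTTL client and wait until it reports ready.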
from patroni.dcs.raft import KVStoreTTL
if self._raft:
self._raft.destroy()
self.stop()
os.makedirs(self._work_directory)
self.start()
ready_event = threading.Event()
self._raft = KVStoreTTL(ready_event.set, None, None, partner_addrs=[self.CONTROLLER_ADDR], password=self.PASSWORD)
self._raft.startAutoTick()
ready_event.wait()
class PatroniPoolController(object):
BACKUP_SCRIPT = [sys.executable, 'features/backup_create.py']
ARCHIVE_RESTORE_SCRIPT = ' '.join((sys.executable, os.path.abspath('features/archive-restore.py')))
def __init__(self, context):
self._context = context
self._dcs = None
self._output_dir = None
self._patroni_path = None
self._processes = {}
self.create_and_set_output_directory('')
self.known_dcs = {subclass.name(): subclass for subclass in AbstractDcsController.get_subclasses()}
@property
def patroni_path(self):
if self._patroni_path is None:
cwd = os.path.realpath(__file__)
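            # Walk upwards from this file until we step out of the 'features'
            # directory (or hit the filesystem root); what remains is the
            # Patroni source checkout.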
while True:
cwd, entry = os.path.split(cwd)
if entry == 'features' or cwd == '/':
break
self._patroni_path = cwd
return self._patroni_path
@property
def output_dir(self):
return self._output_dir
def start(self, name, max_wait_limit=20, custom_config=None):
if name not in self._processes:
self._processes[name] = PatroniController(self._context, name, self.patroni_path,
self._output_dir, custom_config)
self._processes[name].start(max_wait_limit)
def __getattr__(self, func):
if func not in ['stop', 'query', 'write_label', 'read_label', 'check_role_has_changed_to',
'add_tag_to_config', 'get_watchdog', 'patroni_hang', 'backup']:
raise AttributeError("PatroniPoolController instance has no attribute '{0}'".format(func))
def wrapper(name, *args, **kwargs):
return getattr(self._processes[name], func)(*args, **kwargs)
return wrapper
def stop_all(self):
for ctl in self._processes.values():
ctl.cancel_background()
ctl.stop()
self._processes.clear()
def create_and_set_output_directory(self, feature_name):
feature_dir = os.path.join(self.patroni_path, 'features', 'output', feature_name.replace(' ', '_'))
if os.path.exists(feature_dir):
shutil.rmtree(feature_dir)
os.makedirs(feature_dir)
self._output_dir = feature_dir
def clone(self, from_name, cluster_name, to_name):
f = self._processes[from_name]
custom_config = {
'scope': cluster_name,
'bootstrap': {
'method': 'pg_basebackup',
'pg_basebackup': {
'command': " ".join(self.BACKUP_SCRIPT) + ' --walmethod=stream --dbname=' + f.backup_source
},
'dcs': {
'postgresql': {
'parameters': {
'max_connections': 101
}
}
}
},
'postgresql': {
'parameters': {
'archive_mode': 'on',
'archive_command': (self.ARCHIVE_RESTORE_SCRIPT + ' --mode archive ' +
'--dirname {} --filename %f --pathname %p').format(
os.path.join(self.patroni_path, 'data', 'wal_archive'))
},
'authentication': {
'superuser': {'password': 'zalando1'},
'replication': {'password': 'rep-pass1'}
}
}
}
self.start(to_name, custom_config=custom_config)
def bootstrap_from_backup(self, name, cluster_name):
custom_config = {
'scope': cluster_name,
'bootstrap': {
'method': 'backup_restore',
'backup_restore': {
'command': (sys.executable + ' features/backup_restore.py --sourcedir=' +
os.path.join(self.patroni_path, 'data', 'basebackup')),
'recovery_conf': {
'recovery_target_action': 'promote',
'recovery_target_timeline': 'latest',
'restore_command': (self.ARCHIVE_RESTORE_SCRIPT + ' --mode restore ' +
'--dirname {} --filename %f --pathname %p').format(
os.path.join(self.patroni_path, 'data', 'wal_archive'))
}
}
},
'postgresql': {
'authentication': {
'superuser': {'password': 'zalando2'},
'replication': {'password': 'rep-pass2'}
}
}
}
self.start(name, custom_config=custom_config)
@property
def dcs(self):
if self._dcs is None:
self._dcs = os.environ.pop('DCS', 'etcd')
assert self._dcs in self.known_dcs, 'Unsupported dcs: ' + self._dcs
return self._dcs
class WatchdogMonitor(object):
"""Testing harness for emulating a watchdog device as a named pipe. Because we can't easily emulate ioctl's we
require a custom driver on Patroni side. The device takes no action, only notes if it was pinged and/or triggered.
"""
def __init__(self, name, work_directory, output_dir):
self.fifo_path = os.path.join(work_directory, 'data', 'watchdog.{0}.fifo'.format(name))
self.fifo_file = None
self._stop_requested = False # Relying on bool setting being atomic
self._thread = None
self.last_ping = None
self.was_pinged = False
self.was_closed = False
self._was_triggered = False
self.timeout = 60
self._log_file = open(os.path.join(output_dir, 'watchdog.{0}.log'.format(name)), 'w')
self._log("watchdog {0} initialized".format(name))
def _log(self, msg):
tstamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")
self._log_file.write("{0}: {1}\n".format(tstamp, msg))
def start(self):
assert self._thread is None
self._stop_requested = False
self._log("starting fifo {0}".format(self.fifo_path))
fifo_dir = os.path.dirname(self.fifo_path)
if os.path.exists(self.fifo_path):
os.unlink(self.fifo_path)
elif not os.path.exists(fifo_dir):
os.mkdir(fifo_dir)
os.mkfifo(self.fifo_path)
self.last_ping = time.time()
self._thread = threading.Thread(target=self.run)
self._thread.start()
def run(self):
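        # Single-byte protocol on the fifo: 'X' stops the monitor, 'C' starts a
        # newline-terminated 'key=value' command, 'V' is the watchdog
        # magic-close byte and '1' is a keepalive ping.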
try:
while not self._stop_requested:
self._log("opening")
self.fifo_file = os.open(self.fifo_path, os.O_RDONLY)
try:
self._log("Fifo {0} connected".format(self.fifo_path))
self.was_closed = False
while not self._stop_requested:
c = os.read(self.fifo_file, 1)
if c == b'X':
self._log("Stop requested")
return
elif c == b'':
self._log("Pipe closed")
break
elif c == b'C':
command = b''
c = os.read(self.fifo_file, 1)
while c != b'\n' and c != b'':
command += c
c = os.read(self.fifo_file, 1)
command = command.decode('utf8')
if command.startswith('timeout='):
self.timeout = int(command.split('=')[1])
self._log("timeout={0}".format(self.timeout))
elif c in [b'V', b'1']:
cur_time = time.time()
if cur_time - self.last_ping > self.timeout:
self._log("Triggered")
self._was_triggered = True
if c == b'V':
self._log("magic close")
self.was_closed = True
elif c == b'1':
self.was_pinged = True
self._log("ping after {0} seconds".format(cur_time - (self.last_ping or cur_time)))
self.last_ping = cur_time
else:
self._log('Unknown command {0} received from fifo'.format(c))
finally:
self.was_closed = True
self._log("closing")
os.close(self.fifo_file)
except Exception as e:
self._log("Error {0}".format(e))
finally:
self._log("stopping")
self._log_file.flush()
if os.path.exists(self.fifo_path):
os.unlink(self.fifo_path)
def stop(self):
self._log("Monitor stop")
self._stop_requested = True
try:
if os.path.exists(self.fifo_path):
fd = os.open(self.fifo_path, os.O_WRONLY)
os.write(fd, b'X')
os.close(fd)
except Exception as e:
self._log("err while closing: {0}".format(str(e)))
if self._thread:
self._thread.join()
self._thread = None
def reset(self):
self._log("reset")
self.was_pinged = self.was_closed = self._was_triggered = False
@property
def was_triggered(self):
delta = time.time() - self.last_ping
        triggered = self._was_triggered or (not self.was_closed and delta > self.timeout)
self._log("triggered={0}, {1}s left".format(triggered, self.timeout - delta))
return triggered
# actions to execute on start/stop of the tests and before running individual features
def before_all(context):
os.environ.update({'PATRONI_RESTAPI_USERNAME': 'username', 'PATRONI_RESTAPI_PASSWORD': 'password'})
context.ci = any(a in os.environ for a in ('TRAVIS_BUILD_NUMBER', 'BUILD_NUMBER', 'GITHUB_ACTIONS'))
context.timeout_multiplier = 5 if context.ci else 1 # MacOS sometimes is VERY slow
context.pctl = PatroniPoolController(context)
context.dcs_ctl = context.pctl.known_dcs[context.pctl.dcs](context)
context.dcs_ctl.start()
try:
context.dcs_ctl.cleanup_service_tree()
except AssertionError: # after_all handlers won't be executed in before_all
context.dcs_ctl.stop()
raise
def after_all(context):
context.dcs_ctl.stop()
subprocess.call([sys.executable, '-m', 'coverage', 'combine'])
subprocess.call([sys.executable, '-m', 'coverage', 'report'])
def before_feature(context, feature):
""" create per-feature output directory to collect Patroni and PostgreSQL logs """
context.pctl.create_and_set_output_directory(feature.name)
def after_feature(context, feature):
""" stop all Patronis, remove their data directory and cleanup the keys in etcd """
context.pctl.stop_all()
shutil.rmtree(os.path.join(context.pctl.patroni_path, 'data'))
context.dcs_ctl.cleanup_service_tree()
if feature.status == 'failed':
shutil.copytree(context.pctl.output_dir, context.pctl.output_dir + '_failed')
| []
| []
| [
"PATRONI_CONSUL_HOST",
"PATRONI_KUBERNETES_LABELS",
"PATRONI_KUBERNETES_USE_ENDPOINTS",
"PATRONI_ZOOKEEPER_HOSTS",
"RAFT_PORT",
"PATRONI_KUBERNETES_BYPASS_API_SERVICE",
"PATRONI_ETCD_HOST",
"PATRONI_CONSUL_REGISTER_SERVICE",
"PATRONI_ETCD3_HOST",
"PATRONI_KUBERNETES_POD_IP"
]
| [] | ["PATRONI_CONSUL_HOST", "PATRONI_KUBERNETES_LABELS", "PATRONI_KUBERNETES_USE_ENDPOINTS", "PATRONI_ZOOKEEPER_HOSTS", "RAFT_PORT", "PATRONI_KUBERNETES_BYPASS_API_SERVICE", "PATRONI_ETCD_HOST", "PATRONI_CONSUL_REGISTER_SERVICE", "PATRONI_ETCD3_HOST", "PATRONI_KUBERNETES_POD_IP"] | python | 10 | 0 | |
docs/examples/compute/profitbricks/create_lan.py | import os
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
cls = get_driver(Provider.PROFIT_BRICKS)
# Get ProfitBricks credentials from environment variables
pb_username = os.environ.get('PROFITBRICKS_USERNAME')
pb_password = os.environ.get('PROFITBRICKS_PASSWORD')
driver = cls(pb_username, pb_password)
datacenters = driver.list_datacenters()
# Look up the existing data center named 'demo-dc'
datacenter = [dc for dc in datacenters if dc.name == 'demo-dc'][0]
# Create a public LAN
lan = driver.ex_create_lan(datacenter, is_public=True)
print(lan)
| []
| []
| [
"PROFITBRICKS_PASSWORD",
"PROFITBRICKS_USERNAME"
]
| [] | ["PROFITBRICKS_PASSWORD", "PROFITBRICKS_USERNAME"] | python | 2 | 0 | |
plugins/inputs/sqlserver/azuresqlpoolqueries_test.go | package sqlserver
import (
	"os"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)
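// These integration tests need a live Azure SQL elastic pool; they are skipped
// in -short mode or when AZURESQL_POOL_CONNECTION_STRING is unset.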
func TestAzureSQL_ElasticPool_ResourceStats_Query(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" {
t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING")
}
connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING")
server := &SQLServer{
Servers: []string{connectionString},
IncludeQuery: []string{"AzureSQLPoolResourceStats"},
AuthMethod: "connection_string",
DatabaseType: "AzureSQLPool",
}
var acc testutil.Accumulator
require.NoError(t, server.Start(&acc))
require.NoError(t, server.Gather(&acc))
require.True(t, acc.HasMeasurement("sqlserver_pool_resource_stats"))
require.True(t, acc.HasTag("sqlserver_pool_resource_stats", "sql_instance"))
require.True(t, acc.HasTag("sqlserver_pool_resource_stats", "elastic_pool_name"))
require.True(t, acc.HasField("sqlserver_pool_resource_stats", "snapshot_time"))
require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_cpu_percent"))
require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_data_io_percent"))
require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_log_write_percent"))
require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_storage_percent"))
require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "max_worker_percent"))
require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "max_session_percent"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_stats", "storage_limit_mb"))
require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_instance_cpu_percent"))
require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_allocated_storage_percent"))
// This query should only return one row
require.Equal(t, 1, len(acc.Metrics))
server.Stop()
}
func TestAzureSQL_ElasticPool_ResourceGovernance_Query(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" {
t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING")
}
connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING")
server := &SQLServer{
Servers: []string{connectionString},
IncludeQuery: []string{"AzureSQLPoolResourceGovernance"},
AuthMethod: "connection_string",
DatabaseType: "AzureSQLPool",
}
var acc testutil.Accumulator
require.NoError(t, server.Start(&acc))
require.NoError(t, server.Gather(&acc))
require.True(t, acc.HasMeasurement("sqlserver_pool_resource_governance"))
require.True(t, acc.HasTag("sqlserver_pool_resource_governance", "sql_instance"))
require.True(t, acc.HasTag("sqlserver_pool_resource_governance", "elastic_pool_name"))
require.True(t, acc.HasTag("sqlserver_pool_resource_governance", "slo_name"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "dtu_limit"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "cpu_limit"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "max_cpu"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "cap_cpu"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "max_db_memory"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "max_db_max_size_in_mb"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "db_file_growth_in_mb"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "log_size_in_mb"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "instance_cap_cpu"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "instance_max_log_rate"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "instance_max_worker_threads"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "checkpoint_rate_mbps"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "checkpoint_rate_io"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_group_max_workers"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_min_log_rate"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_max_log_rate"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_group_min_io"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_group_max_io"))
require.True(t, acc.HasFloatField("sqlserver_pool_resource_governance", "primary_group_min_cpu"))
require.True(t, acc.HasFloatField("sqlserver_pool_resource_governance", "primary_group_max_cpu"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_pool_max_workers"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "pool_max_io"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_local_iops"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_managed_xstore_iops"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_external_xstore_iops"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_type_local_iops"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_type_managed_xstore_iops"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_type_external_xstore_iops"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_pfs_iops"))
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_type_pfs_iops"))
// This query should only return one row
require.Equal(t, 1, len(acc.Metrics))
server.Stop()
}
func TestAzureSQL_ElasticPool_DatabaseIO_Query(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" {
t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING")
}
connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING")
server := &SQLServer{
Servers: []string{connectionString},
IncludeQuery: []string{"AzureSQLPoolDatabaseIO"},
AuthMethod: "connection_string",
DatabaseType: "AzureSQLPool",
}
var acc testutil.Accumulator
require.NoError(t, server.Start(&acc))
require.NoError(t, server.Gather(&acc))
require.True(t, acc.HasMeasurement("sqlserver_database_io"))
require.True(t, acc.HasTag("sqlserver_database_io", "sql_instance"))
require.True(t, acc.HasTag("sqlserver_database_io", "elastic_pool_name"))
require.True(t, acc.HasTag("sqlserver_database_io", "database_name"))
require.True(t, acc.HasInt64Field("sqlserver_database_io", "database_id"))
require.True(t, acc.HasInt64Field("sqlserver_database_io", "file_id"))
require.True(t, acc.HasTag("sqlserver_database_io", "file_type"))
require.True(t, acc.HasInt64Field("sqlserver_database_io", "reads"))
require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_bytes"))
require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_latency_ms"))
require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_latency_ms"))
require.True(t, acc.HasInt64Field("sqlserver_database_io", "writes"))
require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_bytes"))
require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_read_stall_ms"))
require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_write_stall_ms"))
require.True(t, acc.HasInt64Field("sqlserver_database_io", "size_on_disk_bytes"))
require.True(t, acc.HasInt64Field("sqlserver_database_io", "size_on_disk_mb"))
server.Stop()
}
func TestAzureSQL_ElasticPool_OsWaitStats_Query(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" {
t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING")
}
connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING")
server := &SQLServer{
Servers: []string{connectionString},
IncludeQuery: []string{"AzureSQLPoolOsWaitStats"},
AuthMethod: "connection_string",
DatabaseType: "AzureSQLPool",
}
var acc testutil.Accumulator
require.NoError(t, server.Start(&acc))
require.NoError(t, server.Gather(&acc))
require.True(t, acc.HasMeasurement("sqlserver_waitstats"))
require.True(t, acc.HasTag("sqlserver_waitstats", "sql_instance"))
require.True(t, acc.HasTag("sqlserver_waitstats", "elastic_pool_name"))
require.True(t, acc.HasTag("sqlserver_waitstats", "wait_type"))
require.True(t, acc.HasInt64Field("sqlserver_waitstats", "waiting_tasks_count"))
require.True(t, acc.HasInt64Field("sqlserver_waitstats", "wait_time_ms"))
require.True(t, acc.HasInt64Field("sqlserver_waitstats", "max_wait_time_ms"))
require.True(t, acc.HasInt64Field("sqlserver_waitstats", "signal_wait_time_ms"))
require.True(t, acc.HasInt64Field("sqlserver_waitstats", "resource_wait_ms"))
require.True(t, acc.HasTag("sqlserver_waitstats", "wait_category"))
server.Stop()
}
func TestAzureSQL_ElasticPool_MemoryClerks_Query(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" {
t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING")
}
connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING")
server := &SQLServer{
Servers: []string{connectionString},
IncludeQuery: []string{"AzureSQLPoolMemoryClerks"},
AuthMethod: "connection_string",
DatabaseType: "AzureSQLPool",
}
var acc testutil.Accumulator
require.NoError(t, server.Start(&acc))
require.NoError(t, server.Gather(&acc))
require.True(t, acc.HasMeasurement("sqlserver_memory_clerks"))
require.True(t, acc.HasTag("sqlserver_memory_clerks", "sql_instance"))
require.True(t, acc.HasTag("sqlserver_memory_clerks", "elastic_pool_name"))
require.True(t, acc.HasTag("sqlserver_memory_clerks", "clerk_type"))
require.True(t, acc.HasInt64Field("sqlserver_memory_clerks", "size_kb"))
server.Stop()
}
func TestAzureSQL_ElasticPool_PerformanceCounters_Query(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" {
t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING")
}
connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING")
server := &SQLServer{
Servers: []string{connectionString},
IncludeQuery: []string{"AzureSQLPoolPerformanceCounters"},
AuthMethod: "connection_string",
DatabaseType: "AzureSQLPool",
}
var acc testutil.Accumulator
require.NoError(t, server.Start(&acc))
require.NoError(t, server.Gather(&acc))
require.True(t, acc.HasMeasurement("sqlserver_performance"))
require.True(t, acc.HasTag("sqlserver_performance", "sql_instance"))
require.True(t, acc.HasTag("sqlserver_performance", "object"))
require.True(t, acc.HasTag("sqlserver_performance", "counter"))
require.True(t, acc.HasTag("sqlserver_performance", "instance"))
require.True(t, acc.HasFloatField("sqlserver_performance", "value"))
require.True(t, acc.HasTag("sqlserver_performance", "counter_type"))
server.Stop()
}
func TestAzureSQL_ElasticPool_Schedulers_Query(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" {
t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING")
}
connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING")
server := &SQLServer{
Servers: []string{connectionString},
IncludeQuery: []string{"AzureSQLPoolSchedulers"},
AuthMethod: "connection_string",
DatabaseType: "AzureSQLPool",
}
var acc testutil.Accumulator
require.NoError(t, server.Start(&acc))
require.NoError(t, server.Gather(&acc))
require.True(t, acc.HasMeasurement("sqlserver_schedulers"))
require.True(t, acc.HasTag("sqlserver_schedulers", "sql_instance"))
require.True(t, acc.HasTag("sqlserver_schedulers", "elastic_pool_name"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "scheduler_id"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "cpu_id"))
require.True(t, acc.HasTag("sqlserver_schedulers", "status"))
require.True(t, acc.HasField("sqlserver_schedulers", "is_online"))
require.True(t, acc.HasField("sqlserver_schedulers", "is_idle"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "preemptive_switches_count"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "context_switches_count"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "idle_switches_count"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_tasks_count"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "runnable_tasks_count"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_workers_count"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "active_workers_count"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "work_queue_count"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "pending_disk_io_count"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "load_factor"))
require.True(t, acc.HasField("sqlserver_schedulers", "failed_to_create_worker"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "quantum_length_us"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "yield_count"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_cpu_usage_ms"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_cpu_idle_capped_ms"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_scheduler_delay_ms"))
require.True(t, acc.HasInt64Field("sqlserver_schedulers", "ideal_workers_limit"))
server.Stop()
}
| [
"\"AZURESQL_POOL_CONNECTION_STRING\"",
"\"AZURESQL_POOL_CONNECTION_STRING\"",
"\"AZURESQL_POOL_CONNECTION_STRING\"",
"\"AZURESQL_POOL_CONNECTION_STRING\"",
"\"AZURESQL_POOL_CONNECTION_STRING\"",
"\"AZURESQL_POOL_CONNECTION_STRING\"",
"\"AZURESQL_POOL_CONNECTION_STRING\"",
"\"AZURESQL_POOL_CONNECTION_STRING\"",
"\"AZURESQL_POOL_CONNECTION_STRING\"",
"\"AZURESQL_POOL_CONNECTION_STRING\"",
"\"AZURESQL_POOL_CONNECTION_STRING\"",
"\"AZURESQL_POOL_CONNECTION_STRING\"",
"\"AZURESQL_POOL_CONNECTION_STRING\"",
"\"AZURESQL_POOL_CONNECTION_STRING\""
]
| []
| [
"AZURESQL_POOL_CONNECTION_STRING"
]
| [] | ["AZURESQL_POOL_CONNECTION_STRING"] | go | 1 | 0 | |
internal/flags/safe_relative_path.go | /*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flags
import (
"fmt"
"path/filepath"
"strings"
securejoin "github.com/cyphar/filepath-securejoin"
)
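// SafeRelativePath is a flag value that normalises its input into a safe
// relative path: always prefixed with "./" and never escaping upwards.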
type SafeRelativePath string
func (p *SafeRelativePath) String() string {
return string(*p)
}
func (p *SafeRelativePath) ToSlash() string {
return filepath.ToSlash(p.String())
}
func (p *SafeRelativePath) Set(str string) error {
// The result of secure joining on a relative base dir is a flattened relative path.
cleanP, err := securejoin.SecureJoin("./", strings.TrimSpace(str))
if err != nil {
return fmt.Errorf("invalid relative path '%s': %w", cleanP, err)
}
// NB: required, as a secure join of "./" will result in "."
cleanP = fmt.Sprintf("./%s", strings.TrimPrefix(cleanP, "."))
*p = SafeRelativePath(cleanP)
return nil
}
func (p *SafeRelativePath) Type() string {
return "safeRelativePath"
}
func (p *SafeRelativePath) Description() string {
	return "secure relative path"
}
| []
| []
| []
| [] | [] | go | null | null | null |
changedetection.py | #!/usr/bin/python3
# Launch as a eventlet.wsgi server instance.
import getopt
import os
import sys
import eventlet
import eventlet.wsgi
import changedetectionio
from changedetectionio import store
def main():
ssl_mode = False
port = os.environ.get('PORT') or 5000
do_cleanup = False
    # Must be absolute so that send_from_directory doesn't try to make it relative to backend/
datastore_path = os.path.join(os.getcwd(), "datastore")
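    # Option string: -C create the datastore dir, -c clean up unused snapshots,
    # -s enable SSL, -d <path> datastore location, -p <port> listen port.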
try:
opts, args = getopt.getopt(sys.argv[1:], "Ccsd:p:", "port")
except getopt.GetoptError:
print('backend.py -s SSL enable -p [port] -d [datastore path]')
sys.exit(2)
create_datastore_dir = False
for opt, arg in opts:
# if opt == '--purge':
# Remove history, the actual files you need to delete manually.
# for uuid, watch in datastore.data['watching'].items():
# watch.update({'history': {}, 'last_checked': 0, 'last_changed': 0, 'previous_md5': None})
if opt == '-s':
ssl_mode = True
if opt == '-p':
port = int(arg)
if opt == '-d':
datastore_path = arg
        # Cleanup (remove text files that aren't in the index)
if opt == '-c':
do_cleanup = True
        # Create the datadir if it doesn't exist
if opt == '-C':
create_datastore_dir = True
    # isn't there some @thingy to attach to each route to tell it that this route needs a datastore?
app_config = {'datastore_path': datastore_path}
if not os.path.isdir(app_config['datastore_path']):
if create_datastore_dir:
os.mkdir(app_config['datastore_path'])
else:
print ("ERROR: Directory path for the datastore '{}' does not exist, cannot start, please make sure the directory exists.\n"
"Alternatively, use the -d parameter.".format(app_config['datastore_path']),file=sys.stderr)
sys.exit(2)
datastore = store.ChangeDetectionStore(datastore_path=app_config['datastore_path'], version_tag=changedetectionio.__version__)
app = changedetectionio.changedetection_app(app_config, datastore)
# Go into cleanup mode
if do_cleanup:
datastore.remove_unused_snapshots()
app.config['datastore_path'] = datastore_path
@app.context_processor
def inject_version():
return dict(right_sticky="v{}".format(datastore.data['version_tag']),
new_version_available=app.config['NEW_VERSION_AVAILABLE'],
has_password=datastore.data['settings']['application']['password'] != False
)
# Proxy sub-directory support
# Set environment var USE_X_SETTINGS=1 on this script
# And then in your proxy_pass settings
#
# proxy_set_header Host "localhost";
# proxy_set_header X-Forwarded-Prefix /app;
if os.getenv('USE_X_SETTINGS'):
print ("USE_X_SETTINGS is ENABLED\n")
from werkzeug.middleware.proxy_fix import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app, x_prefix=1, x_host=1)
if ssl_mode:
# @todo finalise SSL config, but this should get you in the right direction if you need it.
eventlet.wsgi.server(eventlet.wrap_ssl(eventlet.listen(('', port)),
certfile='cert.pem',
keyfile='privkey.pem',
server_side=True), app)
else:
eventlet.wsgi.server(eventlet.listen(('', int(port))), app)
if __name__ == '__main__':
main()
| []
| []
| [
"PORT",
"USE_X_SETTINGS"
]
| [] | ["PORT", "USE_X_SETTINGS"] | python | 2 | 0 | |
tests/fixtures/project_django/demo_project/wsgi.py | """
WSGI config for demo_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault(
'DJANGO_SETTINGS_MODULE', 'demo_project.settings'
)
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
sdk/communication/azure-communication-identity/samples/identity_samples_async.py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: identity_sample_async.py
DESCRIPTION:
These async samples demonstrate creating a user, issuing a token, revoking a token and deleting a user.
USAGE:
python identity_samples_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_COMMUNICATION_SERVICE_ENDPOINT - Communication Service endpoint url
2) COMMUNICATION_SAMPLES_CONNECTION_STRING - the connection string in your ACS account
3) AZURE_CLIENT_ID - the client ID of your active directory application
4) AZURE_CLIENT_SECRET - the secret of your active directory application
5) AZURE_TENANT_ID - the tenant ID of your active directory application
"""
from azure.communication.identity._shared.utils import parse_connection_str
import asyncio
import os
class CommunicationIdentityClientSamples(object):
def __init__(self):
self.connection_string = os.getenv('COMMUNICATION_SAMPLES_CONNECTION_STRING')
self.endpoint = os.getenv('AZURE_COMMUNICATION_SERVICE_ENDPOINT')
self.client_id = os.getenv('AZURE_CLIENT_ID')
self.client_secret = os.getenv('AZURE_CLIENT_SECRET')
self.tenant_id = os.getenv('AZURE_TENANT_ID')
async def get_token(self):
from azure.communication.identity.aio import CommunicationIdentityClient
from azure.communication.identity import CommunicationTokenScope
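        # Prefer Azure AD credentials when client id/secret/tenant are all set,
        # otherwise fall back to the connection string; each sample below
        # repeats the same fallback.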
if self.client_id is not None and self.client_secret is not None and self.tenant_id is not None:
from azure.identity import DefaultAzureCredential
endpoint, _ = parse_connection_str(self.connection_string)
identity_client = CommunicationIdentityClient(endpoint, DefaultAzureCredential())
else:
identity_client = CommunicationIdentityClient.from_connection_string(self.connection_string)
async with identity_client:
user = await identity_client.create_user()
print("Issuing token for: " + user.properties.get('id'))
tokenresponse = await identity_client.get_token(user, scopes=[CommunicationTokenScope.CHAT])
print("Token issued with value: " + tokenresponse.token)
async def revoke_tokens(self):
from azure.communication.identity.aio import CommunicationIdentityClient
from azure.communication.identity import CommunicationTokenScope
if self.client_id is not None and self.client_secret is not None and self.tenant_id is not None:
from azure.identity import DefaultAzureCredential
endpoint, _ = parse_connection_str(self.connection_string)
identity_client = CommunicationIdentityClient(endpoint, DefaultAzureCredential())
else:
identity_client = CommunicationIdentityClient.from_connection_string(self.connection_string)
async with identity_client:
user = await identity_client.create_user()
tokenresponse = await identity_client.get_token(user, scopes=[CommunicationTokenScope.CHAT])
print("Revoking token: " + tokenresponse.token)
await identity_client.revoke_tokens(user)
print(tokenresponse.token + " revoked successfully")
async def create_user(self):
from azure.communication.identity.aio import CommunicationIdentityClient
if self.client_id is not None and self.client_secret is not None and self.tenant_id is not None:
from azure.identity import DefaultAzureCredential
endpoint, _ = parse_connection_str(self.connection_string)
identity_client = CommunicationIdentityClient(endpoint, DefaultAzureCredential())
else:
identity_client = CommunicationIdentityClient.from_connection_string(self.connection_string)
async with identity_client:
print("Creating new user")
user = await identity_client.create_user()
print("User created with id:" + user.properties.get('id'))
async def create_user_and_token(self):
from azure.communication.identity.aio import CommunicationIdentityClient
from azure.communication.identity import CommunicationTokenScope
if self.client_id is not None and self.client_secret is not None and self.tenant_id is not None:
from azure.identity import DefaultAzureCredential
endpoint, _ = parse_connection_str(self.connection_string)
identity_client = CommunicationIdentityClient(endpoint, DefaultAzureCredential())
else:
identity_client = CommunicationIdentityClient.from_connection_string(self.connection_string)
async with identity_client:
print("Creating new user with token")
user, tokenresponse = await identity_client.create_user_and_token(scopes=[CommunicationTokenScope.CHAT])
print("User created with id:" + user.properties.get('id'))
print("Token issued with value: " + tokenresponse.token)
async def delete_user(self):
from azure.communication.identity.aio import CommunicationIdentityClient
if self.client_id is not None and self.client_secret is not None and self.tenant_id is not None:
from azure.identity import DefaultAzureCredential
endpoint, _ = parse_connection_str(self.connection_string)
identity_client = CommunicationIdentityClient(endpoint, DefaultAzureCredential())
else:
identity_client = CommunicationIdentityClient.from_connection_string(self.connection_string)
async with identity_client:
user = await identity_client.create_user()
print("Deleting user: " + user.properties.get('id'))
await identity_client.delete_user(user)
print(user.properties.get('id') + " deleted")
async def main():
sample = CommunicationIdentityClientSamples()
await sample.create_user()
await sample.create_user_and_token()
await sample.get_token()
await sample.revoke_tokens()
await sample.delete_user()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| []
| []
| [
"AZURE_CLIENT_ID",
"COMMUNICATION_SAMPLES_CONNECTION_STRING",
"AZURE_CLIENT_SECRET",
"AZURE_TENANT_ID",
"AZURE_COMMUNICATION_SERVICE_ENDPOINT"
]
| [] | ["AZURE_CLIENT_ID", "COMMUNICATION_SAMPLES_CONNECTION_STRING", "AZURE_CLIENT_SECRET", "AZURE_TENANT_ID", "AZURE_COMMUNICATION_SERVICE_ENDPOINT"] | python | 5 | 0 | |
outlook.py | # imported modules
import sys
import os
from dotenv import load_dotenv
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
# loads in dotenv variables
load_dotenv()
# creates a new Chrome window and maximizes
driver = webdriver.Chrome()
driver.maximize_window()
# creates system variables for later usage
try:
email = str(sys.argv[1])
subject = str(sys.argv[2])
except IndexError:
email = input('Email: ')
subject = input('Subject: ')
# goes to my.utrgv.edu
driver.get("https://my.utrgv.edu/web/myutrgv/home")
# finds text input and inputs username
username = driver.find_element_by_id("_58_username")
username.send_keys(os.environ['USER-NAME'])
# finds text input and inputs password
password = driver.find_element_by_id("_58_password")
password.send_keys(os.environ['PASSWORD'])
# finds the submit button and clicks it
driver.find_element_by_xpath('//*[@id="_58_fm"]/div/button').click()
# explicitly waits for page to load in and opens outlook
wait = WebDriverWait(driver, 10)
wait.until(EC.element_to_be_clickable(
    (By.XPATH,
     '//*[@id="portlet_utrgvgraphemailportlet_WAR_utrgvgraphemailportlet"]/div/div/div/div/article[1]/a'))).click()
# explicitly waits for the new Outlook window to open
WebDriverWait(driver, 10).until(EC.number_of_windows_to_be(2))
# gets a list of windows currently open
handles = driver.window_handles
# verifies that second window is open and waits for load-in
driver.switch_to.window(handles[1])
driver.implicitly_wait(10)
# finds New Message button, goes to it, and clicks it
driver.find_element(By.ID, "id__3").click()
element = driver.find_element(By.ID, "id__3")
actions = ActionChains(driver)
actions.move_to_element(element).perform()
# user-defined info
driver.implicitly_wait(10)
actions = ActionChains(driver)
actions.send_keys(email)
actions.perform()
# tab from the To field across the CC/BCC controls to the Subject field
# (assumes Outlook web's current tab order)
for _ in range(4):
    actions = ActionChains(driver)
    actions.send_keys(Keys.TAB)
    actions.perform()
# send subject
actions = ActionChains(driver)
actions.send_keys(subject)
actions.perform()
driver.implicitly_wait(2)
actions = ActionChains(driver)
actions.send_keys(Keys.TAB)
actions.perform()
# send main message template
actions = ActionChains(driver)
actions.send_keys('Hello.\n\n\n\nThank you.\n\nJaime Garcia, Jr.')
actions.perform()
# done message
sys.exit('Done!')
| []
| []
| [
"USER-NAME",
"PASSWORD"
]
| [] | ["USER-NAME", "PASSWORD"] | python | 2 | 0 | |
s3-handler/main.go | package main
import (
"bufio"
"compress/gzip"
"os"
"strings"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/honeycombio/agentless-integrations-for-aws/common"
"github.com/honeycombio/honeytail/httime"
"github.com/honeycombio/honeytail/parsers"
"github.com/honeycombio/libhoney-go"
"github.com/sirupsen/logrus"
)
// Response is a simple structured response
type Response struct {
Ok bool `json:"ok"`
Message string `json:"message"`
}
var parser parsers.LineParser
var parserType, timeFieldName, timeFieldFormat, env string
var matchPatterns, filterPatterns []string
func Handler(request events.S3Event) (Response, error) {
sess := session.Must(session.NewSession(&aws.Config{
Region: aws.String(os.Getenv("AWS_REGION")),
}))
config := &aws.Config{}
svc := s3.New(sess, config)
for _, record := range request.Records {
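		// filterKey (defined elsewhere in this package) reports whether the
		// object key should be skipped under the match/filter patterns.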
if filterKey(record.S3.Object.Key, matchPatterns, filterPatterns) {
logrus.WithFields(logrus.Fields{
"key": record.S3.Object.Key,
"match_patterns": matchPatterns,
"filter_patterns": filterPatterns,
}).Info("key doesn't match specified patterns, skipping")
continue
}
resp, err := svc.GetObject(&s3.GetObjectInput{
Bucket: &record.S3.Bucket.Name,
Key: &record.S3.Object.Key,
})
if err != nil {
logrus.WithError(err).WithFields(logrus.Fields{
"key": record.S3.Object.Key,
"bucket": record.S3.Bucket.Name,
}).Warn("unable to get object from bucket")
continue
}
reader := resp.Body
// figure out if this file is gzipped
if resp.ContentType != nil {
if *resp.ContentType == "application/x-gzip" {
reader, err = gzip.NewReader(resp.Body)
if err != nil {
logrus.WithError(err).WithField("key", record.S3.Object.Key).
Warn("unable to create gzip reader for object")
continue
}
}
} else if strings.HasSuffix(record.S3.Object.Key, ".gz") {
reader, err = gzip.NewReader(resp.Body)
if err != nil {
logrus.WithError(err).WithField("key", record.S3.Object.Key).
Warn("unable to create gzip reader for object")
continue
}
}
linesRead := 0
scanner := bufio.NewScanner(reader)
ok := scanner.Scan()
for ok {
linesRead++
if linesRead%10000 == 0 {
logrus.WithFields(logrus.Fields{
"lines_read": linesRead,
"key": record.S3.Object.Key,
}).Info("parser checkpoint")
}
parsedLine, err := parser.ParseLine(scanner.Text())
if err != nil {
logrus.WithError(err).WithField("line", scanner.Text()).
Warn("failed to parse line")
continue
}
hnyEvent := libhoney.NewEvent()
timestamp := httime.GetTimestamp(parsedLine, timeFieldName, timeFieldFormat)
hnyEvent.Timestamp = timestamp
// convert ints and floats if necessary
if parserType != "json" {
hnyEvent.Add(common.ConvertTypes(parsedLine))
} else {
hnyEvent.Add(parsedLine)
}
hnyEvent.AddField("env", env)
hnyEvent.Send()
ok = scanner.Scan()
}
if scanner.Err() != nil {
logrus.WithError(scanner.Err()).WithField("key", record.S3.Object.Key).
Error("s3 read of object ended early due to error")
}
}
libhoney.Flush()
return Response{
Ok: true,
Message: "ok",
}, nil
}
func main() {
var err error
if err = common.InitHoneycombFromEnvVars(); err != nil {
logrus.WithError(err).
Fatal("Unable to initialize libhoney with the supplied environment variables")
return
}
defer libhoney.Close()
parserType = os.Getenv("PARSER_TYPE")
parser, err = common.ConstructParser(parserType)
if err != nil {
logrus.WithError(err).WithField("parser_type", parserType).
Fatal("unable to construct parser")
return
}
common.AddUserAgentMetadata("s3", parserType)
env = os.Getenv("ENVIRONMENT")
timeFieldName = os.Getenv("TIME_FIELD_NAME")
timeFieldFormat = os.Getenv("TIME_FIELD_FORMAT")
matchPatterns = []string{".*"}
filterPatterns = []string{}
if os.Getenv("MATCH_PATTERNS") != "" {
matchPatterns = strings.Split(os.Getenv("MATCH_PATTERNS"), ",")
}
if os.Getenv("FILTER_PATTERNS") != "" {
filterPatterns = strings.Split(os.Getenv("FILTER_PATTERNS"), ",")
}
lambda.Start(Handler)
}
| [
"\"AWS_REGION\"",
"\"PARSER_TYPE\"",
"\"ENVIRONMENT\"",
"\"TIME_FIELD_NAME\"",
"\"TIME_FIELD_FORMAT\"",
"\"MATCH_PATTERNS\"",
"\"MATCH_PATTERNS\"",
"\"FILTER_PATTERNS\"",
"\"FILTER_PATTERNS\""
]
| []
| [
"MATCH_PATTERNS",
"AWS_REGION",
"TIME_FIELD_FORMAT",
"TIME_FIELD_NAME",
"ENVIRONMENT",
"PARSER_TYPE",
"FILTER_PATTERNS"
]
| [] | ["MATCH_PATTERNS", "AWS_REGION", "TIME_FIELD_FORMAT", "TIME_FIELD_NAME", "ENVIRONMENT", "PARSER_TYPE", "FILTER_PATTERNS"] | go | 7 | 0 | |
core/chaincode/platforms/golang/hash.go | /*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package golang
import (
"archive/tar"
"bytes"
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/golang/protobuf/proto"
"github.com/op/go-logging"
"github.com/spf13/viper"
cutil "github.com/hyperledger/fabric/core/container/util"
"github.com/hyperledger/fabric/core/util"
pb "github.com/hyperledger/fabric/protos"
)
var logger = logging.MustGetLogger("golang/hash")
//core hash computation factored out for testing
func computeHash(contents []byte, hash []byte) []byte {
newSlice := make([]byte, len(hash)+len(contents))
//copy the contents
copy(newSlice[0:len(contents)], contents[:])
//add the previous hash
copy(newSlice[len(contents):], hash[:])
//compute new hash
hash = util.ComputeCryptoHash(newSlice)
return hash
}
//hashFilesInDir computes h=hash(h,file bytes) for each file in a directory
//Directory entries are traversed recursively. In the end a single
//hash value is returned for the entire directory structure
func hashFilesInDir(rootDir string, dir string, hash []byte, tw *tar.Writer) ([]byte, error) {
currentDir := filepath.Join(rootDir, dir)
logger.Debugf("hashFiles %s", currentDir)
//ReadDir returns sorted list of files in dir
fis, err := ioutil.ReadDir(currentDir)
if err != nil {
return hash, fmt.Errorf("ReadDir failed %s\n", err)
}
for _, fi := range fis {
name := filepath.Join(dir, fi.Name())
if fi.IsDir() {
var err error
hash, err = hashFilesInDir(rootDir, name, hash, tw)
if err != nil {
return hash, err
}
continue
}
fqp := filepath.Join(rootDir, name)
buf, err := ioutil.ReadFile(fqp)
if err != nil {
fmt.Printf("Error reading %s\n", err)
return hash, err
}
//get the new hash from file contents
hash = computeHash(buf, hash)
if tw != nil {
is := bytes.NewReader(buf)
if err = cutil.WriteStreamToPackage(is, fqp, filepath.Join("src", name), tw); err != nil {
return hash, fmt.Errorf("Error adding file to tar %s", err)
}
}
}
return hash, nil
}
func isCodeExist(tmppath string) error {
file, err := os.Open(tmppath)
if err != nil {
return fmt.Errorf("Download failed %s", err)
}
fi, err := file.Stat()
if err != nil {
return fmt.Errorf("Could not stat file %s", err)
}
if !fi.IsDir() {
return fmt.Errorf("File %s is not dir\n", file.Name())
}
return nil
}
func getCodeFromHTTP(path string) (codegopath string, err error) {
codegopath = ""
err = nil
logger.Debugf("getCodeFromHTTP %s", path)
// The following could be done with os.Getenv("GOPATH") but we need to change it later so this prepares for that next step
env := os.Environ()
var origgopath string
var gopathenvIndex int
for i, v := range env {
if strings.Index(v, "GOPATH=") == 0 {
p := strings.SplitAfter(v, "GOPATH=")
origgopath = p[1]
gopathenvIndex = i
break
}
}
if origgopath == "" {
err = fmt.Errorf("GOPATH not defined")
return
}
// Only take the first element of GOPATH
gopath := filepath.SplitList(origgopath)[0]
// Define a new gopath in which to download the code
newgopath := filepath.Join(gopath, "_usercode_")
//ignore errors.. _usercode_ might exist. TempDir will catch any other errors
os.Mkdir(newgopath, 0755)
if codegopath, err = ioutil.TempDir(newgopath, ""); err != nil {
err = fmt.Errorf("could not create tmp dir under %s(%s)", newgopath, err)
return
}
//go paths can have multiple dirs. We create a GOPATH with two source tree's as follows
//
// <temporary empty folder to download chaincode source> : <local go path with OBC source>
//
//This approach has several goodness:
// . Go will pick the first path to download user code (which we will delete after processing)
// . GO will not download OBC as it is in the second path. GO will use the local OBC for generating chaincode image
// . network savings
// . more secure
// . as we are not downloading OBC, private, password-protected OBC repo's become non-issue
env[gopathenvIndex] = "GOPATH=" + codegopath + string(os.PathListSeparator) + origgopath
// Use a 'go get' command to pull the chaincode from the given repo
logger.Debugf("go get %s", path)
cmd := exec.Command("go", "get", path)
cmd.Env = env
var out bytes.Buffer
cmd.Stdout = &out
var errBuf bytes.Buffer
cmd.Stderr = &errBuf //capture Stderr and print it on error
err = cmd.Start()
// Create a go routine that will wait for the command to finish
done := make(chan error, 1)
go func() {
done <- cmd.Wait()
}()
select {
case <-time.After(time.Duration(viper.GetInt("chaincode.deploytimeout")) * time.Millisecond):
// If pulling repos takes too long, we should give up
// (This can happen if a repo is private and the git clone asks for credentials)
if err = cmd.Process.Kill(); err != nil {
err = fmt.Errorf("failed to kill: %s", err)
} else {
err = errors.New("Getting chaincode took too long")
}
case err = <-done:
// If we're here, the 'go get' command must have finished
if err != nil {
err = fmt.Errorf("'go get' failed with error: \"%s\"\n%s", err, string(errBuf.Bytes()))
}
}
return
}
func getCodeFromFS(path string) (codegopath string, err error) {
logger.Debugf("getCodeFromFS %s", path)
gopath := os.Getenv("GOPATH")
if gopath == "" {
err = fmt.Errorf("GOPATH not defined")
return
}
// Only take the first element of GOPATH
codegopath = filepath.SplitList(gopath)[0]
return
}
//generateHashcode gets hashcode of the code under path. If path is a HTTP(s) url
//it downloads the code first to compute the hash.
//NOTE: for dev mode, user builds and runs chaincode manually. The name provided
//by the user is equivalent to the path. This method will treat the name
//as codebytes and compute the hash from it. ie, user cannot run the chaincode
//with the same (name, ctor, args)
func generateHashcode(spec *pb.ChaincodeSpec, tw *tar.Writer) (string, error) {
if spec == nil {
return "", fmt.Errorf("Cannot generate hashcode from nil spec")
}
chaincodeID := spec.ChaincodeID
if chaincodeID == nil || chaincodeID.Path == "" {
return "", fmt.Errorf("Cannot generate hashcode from empty chaincode path")
}
ctor := spec.CtorMsg
if ctor == nil || len(ctor.Args) == 0 {
return "", fmt.Errorf("Cannot generate hashcode from empty ctor")
}
//code root will point to the directory where the code exists
//in the case of http it will be a temporary dir that
//will have to be deleted
var codegopath string
var ishttp bool
defer func() {
if ishttp && codegopath != "" {
os.RemoveAll(codegopath)
}
}()
path := chaincodeID.Path
var err error
var actualcodepath string
if strings.HasPrefix(path, "http://") {
ishttp = true
actualcodepath = path[7:]
codegopath, err = getCodeFromHTTP(actualcodepath)
} else if strings.HasPrefix(path, "https://") {
ishttp = true
actualcodepath = path[8:]
codegopath, err = getCodeFromHTTP(actualcodepath)
} else {
actualcodepath = path
codegopath, err = getCodeFromFS(path)
}
if err != nil {
return "", fmt.Errorf("Error getting code %s", err)
}
tmppath := filepath.Join(codegopath, "src", actualcodepath)
if err = isCodeExist(tmppath); err != nil {
return "", fmt.Errorf("code does not exist %s", err)
}
ctorbytes, err := proto.Marshal(ctor)
if err != nil {
return "", fmt.Errorf("Error marshalling constructor: %s", err)
}
hash := util.GenerateHashFromSignature(actualcodepath, ctorbytes)
hash, err = hashFilesInDir(filepath.Join(codegopath, "src"), actualcodepath, hash, tw)
if err != nil {
return "", fmt.Errorf("Could not get hashcode for %s - %s\n", path, err)
}
return hex.EncodeToString(hash[:]), nil
}
| [
"\"GOPATH\"",
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
main.go | package main
import (
"context"
"errors"
"fmt"
"net/http"
_ "net/http/pprof"
"os"
"strings"
"time"
"github.com/jenkins-x/lighthouse-telemetry-plugin/internal/kube"
"github.com/jenkins-x/lighthouse-telemetry-plugin/internal/lighthouse"
"github.com/jenkins-x/lighthouse-telemetry-plugin/internal/version"
"github.com/jenkins-x/lighthouse-telemetry-plugin/pkg/trace"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"go.opentelemetry.io/otel/exporters/otlp"
"go.opentelemetry.io/otel/exporters/otlp/otlpgrpc"
"go.opentelemetry.io/otel/exporters/otlp/otlphttp"
"go.opentelemetry.io/otel/exporters/trace/jaeger"
exporttrace "go.opentelemetry.io/otel/sdk/export/trace"
)
var (
options struct {
namespace string
resyncInterval time.Duration
childPullRequestDelay time.Duration
tracesExporterType string
tracesExporterEndpoint string
lighthouseHMACKey string
kubeConfigPath string
listenAddr string
logLevel string
printVersion bool
}
)
func init() {
pflag.StringVar(&options.namespace, "namespace", "jx", "Name of the jx namespace")
pflag.DurationVar(&options.resyncInterval, "resync-interval", 5*time.Minute, "Resync interval between full re-list operations")
pflag.DurationVar(&options.childPullRequestDelay, "child-pr-delay", 10*time.Minute, "Time to wait for a possible child pull request to be created, when generating gitops traces")
pflag.StringVar(&options.tracesExporterType, "traces-exporter-type", os.Getenv("TRACES_EXPORTER_TYPE"), "OpenTelemetry traces exporter type: otlp:grpc:insecure, otlp:http:insecure, jaeger:http:thrift")
pflag.StringVar(&options.tracesExporterEndpoint, "traces-exporter-endpoint", os.Getenv("TRACES_EXPORTER_ENDPOINT"), "OpenTelemetry traces exporter endpoint (host:port)")
pflag.StringVar(&options.lighthouseHMACKey, "lighthouse-hmac-key", os.Getenv("LIGHTHOUSE_HMAC_KEY"), "HMAC key used by Lighthouse to sign the webhooks")
pflag.StringVar(&options.listenAddr, "listen-addr", ":8080", "Address on which the HTTP server will listen for incoming connections")
pflag.StringVar(&options.logLevel, "log-level", "INFO", "Log level - one of: trace, debug, info, warn(ing), error, fatal or panic")
pflag.StringVar(&options.kubeConfigPath, "kubeconfig", kube.DefaultKubeConfigPath(), "Kubernetes Config Path. Default: KUBECONFIG env var value")
pflag.BoolVar(&options.printVersion, "version", false, "Print the version")
}
func main() {
pflag.Parse()
if options.printVersion {
fmt.Printf("Version %s - Revision %s - Date %s", version.Version, version.Revision, version.Date)
return
}
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
logger := logrus.New()
logLevel, err := logrus.ParseLevel(options.logLevel)
if err != nil {
logger.WithField("logLevel", options.logLevel).WithError(err).Error("Invalid log level")
} else {
logger.SetLevel(logLevel)
}
logger.WithField("logLevel", logLevel).Info("Starting")
kConfig, err := kube.NewConfig(options.kubeConfigPath)
if err != nil {
logger.WithError(err).Fatal("failed to create a Kubernetes config")
}
lighthouseHandler := &lighthouse.Handler{
SecretToken: options.lighthouseHMACKey,
Logger: logger,
}
var spanExporter exporttrace.SpanExporter
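// spanExporter stays nil unless both an exporter type and an endpoint are
// configured; in that case tracing is simply not enabled.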
if len(options.tracesExporterType) > 0 && len(options.tracesExporterEndpoint) > 0 {
logger.WithField("type", options.tracesExporterType).WithField("endpoint", options.tracesExporterEndpoint).Info("Initializing OpenTelemetry Traces Exporter")
switch options.tracesExporterType {
case "otlp:grpc:insecure":
spanExporter, err = otlp.NewExporter(ctx, otlpgrpc.NewDriver(
otlpgrpc.WithEndpoint(options.tracesExporterEndpoint),
otlpgrpc.WithInsecure(),
))
case "otlp:http:insecure":
spanExporter, err = otlp.NewExporter(ctx, otlphttp.NewDriver(
otlphttp.WithEndpoint(options.tracesExporterEndpoint),
otlphttp.WithInsecure(),
))
case "jaeger:http:thrift":
endpoint := fmt.Sprintf("http://%s/api/traces", options.tracesExporterEndpoint)
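// Probe the collector endpoint first so that a missing observability stack
// degrades to a warning instead of a hard failure at startup.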
_, err = http.Post(endpoint, "application/x-thrift", nil)
if err != nil && strings.Contains(err.Error(), "no such host") {
logger.WithError(err).Warning("Traces Exporter Endpoint configuration error. Maybe you need to install/configure the Observability stack? https://jenkins-x.io/v3/admin/guides/observability/ The OpenTelemetry Tracing feature won't be enabled until this is fixed.")
err = nil // ensure we won't fail. we just need to NOT set the exporter
} else {
spanExporter, err = jaeger.NewRawExporter(
jaeger.WithCollectorEndpoint(endpoint),
)
}
}
if err != nil {
logger.WithError(err).Fatal("failed to create an OpenTelemetry Exporter")
}
}
if spanExporter != nil {
logger.WithField("namespace", options.namespace).WithField("resyncInterval", options.resyncInterval).Info("Starting Trace Controller")
err = (&trace.Controller{
KubeConfig: kConfig,
Namespace: options.namespace,
ResyncInterval: options.resyncInterval,
ChildPullRequestDelay: options.childPullRequestDelay,
SpanExporter: spanExporter,
LighthouseHandler: lighthouseHandler,
Logger: logger,
}).Start(ctx)
if err != nil {
logger.WithError(err).Fatal("Failed to start the trace controller")
}
} else {
logger.Warning("NOT starting the Trace Controller!")
}
http.Handle("/lighthouse/events", lighthouseHandler)
http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
})
logger.WithField("listenAddr", options.listenAddr).Info("Starting HTTP Server")
err = http.ListenAndServe(options.listenAddr, nil)
if !errors.Is(err, http.ErrServerClosed) {
logger.WithError(err).Fatal("failed to start HTTP server")
}
}
| [
"\"TRACES_EXPORTER_TYPE\"",
"\"TRACES_EXPORTER_ENDPOINT\"",
"\"LIGHTHOUSE_HMAC_KEY\""
]
| []
| [
"TRACES_EXPORTER_ENDPOINT",
"LIGHTHOUSE_HMAC_KEY",
"TRACES_EXPORTER_TYPE"
]
| [] | ["TRACES_EXPORTER_ENDPOINT", "LIGHTHOUSE_HMAC_KEY", "TRACES_EXPORTER_TYPE"] | go | 3 | 0 | |
staging/src/github.com/kubekit/azure/authentication.go | package azure
import (
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"reflect"
"unicode/utf16"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure/cli"
"github.com/caarlos0/env"
"github.com/dimchansky/utfbom"
)
// Session holds the subscription and authorizer settings so that we do not have to keep creating a new authorizer
type Session struct {
Authorizer autorest.Authorizer
SubscriptionID string
}
// AuthInfo is essentially a combined file and settings struct in github.com/Azure/go-autorest/autorest/auth,
// but made to be exportable and allows for everything to be set through environment variables
type AuthInfo struct {
ClientID string `json:"clientId,omitempty" env:"AZURE_CLIENT_ID"`
ClientSecret string `json:"clientSecret,omitempty" env:"AZURE_CLIENT_SECRET"`
SubscriptionID string `json:"subscriptionId,omitempty" env:"AZURE_SUBSCRIPTION_ID"`
TenantID string `json:"tenantId,omitempty" env:"AZURE_TENANT_ID"`
ActiveDirectoryEndpoint string `json:"activeDirectoryEndpointUrl,omitempty" env:"AZURE_AD_ENDPOINT"`
ResourceManagerEndpoint string `json:"resourceManagerEndpointUrl,omitempty" env:"AZURE_RM_ENDPOINT"`
GraphResourceID string `json:"activeDirectoryGraphResourceId,omitempty" env:"AZURE_GRAPH_RESOURCE_ID"`
SQLManagementEndpoint string `json:"sqlManagementEndpointUrl,omitempty" env:"AZURE_SQLM_ENDPOINT"`
GalleryEndpoint string `json:"galleryEndpointUrl,omitempty" env:"AZURE_GALLERY_ENDPOINT"`
ManagementEndpoint string `json:"managementEndpointUrl,omitempty" env:"AZURE_MANAGEMENT_ENDPOINT"`
Environment string `json:"environment,omitempty" env:"AZURE_ENVIRONMENT"`
CertificatePath string `json:"certificatePath,omitempty" env:"AZURE_CERTIFICATE_PATH"`
CertificatePassword string `json:"certificatePassword,omitempty" env:"AZURE_CERTIFICATE_PASSWORD"`
Username string `json:"username,omitempty" env:"AZURE_USERNAME"`
Password string `json:"password,omitempty" env:"AZURE_PASSWORD"`
Resource string `json:"resource,omitempty" env:"AZURE_AD_RESOURCE"`
}
// NewSession creates a new sessions based on settings in the AuthInfo
// this is to avoid having to create a new authorizer for multiple clients
func NewSession(a *AuthInfo, authByCLI bool) (*Session, error) {
authorizer, err := a.NewResourceManagerAuthorizer(authByCLI)
if err != nil {
return nil, err
}
// create a new session
session := &Session{
SubscriptionID: a.SubscriptionID,
Authorizer: authorizer,
}
return session, nil
}
// NewResourceManagerAuthorizer retrieves an authorizer for the ResourceManagerEndpoint in the environment settings
func (a *AuthInfo) NewResourceManagerAuthorizer(authByCLI bool) (autorest.Authorizer, error) {
var err error
// get environment endpoints
env, err := EnvironmentFromName(a.Environment)
if err != nil {
return nil, fmt.Errorf("could not resolve Azure environment %q: %s", a.Environment, err)
}
// get oauth config
resource := env.ResourceManagerEndpoint
config, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, a.TenantID)
if err != nil {
return nil, err
}
// get adal token
var adalToken adal.OAuthTokenProvider
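// Two auth paths: service-principal credentials (client ID/secret) by default,
// or a token reused from a prior 'az login' when authByCLI is set.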
if !authByCLI {
adalToken, err = adal.NewServicePrincipalToken(*config, a.ClientID, a.ClientSecret, resource)
if err != nil {
return nil, err
}
} else {
cliToken, err := cli.GetTokenFromCLI(resource)
if err != nil {
return nil, err
}
token, err := cliToken.ToADALToken()
if err != nil {
return nil, err
}
adalToken = &token
}
return autorest.NewBearerAuthorizer(adalToken), nil
}
// this decode function is from:
// github.com/Azure/go-autorest/blob/master/autorest/azure/auth/auth.go
func decode(b []byte) ([]byte, error) {
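// Auth files exported by Azure tooling may be UTF-16 encoded with a BOM;
// detect the encoding and convert to UTF-8 before JSON unmarshalling.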
reader, enc := utfbom.Skip(bytes.NewReader(b))
switch enc {
case utfbom.UTF16LittleEndian:
u16 := make([]uint16, (len(b)/2)-1)
err := binary.Read(reader, binary.LittleEndian, &u16)
if err != nil {
return nil, err
}
return []byte(string(utf16.Decode(u16))), nil
case utfbom.UTF16BigEndian:
u16 := make([]uint16, (len(b)/2)-1)
err := binary.Read(reader, binary.BigEndian, &u16)
if err != nil {
return nil, err
}
return []byte(string(utf16.Decode(u16))), nil
}
return ioutil.ReadAll(reader)
}
// GetAuthInfoFromFile extracts auth info from the file located at the AZURE_AUTH_LOCATION path
// it is similar to getAuthFile() in: github.com/Azure/go-autorest/blob/master/autorest/azure/auth/auth.go
func GetAuthInfoFromFile() (*AuthInfo, error) {
fileLocation := os.Getenv("AZURE_AUTH_LOCATION")
if fileLocation == "" {
return nil, errors.New("environment variable AZURE_AUTH_LOCATION is not set")
}
contents, err := ioutil.ReadFile(fileLocation)
if err != nil {
return nil, err
}
// Auth file might be encoded
decoded, err := decode(contents)
if err != nil {
return nil, err
}
authInfo := AuthInfo{}
err = json.Unmarshal(decoded, &authInfo)
if err != nil {
return nil, err
}
return &authInfo, nil
}
// GetAuthInfoFromEnvVars allows a user to pass in auth info through environment variables instead of reading from file
func GetAuthInfoFromEnvVars() (*AuthInfo, error) {
authInfo := AuthInfo{}
err := env.Parse(&authInfo)
if err != nil {
return nil, err
}
return &authInfo, nil
}
func mergeAuthInfo(a, b *AuthInfo) *AuthInfo {
// merge non-empty values
// if a particular a, b field are both non-empty, then favor b
if a == nil && b != nil {
return b
}
if b == nil && a != nil {
return a
}
if a == nil && b == nil {
return b
}
elemA := reflect.ValueOf(a).Elem()
elemB := reflect.ValueOf(b).Elem()
typeOfB := elemB.Type()
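// Walk b's string fields via reflection: empty fields inherit a's value,
// so non-empty values from b win on conflict.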
for i := 0; i < elemB.NumField(); i++ {
valueFieldOfB := elemB.Field(i)
fieldName := typeOfB.Field(i).Name
if valueFieldOfB.String() == "" {
valueFieldOfB.SetString(elemA.FieldByName(fieldName).String())
}
}
return b
}
// GetAuthInfo gets auth info from both file and environment variables
// it merges the info and prioritizes non-empty values from the environment variables
func GetAuthInfo() (*AuthInfo, error) {
authFromFile, errFromFile := GetAuthInfoFromFile()
authFromEnvVars, errFromEnvVars := GetAuthInfoFromEnvVars()
if errFromFile != nil && errFromEnvVars != nil {
return nil, fmt.Errorf("Error retrieving auth info from file: %s\nError retrieving from auth info from environment variables: %s\n", errFromFile, errFromEnvVars)
}
return mergeAuthInfo(authFromFile, authFromEnvVars), nil
}
| [
"\"AZURE_AUTH_LOCATION\""
]
| []
| [
"AZURE_AUTH_LOCATION"
]
| [] | ["AZURE_AUTH_LOCATION"] | go | 1 | 0 | |
main.go | package main
import (
"fmt"
"log"
"net/http"
"os"
"strconv"
"github.com/gofiber/fiber/v2"
)
func main() {
app := fiber.New()
app.Get("/", func(c *fiber.Ctx) error {
return c.SendString("Bem-vindo(a) a calculadora API.")
})
// The original called registerHandlers before app.Listen, but
// http.ListenAndServe blocks, so the fiber server was never reached.
// Running the net/http handlers in a goroutine lets both servers start.
go registerHandlers(app)
app.Listen(":8000")
}
// somaHandler and subHandler were referenced but never defined; these are
// hypothetical minimal stubs that read "a" and "b" query parameters and
// return their sum and difference, respectively.
func somaHandler(w http.ResponseWriter, r *http.Request) {
a, _ := strconv.Atoi(r.URL.Query().Get("a"))
b, _ := strconv.Atoi(r.URL.Query().Get("b"))
fmt.Fprintf(w, "%d", a+b)
}
func subHandler(w http.ResponseWriter, r *http.Request) {
a, _ := strconv.Atoi(r.URL.Query().Get("a"))
b, _ := strconv.Atoi(r.URL.Query().Get("b"))
fmt.Fprintf(w, "%d", a-b)
}
func registerHandlers(app *fiber.App) {
http.HandleFunc("/soma/", somaHandler)
http.HandleFunc("/sub/", subHandler)
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", os.Getenv("PORT")), nil))
} | [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
qa/rpc-tests/maxblocksinflight.py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import RobbieconTestFramework
from test_framework.util import *
import logging
'''
In this test we connect to one node over p2p, send it numerous invs, and
compare the resulting number of getdata requests to a max allowed value. We
test for exceeding 128 blocks in flight, which is the limit a 0.9 client
would reach. [0.10 clients shouldn't request more than 16 from a single peer.]
'''
MAX_REQUESTS = 128
class TestManager(NodeConnCB):
# set up NodeConnCB callbacks, overriding base class
def on_getdata(self, conn, message):
self.log.debug("got getdata %s" % repr(message))
# Log the requests
for inv in message.inv:
if inv.hash not in self.blockReqCounts:
self.blockReqCounts[inv.hash] = 0
self.blockReqCounts[inv.hash] += 1
def on_close(self, conn):
if not self.disconnectOkay:
raise EarlyDisconnectError(0)
def __init__(self):
NodeConnCB.__init__(self)
self.log = logging.getLogger("BlockRelayTest")
def add_new_connection(self, connection):
self.connection = connection
self.blockReqCounts = {}
self.disconnectOkay = False
def run(self):
self.connection.rpc.generate(1) # Leave IBD
numBlocksToGenerate = [8, 16, 128, 1024]
for count in range(len(numBlocksToGenerate)):
current_invs = []
for i in range(numBlocksToGenerate[count]):
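# inv type 2 is MSG_BLOCK; a random hash guarantees the node has never
# seen the block, forcing it to decide whether to request it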
current_invs.append(CInv(2, random.randrange(0, 1 << 256)))
if len(current_invs) >= 50000:
self.connection.send_message(msg_inv(current_invs))
current_invs = []
if len(current_invs) > 0:
self.connection.send_message(msg_inv(current_invs))
# Wait and see how many blocks were requested
time.sleep(2)
total_requests = 0
with mininode_lock:
for key in self.blockReqCounts:
total_requests += self.blockReqCounts[key]
if self.blockReqCounts[key] > 1:
raise AssertionError("Error, test failed: block %064x requested more than once" % key)
if total_requests > MAX_REQUESTS:
raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
print("Round %d: success (total requests: %d)" % (count, total_requests))
self.disconnectOkay = True
self.connection.disconnect_node()
class MaxBlocksInFlightTest(RobbieconTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("ROBBIECOND", "robbiecond"),
help="Binary to test max block requests behavior")
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager()
test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
NetworkThread().start() # Start up network handling in another thread
test.run()
if __name__ == '__main__':
MaxBlocksInFlightTest().main()
| []
| []
| [
"ROBBIECOND"
]
| [] | ["ROBBIECOND"] | python | 1 | 0 | |
config/wsgi.py | """
WSGI config for Electivapp project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# electivapp directory.
app_path = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(os.path.join(app_path, 'electivapp'))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
src/main/java/DB.java | import org.sql2o.Connection;
import org.sql2o.Sql2o;
import java.net.URI;
import java.net.URISyntaxException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class DB {
private static URI dbUri;
public static Sql2o sql2o;
static {
Logger logger = LoggerFactory.getLogger(DB.class);
try {
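// DATABASE_URL is set by hosting platforms such as Heroku; fall back to
// a local Postgres database when it is absent.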
if (System.getenv("DATABASE_URL") == null) {
dbUri = new URI("postgres://localhost:5432/wildlife_tracker");
} else {
dbUri = new URI(System.getenv("DATABASE_URL"));
}
int port = dbUri.getPort();
String host = dbUri.getHost();
String path = dbUri.getPath();
String username = (dbUri.getUserInfo() == null) ? DatabaseProps.username : dbUri.getUserInfo().split(":")[0];
String password = (dbUri.getUserInfo() == null) ? DatabaseProps.password : dbUri.getUserInfo().split(":")[1];
sql2o = new Sql2o("jdbc:postgresql://" + host + ":" + port + path, username, password);
} catch (URISyntaxException e ) {
logger.error("Unable to connect to database.");
}
}
//production database
// public static Sql2o sql2o = new Sql2o(
// "jdbc:postgresql://ec2-54-247-79-178.eu-west-1.compute.amazonaws.com:5432/d7esparbsibo8e",
// "zhechbctmwouul",
// "0787c335a37c42a426954f7337810b2ef68c27affb9b96c22bf487d9bd606ebd");
//development database
// public static Sql2o sql2o = new Sql2o(
// "jdbc:postgresql://localhost:5432/wildlife_tracker",
// "User",
// "7181");
} | [
"\"DATABASE_URL\"",
"\"DATABASE_URL\""
]
| []
| [
"DATABASE_URL"
]
| [] | ["DATABASE_URL"] | java | 1 | 0 | |
sdk/resourcemanager/appplatform/armappplatform/ze_generated_example_certificates_client_test.go | //go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
package armappplatform_test
import (
"context"
"log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/appplatform/armappplatform"
)
// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/tree/main/specification/appplatform/resource-manager/Microsoft.AppPlatform/preview/2022-05-01-preview/examples/Certificates_Get.json
func ExampleCertificatesClient_Get() {
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
log.Fatalf("failed to obtain a credential: %v", err)
}
ctx := context.Background()
client, err := armappplatform.NewCertificatesClient("00000000-0000-0000-0000-000000000000", cred, nil)
if err != nil {
log.Fatalf("failed to create client: %v", err)
}
res, err := client.Get(ctx,
"myResourceGroup",
"myservice",
"mycertificate",
nil)
if err != nil {
log.Fatalf("failed to finish the request: %v", err)
}
// TODO: use response item
_ = res
}
// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/tree/main/specification/appplatform/resource-manager/Microsoft.AppPlatform/preview/2022-05-01-preview/examples/Certificates_CreateOrUpdate.json
func ExampleCertificatesClient_BeginCreateOrUpdate() {
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
log.Fatalf("failed to obtain a credential: %v", err)
}
ctx := context.Background()
client, err := armappplatform.NewCertificatesClient("00000000-0000-0000-0000-000000000000", cred, nil)
if err != nil {
log.Fatalf("failed to create client: %v", err)
}
poller, err := client.BeginCreateOrUpdate(ctx,
"myResourceGroup",
"myservice",
"mycertificate",
armappplatform.CertificateResource{
Properties: &armappplatform.KeyVaultCertificateProperties{
Type: to.Ptr("KeyVaultCertificate"),
CertVersion: to.Ptr("08a219d06d874795a96db47e06fbb01e"),
KeyVaultCertName: to.Ptr("mycert"),
VaultURI: to.Ptr("https://myvault.vault.azure.net"),
},
},
nil)
if err != nil {
log.Fatalf("failed to finish the request: %v", err)
}
res, err := poller.PollUntilDone(ctx, nil)
if err != nil {
log.Fatalf("failed to pull the result: %v", err)
}
// TODO: use response item
_ = res
}
// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/tree/main/specification/appplatform/resource-manager/Microsoft.AppPlatform/preview/2022-05-01-preview/examples/Certificates_Delete.json
func ExampleCertificatesClient_BeginDelete() {
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
log.Fatalf("failed to obtain a credential: %v", err)
}
ctx := context.Background()
client, err := armappplatform.NewCertificatesClient("00000000-0000-0000-0000-000000000000", cred, nil)
if err != nil {
log.Fatalf("failed to create client: %v", err)
}
poller, err := client.BeginDelete(ctx,
"myResourceGroup",
"myservice",
"mycertificate",
nil)
if err != nil {
log.Fatalf("failed to finish the request: %v", err)
}
_, err = poller.PollUntilDone(ctx, nil)
if err != nil {
log.Fatalf("failed to pull the result: %v", err)
}
}
// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/tree/main/specification/appplatform/resource-manager/Microsoft.AppPlatform/preview/2022-05-01-preview/examples/Certificates_List.json
func ExampleCertificatesClient_NewListPager() {
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
log.Fatalf("failed to obtain a credential: %v", err)
}
ctx := context.Background()
client, err := armappplatform.NewCertificatesClient("00000000-0000-0000-0000-000000000000", cred, nil)
if err != nil {
log.Fatalf("failed to create client: %v", err)
}
pager := client.NewListPager("myResourceGroup",
"myService",
nil)
for pager.More() {
nextResult, err := pager.NextPage(ctx)
if err != nil {
log.Fatalf("failed to advance page: %v", err)
}
for _, v := range nextResult.Value {
// TODO: use page item
_ = v
}
}
}
| []
| []
| []
| [] | [] | go | null | null | null |
env/lib/python3.6/site-packages/django/core/management/commands/runserver.py | import errno
import os
import re
import socket
import sys
from datetime import datetime
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import (
WSGIServer, get_internal_wsgi_application, run,
)
from django.utils import autoreload
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
class Command(BaseCommand):
help = "Starts a lightweight Web server for development."
# Validation is called explicitly each time the server is reloaded.
requires_system_checks = False
leave_locale_alone = True
stealth_options = ('shutdown_message',)
default_addr = '127.0.0.1'
default_addr_ipv6 = '::1'
default_port = '8000'
protocol = 'http'
server_cls = WSGIServer
def add_arguments(self, parser):
parser.add_argument(
'addrport', nargs='?',
help='Optional port number, or ipaddr:port'
)
parser.add_argument(
'--ipv6', '-6', action='store_true', dest='use_ipv6',
help='Tells Django to use an IPv6 address.',
)
parser.add_argument(
'--nothreading', action='store_false', dest='use_threading',
help='Tells Django to NOT use threading.',
)
parser.add_argument(
'--noreload', action='store_false', dest='use_reloader',
help='Tells Django to NOT use the auto-reloader.',
)
def execute(self, *args, **options):
if options['no_color']:
# We rely on the environment because it's currently the only
# way to reach WSGIRequestHandler. This seems an acceptable
# compromise considering `runserver` runs indefinitely.
os.environ["DJANGO_COLORS"] = "nocolor"
super().execute(*args, **options)
def get_handler(self, *args, **options):
"""Return the default WSGI handler for the runner."""
return get_internal_wsgi_application()
def handle(self, *args, **options):
from django.conf import settings
if not settings.DEBUG and not settings.ALLOWED_HOSTS:
raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.')
self.use_ipv6 = options['use_ipv6']
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError('Your Python does not support IPv6.')
self._raw_ipv6 = False
if not options['addrport']:
self.addr = ''
self.port = self.default_port
else:
m = re.match(naiveip_re, options['addrport'])
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % options['addrport'])
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." % self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
if not self.addr:
self.addr = self.default_addr_ipv6 if self.use_ipv6 else self.default_addr
self._raw_ipv6 = self.use_ipv6
self.run(**options)
def run(self, **options):
"""Run the server, using the autoreloader if needed."""
use_reloader = options['use_reloader']
if use_reloader:
autoreload.main(self.inner_run, None, options)
else:
self.inner_run(None, **options)
def inner_run(self, *args, **options):
# If an exception was silenced in ManagementUtility.execute in order
# to be raised in the child process, raise it now.
autoreload.raise_last_exception()
threading = options['use_threading']
# 'shutdown_message' is a stealth option.
shutdown_message = options.get('shutdown_message', '')
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
self.stdout.write("Performing system checks...\n\n")
self.check(display_num_errors=True)
# Need to check migrations here, so can't use the
# requires_migrations_check attribute.
self.check_migrations()
now = datetime.now().strftime('%B %d, %Y - %X')
self.stdout.write(now)
self.stdout.write((
"Django version %(version)s, using settings %(settings)r\n"
"Starting development server at %(protocol)s://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"version": self.get_version(),
"settings": settings.SETTINGS_MODULE,
"protocol": self.protocol,
"addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
"port": self.port,
"quit_command": quit_command,
})
try:
handler = self.get_handler(*args, **options)
run(self.addr, int(self.port), handler,
ipv6=self.use_ipv6, threading=threading, server_cls=self.server_cls)
except socket.error as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
}
try:
error_text = ERRORS[e.errno]
except KeyError:
error_text = e
self.stderr.write("Error: %s" % error_text)
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
self.stdout.write(shutdown_message)
sys.exit(0)
# Kept for backward compatibility
BaseRunserverCommand = Command
| []
| []
| [
"DJANGO_COLORS"
]
| [] | ["DJANGO_COLORS"] | python | 1 | 0 | |
docker-compose/Worker/settings.py | """
Django settings for pixelwalker project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+3ooebxuz1=jky#jtfdi=^29)9@c66h^$5i^jaq$qsxlh=s3(4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# SECURITY WARNING: don't run with '*' allowed hosts in production!
ALLOWED_HOSTS = [ '*' ]
# Application definition
INSTALLED_APPS = [
'worker.apps.WorkerConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pixelwalker.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, "engine", "webgui", "templates"),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pixelwalker.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# Media Files upload to
MEDIA_ROOT = '/media_library'
MEDIA_URL = '/media/'
# Redis
REDIS_PORT = 6379
REDIS_DB = 0
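# REDIS_PORT_6379_TCP_ADDR is injected by legacy Docker links; fall back to
# the 'redis' service hostname used under docker-compose.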
REDIS_HOST = os.environ.get('REDIS_PORT_6379_TCP_ADDR', 'redis')
# Celery settings
CELERY_BROKER_URL = 'amqp://guest:guest@rabbit//'
#: Only add pickle to this list if your broker is secured
#: from unwanted access (see userguide/security.html)
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_RESULT_BACKEND = 'redis://%s:%d/%d' % (REDIS_HOST, REDIS_PORT, REDIS_DB)
CELERY_TASK_SERIALIZER = 'json'
| []
| []
| [
"REDIS_PORT_6379_TCP_ADDR"
]
| [] | ["REDIS_PORT_6379_TCP_ADDR"] | python | 1 | 0 | |
src/chatql_line.py | # coding=utf-8
#
# Licensed under the MIT License
"""Line Webhook Server."""
import os
import json
import logging
import subprocess
from flask import Flask, request, abort
import chatql
from linebot import LineBotApi, WebhookHandler
from linebot.exceptions import InvalidSignatureError
from linebot.models import MessageEvent, TextMessage, TextSendMessage
app = Flask(__name__)
line_bot_api = LineBotApi(os.environ.get('LINE_CHANNEL_ACCESS_TOKEN'))
handler = WebhookHandler(os.environ.get('LINE_CHANNEL_SECRET'))
app.logger.setLevel(logging.INFO)
client = chatql.mongodb_client.MongoClient(
**{"db": os.environ.get('MONGO_DB', 'chatql'),
"host": os.environ.get('MONGO_HOST', '127.0.0.1'),
"port": int(os.environ.get('MONGO_PORT', '27017')),
"username": os.environ.get('MONGO_USER'),
"password": os.environ.get('MONGO_PASSWORD')})
engine = chatql.engine.DialogEngine(client)
# Default to the no-op shell command "false" so an unset
# SCENARIO_DOWNLOAD_COMMAND is not executed as the literal string "None".
download_command = os.environ.get('SCENARIO_DOWNLOAD_COMMAND') or 'false'
exitcode, _ = subprocess.getstatusoutput(download_command)
if exitcode == 0:
client.import_scenario("scenario.json")
def _create_user(**attributes):
"""Create chatql managed user.
Args:
attributes (dict): (Optional) user attributes dictionary
Return:
ID (str): User ID string managed chatql
"""
query = '''
mutation createUser($optionalArgs: String) {
createUser(optionalArgs: $optionalArgs) {
user {
id
}
}
}
'''
result = chatql.schema.execute(
query,
context={'engine': engine},
variables={"optionalArgs": json.dumps(attributes)})
if result.errors is not None:
app.logger.error(result.errors)
abort(500)
return result.data['createUser']['user']['id']
def _get_user(**attributes):
"""Get chatql managed user.
Args:
attributes (dict): target user attributes dictionary
Return:
ID (str): User ID string managed by chatql. A new user is created when none exists.
"""
query = '''
query getUser($optionalArgs: String) {
user(optionalArgs: $optionalArgs) {
id
}
}
'''
result = chatql.schema.execute(
query,
context={'engine': engine},
variables={"optionalArgs": json.dumps(attributes)})
if result.errors is not None:
app.logger.error(result.errors)
abort(500)
if result.data['user']['id'] is None:
return _create_user(**attributes)
return result.data['user']['id']
def _generate_response(request, user_id):
"""Generate response with chatql.
Args:
request (str): User input text
user_id (str): User ID string managed chatql
Return:
response (str): Response string
"""
query = '''
query getResponse($request: String!, $user: ID) {
response(request: $request, user: $user) {
id
text
}
}
'''
result = chatql.schema.execute(
query,
context={'engine': engine},
variables={'request': request, 'user': user_id})
if result.errors is not None:
app.logger.error(result.errors)
abort(500)
return result.data['response']['text']
@app.route("/callback", methods=['POST'])
def callback():
"""Line Webhook interface."""
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
"""Text Message Handler."""
if event.reply_token == '00000000000000000000000000000000':
return
user_id = _get_user(**{"user_id": event.source.user_id})
response = _generate_response(event.message.text, user_id)
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=response))
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(os.environ.get('PORT', 5000)))
| []
| []
| [
"PORT",
"MONGO_HOST",
"LINE_CHANNEL_ACCESS_TOKEN",
"MONGO_USER",
"SCENARIO_DOWNLOAD_COMMAND",
"MONGO_PORT",
"MONGO_PASSWORD",
"MONGO_DB",
"LINE_CHANNEL_SECRET"
]
| [] | ["PORT", "MONGO_HOST", "LINE_CHANNEL_ACCESS_TOKEN", "MONGO_USER", "SCENARIO_DOWNLOAD_COMMAND", "MONGO_PORT", "MONGO_PASSWORD", "MONGO_DB", "LINE_CHANNEL_SECRET"] | python | 9 | 0 | |
go/vt/vtgate/endtoend/main_test.go | /*
Copyright 2019 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endtoend
import (
"flag"
"fmt"
"os"
"path"
"testing"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/vt/vttest"
vschemapb "vitess.io/vitess/go/vt/proto/vschema"
vttestpb "vitess.io/vitess/go/vt/proto/vttest"
)
var (
cluster *vttest.LocalCluster
vtParams mysql.ConnParams
mysqlParams mysql.ConnParams
grpcAddress string
schema = `
create table t1(
id1 bigint,
id2 bigint,
primary key(id1)
) Engine=InnoDB;
create table t1_id2_idx(
id2 bigint,
keyspace_id varbinary(10),
primary key(id2)
) Engine=InnoDB;
create table vstream_test(
id bigint,
val bigint,
primary key(id)
) Engine=InnoDB;
create table aggr_test(
id bigint,
val1 varbinary(16),
val2 bigint,
primary key(id)
) Engine=InnoDB;
`
vschema = &vschemapb.Keyspace{
Sharded: true,
Vindexes: map[string]*vschemapb.Vindex{
"hash": {
Type: "hash",
},
"t1_id2_vdx": {
Type: "consistent_lookup_unique",
Params: map[string]string{
"table": "t1_id2_idx",
"from": "id2",
"to": "keyspace_id",
},
Owner: "t1",
},
},
Tables: map[string]*vschemapb.Table{
"t1": {
ColumnVindexes: []*vschemapb.ColumnVindex{{
Column: "id1",
Name: "hash",
}, {
Column: "id2",
Name: "t1_id2_vdx",
}},
},
"t1_id2_idx": {
ColumnVindexes: []*vschemapb.ColumnVindex{{
Column: "id2",
Name: "hash",
}},
},
"vstream_test": {
ColumnVindexes: []*vschemapb.ColumnVindex{{
Column: "id",
Name: "hash",
}},
},
"aggr_test": {
ColumnVindexes: []*vschemapb.ColumnVindex{{
Column: "id",
Name: "hash",
}},
},
},
}
)
func TestMain(m *testing.M) {
flag.Parse()
exitCode := func() int {
var cfg vttest.Config
cfg.Topology = &vttestpb.VTTestTopology{
Keyspaces: []*vttestpb.Keyspace{{
Name: "ks",
Shards: []*vttestpb.Shard{{
Name: "-80",
}, {
Name: "80-",
}},
}},
}
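// VTTOP must point at the Vitess source checkout; rbr.cnf enables
// row-based replication, which the VStream tests require.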
cfg.ExtraMyCnf = []string{path.Join(os.Getenv("VTTOP"), "config/mycnf/rbr.cnf")}
if err := cfg.InitSchemas("ks", schema, vschema); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.RemoveAll(cfg.SchemaDir)
return 1
}
defer os.RemoveAll(cfg.SchemaDir)
cluster = &vttest.LocalCluster{
Config: cfg,
}
if err := cluster.Setup(); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
cluster.TearDown()
return 1
}
defer cluster.TearDown()
vtParams = mysql.ConnParams{
Host: "localhost",
Port: cluster.Env.PortForProtocol("vtcombo_mysql_port", ""),
}
mysqlParams = cluster.MySQLConnParams()
grpcAddress = fmt.Sprintf("localhost:%d", cluster.Env.PortForProtocol("vtcombo", "grpc"))
return m.Run()
}()
os.Exit(exitCode)
}
| [
"\"VTTOP\""
]
| []
| [
"VTTOP"
]
| [] | ["VTTOP"] | go | 1 | 0 | |
test/runtest.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import random
import re
import setproctitle
import string
import subprocess
import sys
import threading
import time
from collections import defaultdict, namedtuple, OrderedDict
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import pytest
import ray
import ray.ray_constants as ray_constants
import ray.test.cluster_utils
import ray.test.test_utils
logger = logging.getLogger(__name__)
def assert_equal(obj1, obj2):
module_numpy = (type(obj1).__module__ == np.__name__
or type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
or (hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case because currently np.testing.assert_equal
# fails because we do not properly handle different numerical
# types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
list(obj2.__dict__.keys()) + special_keys)), ("Objects {} "
"and {} are "
"different.".format(
obj1, obj2))
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (ray.serialization.is_named_tuple(type(obj1))
or ray.serialization.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), ("Objects {} and {} are named tuples "
"with different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
else:
assert obj1 == obj2, "Objects {} and {} are different.".format(
obj1, obj2)
if sys.version_info >= (3, 0):
long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
long_extras = [
long(0), # noqa: E501,F821
np.array([
["hi", u"hi"],
[1.3, long(1)] # noqa: E501,F821
])
]
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
string.printable, "\u262F", u"hello world", u"\xff\xfe\x9c\x001\x000\x00",
None, True, False, [], (), {},
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
np.zeros([100, 100]),
np.random.normal(size=[100, 100]),
np.array(["hi", 3]),
np.array(["hi", 3], dtype=object)
] + long_extras
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{"obj{}".format(i): np.random.normal(size=[100, 100])
for i in range(10)},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
(
(((((((((), ), ), ), ), ), ), ), ), ),
{
"a": {
"b": {
"c": {
"d": {}
}
}
}
}
]
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar(object):
def __init__(self):
for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz(object):
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux(object):
def __init__(self):
self.objs = [Foo(), Bar(), Baz()]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = namedtuple("Point", ["x", "y"])
NamedTupleExample = namedtuple("Example",
"field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [
Exception("Test object."),
CustomError(),
Point(11, y=22),
Foo(),
Bar(),
Baz(), # Qux(), SubQux(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3])
]
BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS
LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
# The check that type(obj).__module__ != "numpy" should be unnecessary, but
# otherwise this seems to fail on Mac OS X on Travis.
DICT_OBJECTS = (
[{
obj: obj
} for obj in PRIMITIVE_OBJECTS
if (obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{
0: obj
} for obj in BASE_OBJECTS] + [{
Foo(123): Foo(456)
}])
RAY_TEST_OBJECTS = BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS
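# Together these lists exercise primitives, nested containers, and custom
# classes, so the round-trip serialization tests below get broad coverage.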
@pytest.fixture
def ray_start():
# Start the Ray processes.
ray.init(num_cpus=1)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.fixture
def shutdown_only():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
def test_passing_arguments_by_value(ray_start):
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in RAY_TEST_OBJECTS:
assert_equal(obj, ray.get(f.remote(obj)))
def test_ray_recursive_objects(ray_start):
class ClassA(object):
pass
# Make a list that contains itself.
lst = []
lst.append(lst)
# Make an object that contains itself as a field.
a1 = ClassA()
a1.field = a1
# Make two objects that contain each other as fields.
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
# Make a dictionary that contains itself.
d1 = {}
d1["key"] = d1
# Create a list of recursive objects.
recursive_objects = [lst, a1, a2, a3, d1]
# Check that exceptions are thrown when we serialize the recursive
# objects.
for obj in recursive_objects:
with pytest.raises(Exception):
ray.put(obj)
def test_passing_arguments_by_value_out_of_the_box(ray_start):
@ray.remote
def f(x):
return x
# Test passing lambdas.
def temp():
return 1
assert ray.get(f.remote(temp))() == 1
assert ray.get(f.remote(lambda x: x + 1))(3) == 4
# Test sets.
assert ray.get(f.remote(set())) == set()
s = {1, (1, 2, "hi")}
assert ray.get(f.remote(s)) == s
# Test types.
assert ray.get(f.remote(int)) == int
assert ray.get(f.remote(float)) == float
assert ray.get(f.remote(str)) == str
class Foo(object):
def __init__(self):
pass
# Make sure that we can put and get a custom type. Note that the result
# won't be "equal" to Foo.
ray.get(ray.put(Foo))
def test_putting_object_that_closes_over_object_id(ray_start):
# This test is here to prevent a regression of
# https://github.com/ray-project/ray/issues/1317.
class Foo(object):
def __init__(self):
self.val = ray.put(0)
def method(self):
f
f = Foo()
with pytest.raises(ray.raylet.common_error):
ray.put(f)
def test_put_get(shutdown_only):
ray.init(num_cpus=0)
for i in range(100):
value_before = i * 10**6
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = i * 10**6 * 1.0
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = "h" * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = [1] * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
def test_custom_serializers(shutdown_only):
ray.init(num_cpus=1)
class Foo(object):
def __init__(self):
self.x = 3
def custom_serializer(obj):
return 3, "string1", type(obj).__name__
def custom_deserializer(serialized_obj):
return serialized_obj, "string2"
ray.register_custom_serializer(
Foo, serializer=custom_serializer, deserializer=custom_deserializer)
assert ray.get(ray.put(Foo())) == ((3, "string1", Foo.__name__), "string2")
class Bar(object):
def __init__(self):
self.x = 3
ray.register_custom_serializer(
Bar, serializer=custom_serializer, deserializer=custom_deserializer)
@ray.remote
def f():
return Bar()
assert ray.get(f.remote()) == ((3, "string1", Bar.__name__), "string2")
def test_serialization_final_fallback(ray_start):
pytest.importorskip("catboost")
# This test will only run when "catboost" is installed.
from catboost import CatBoostClassifier
model = CatBoostClassifier(
iterations=2,
depth=2,
learning_rate=1,
loss_function="Logloss",
logging_level="Verbose")
reconstructed_model = ray.get(ray.put(model))
assert set(model.get_params().items()) == set(
reconstructed_model.get_params().items())
def test_register_class(shutdown_only):
ray.init(num_cpus=2)
# Check that putting an object of a class that has not been registered
# throws an exception.
class TempClass(object):
pass
ray.get(ray.put(TempClass()))
# Test subtypes of dictionaries.
value_before = OrderedDict([("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
assert value_before == ray.get(object_id)
value_before = defaultdict(lambda: 0, [("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
assert value_before == ray.get(object_id)
value_before = defaultdict(lambda: [], [("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
assert value_before == ray.get(object_id)
# Test passing custom classes into remote functions from the driver.
@ray.remote
def f(x):
return x
foo = ray.get(f.remote(Foo(7)))
assert foo == Foo(7)
regex = re.compile(r"\d+\.\d*")
new_regex = ray.get(f.remote(regex))
# This seems to fail on the system Python 3 that comes with
# Ubuntu, so it is commented out for now:
# assert regex == new_regex
# Instead, we do this:
assert regex.pattern == new_regex.pattern
# Test returning custom classes created on workers.
@ray.remote
def g():
return SubQux(), Qux()
subqux, qux = ray.get(g.remote())
assert subqux.objs[2].foo.value == 0
# Test exporting custom class definitions from one worker to another
# when the worker is blocked in a get.
class NewTempClass(object):
def __init__(self, value):
self.value = value
@ray.remote
def h1(x):
return NewTempClass(x)
@ray.remote
def h2(x):
return ray.get(h1.remote(x))
assert ray.get(h2.remote(10)).value == 10
# Test registering multiple classes with the same name.
@ray.remote(num_return_vals=3)
def j():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = []
for _ in range(5):
results += j.remote()
for i in range(len(results) // 3):
c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
@ray.remote
def k():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = ray.get([k.remote() for _ in range(5)])
for c0, c1, c2 in results:
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
def test_keyword_args(shutdown_only):
@ray.remote
def keyword_fct1(a, b="hello"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct2(a="hello", b="world"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct3(a, b, c="hello", d="world"):
return "{} {} {} {}".format(a, b, c, d)
ray.init(num_cpus=1)
x = keyword_fct1.remote(1)
assert ray.get(x) == "1 hello"
x = keyword_fct1.remote(1, "hi")
assert ray.get(x) == "1 hi"
x = keyword_fct1.remote(1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct1.remote(a=1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct2.remote(a="w", b="hi")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(b="hi", a="w")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(a="w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote(b="hi")
assert ray.get(x) == "hello hi"
x = keyword_fct2.remote("w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote("w", "hi")
assert ray.get(x) == "w hi"
x = keyword_fct3.remote(0, 1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(a=0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, d="hi", c="w")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, c="w")
assert ray.get(x) == "0 1 w world"
x = keyword_fct3.remote(0, 1, d="hi")
assert ray.get(x) == "0 1 hello hi"
x = keyword_fct3.remote(0, 1)
assert ray.get(x) == "0 1 hello world"
x = keyword_fct3.remote(a=0, b=1)
assert ray.get(x) == "0 1 hello world"
# Check that we cannot pass invalid keyword arguments to functions.
@ray.remote
def f1():
return
@ray.remote
def f2(x, y=0, z=0):
return
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f1.remote(3)
with pytest.raises(Exception):
f1.remote(x=3)
with pytest.raises(Exception):
f2.remote(0, w=0)
with pytest.raises(Exception):
f2.remote(3, x=3)
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f2.remote(1, 2, 3, 4)
@ray.remote
def f3(x):
return x
assert ray.get(f3.remote(4)) == 4
def test_variable_number_of_args(shutdown_only):
@ray.remote
def varargs_fct1(*a):
return " ".join(map(str, a))
@ray.remote
def varargs_fct2(a, *b):
return " ".join(map(str, b))
try:
@ray.remote
def kwargs_throw_exception(**c):
return ()
kwargs_exception_thrown = False
except Exception:
kwargs_exception_thrown = True
ray.init(num_cpus=1)
x = varargs_fct1.remote(0, 1, 2)
assert ray.get(x) == "0 1 2"
x = varargs_fct2.remote(0, 1, 2)
assert ray.get(x) == "1 2"
assert kwargs_exception_thrown
@ray.remote
def f1(*args):
return args
@ray.remote
def f2(x, y, *args):
return x, y, args
assert ray.get(f1.remote()) == ()
assert ray.get(f1.remote(1)) == (1, )
assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)
with pytest.raises(Exception):
f2.remote()
with pytest.raises(Exception):
f2.remote(1)
assert ray.get(f2.remote(1, 2)) == (1, 2, ())
assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))
assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))
def test_no_args(ray_start):
@ray.remote
def no_op():
pass
ray.get(no_op.remote())
def test_defining_remote_functions(shutdown_only):
ray.init(num_cpus=3)
# Test that we can define a remote function in the shell.
@ray.remote
def f(x):
return x + 1
assert ray.get(f.remote(0)) == 1
# Test that we can redefine the remote function.
@ray.remote
def f(x):
return x + 10
while True:
val = ray.get(f.remote(0))
assert val in [1, 10]
if val == 10:
break
else:
logger.info("Still using old definition of f, trying again.")
# Test that we can close over plain old data.
data = [
np.zeros([3, 5]), (1, 2, "a"), [0.0, 1.0, 1 << 62], 1 << 60, {
"a": np.zeros(3)
}
]
@ray.remote
def g():
return data
ray.get(g.remote())
# Test that we can close over modules.
@ray.remote
def h():
return np.zeros([3, 5])
assert_equal(ray.get(h.remote()), np.zeros([3, 5]))
@ray.remote
def j():
return time.time()
ray.get(j.remote())
# Test that we can define remote functions that call other remote
# functions.
@ray.remote
def k(x):
return x + 1
@ray.remote
def k2(x):
return ray.get(k.remote(x))
@ray.remote
def m(x):
return ray.get(k2.remote(x))
assert ray.get(k.remote(1)) == 2
assert ray.get(k2.remote(1)) == 2
assert ray.get(m.remote(1)) == 2
def test_submit_api(shutdown_only):
ray.init(num_cpus=1, num_gpus=1, resources={"Custom": 1})
@ray.remote
def f(n):
return list(range(n))
@ray.remote
def g():
return ray.get_gpu_ids()
assert f._remote([0], num_return_vals=0) is None
id1 = f._remote(args=[1], num_return_vals=1)
assert ray.get(id1) == [0]
id1, id2 = f._remote(args=[2], num_return_vals=2)
assert ray.get([id1, id2]) == [0, 1]
id1, id2, id3 = f._remote(args=[3], num_return_vals=3)
assert ray.get([id1, id2, id3]) == [0, 1, 2]
assert ray.get(
g._remote(
args=[], num_cpus=1, num_gpus=1,
resources={"Custom": 1})) == [0]
infeasible_id = g._remote(args=[], resources={"NonexistentCustom": 1})
ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)
assert len(ready_ids) == 0
assert len(remaining_ids) == 1
@ray.remote
class Actor(object):
def __init__(self, x, y=0):
self.x = x
self.y = y
def method(self, a, b=0):
return self.x, self.y, a, b
def gpu_ids(self):
return ray.get_gpu_ids()
a = Actor._remote(
args=[0], kwargs={"y": 1}, num_gpus=1, resources={"Custom": 1})
id1, id2, id3, id4 = a.method._remote(
args=["test"], kwargs={"b": 2}, num_return_vals=4)
assert ray.get([id1, id2, id3, id4]) == [0, 1, "test", 2]
def test_get_multiple(shutdown_only):
ray.init(num_cpus=1)
object_ids = [ray.put(i) for i in range(10)]
assert ray.get(object_ids) == list(range(10))
# Get a random choice of object IDs with duplicates.
indices = list(np.random.choice(range(10), 5))
indices += indices
results = ray.get([object_ids[i] for i in indices])
assert results == indices
def test_get_multiple_experimental(shutdown_only):
ray.init(num_cpus=1)
object_ids = [ray.put(i) for i in range(10)]
object_ids_tuple = tuple(object_ids)
assert ray.experimental.get(object_ids_tuple) == list(range(10))
object_ids_nparray = np.array(object_ids)
assert ray.experimental.get(object_ids_nparray) == list(range(10))
def test_get_dict(shutdown_only):
ray.init(num_cpus=1)
d = {str(i): ray.put(i) for i in range(5)}
for i in range(5, 10):
d[str(i)] = i
result = ray.experimental.get(d)
expected = {str(i): i for i in range(10)}
assert result == expected
def test_wait(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
ready_ids, remaining_ids = ray.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)
assert set(ready_ids) == set(objectids)
assert remaining_ids == []
objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)
assert time.time() - start_time < 2
assert len(ready_ids) == 3
assert len(remaining_ids) == 1
ray.wait(objectids)
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)
assert time.time() - start_time < 5
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
# Verify that calling wait with duplicate object IDs throws an
# exception.
x = ray.put(1)
with pytest.raises(Exception):
ray.wait([x, x])
# Make sure it is possible to call wait with an empty list.
ready_ids, remaining_ids = ray.wait([])
assert ready_ids == []
assert remaining_ids == []
# Test semantics of num_returns with no timeout.
oids = [ray.put(i) for i in range(10)]
(found, rest) = ray.wait(oids, num_returns=2)
assert len(found) == 2
assert len(rest) == 8
# Verify that incorrect usage raises a TypeError.
x = ray.put(1)
with pytest.raises(TypeError):
ray.wait(x)
with pytest.raises(TypeError):
ray.wait(1)
with pytest.raises(TypeError):
ray.wait([1])
def test_wait_iterables(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
objectids = np.array(
[f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)])
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
def test_multiple_waits_and_gets(shutdown_only):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
ray.init(num_cpus=3)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(l):
# The argument l should be a list containing one object ID.
ray.wait([l[0]])
@ray.remote
def h(l):
# The argument l should be a list containing one object ID.
ray.get(l[0])
# Make sure that multiple wait requests involving the same object ID
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ID all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
ray.init(num_cpus=1)
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
assert ray.get(res1) == (1, 2, 3, 4)
assert ray.get(res2) == (1, 2, 3, 4)
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
def test_running_function_on_all_workers(shutdown_only):
ray.init(num_cpus=1)
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
assert "fake_directory" == ray.get(get_path1.remote())[-1]
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(shutdown_only):
ray.init(num_cpus=2)
@ray.remote
def f():
with ray.profile(
"custom_event",
extra_data={"name": "custom name"}) as ray_prof:
ray_prof.set_attribute("key", "value")
ray.put(1)
object_id = f.remote()
ray.wait([object_id])
ray.get(object_id)
# Wait until all of the profiling information appears in the profile
# table.
timeout_seconds = 20
start_time = time.time()
while True:
if time.time() - start_time > timeout_seconds:
raise Exception("Timed out while waiting for information in "
"profile table.")
profile_data = ray.global_state.chrome_tracing_dump()
event_types = {event["cat"] for event in profile_data}
expected_types = [
"worker_idle",
"task",
"task:deserialize_arguments",
"task:execute",
"task:store_outputs",
"wait_for_function",
"ray.get",
"ray.put",
"ray.wait",
"submit_task",
"fetch_and_run_function",
"register_remote_function",
"custom_event", # This is the custom one from ray.profile.
]
if all(expected_type in event_types
for expected_type in expected_types):
break
@pytest.fixture()
def ray_start_cluster():
cluster = ray.test.cluster_utils.Cluster()
yield cluster
# The code after the yield will run as teardown code.
ray.shutdown()
cluster.shutdown()
def test_object_transfer_dump(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
for i in range(num_nodes):
cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
return
# These objects will live on different nodes.
object_ids = [
f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
]
# Broadcast each object from each machine to each other machine.
for object_id in object_ids:
ray.get([
f._remote(args=[object_id], resources={str(i): 1})
for i in range(num_nodes)
])
# The profiling information only flushes once every second.
time.sleep(1.1)
transfer_dump = ray.global_state.chrome_tracing_object_transfer_dump()
# Make sure the transfer dump can be serialized with JSON.
json.loads(json.dumps(transfer_dump))
assert len(transfer_dump) >= num_nodes**2
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_receive"
}) == num_nodes
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_send"
}) == num_nodes
def test_identical_function_names(shutdown_only):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
ray.init(num_cpus=1)
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
assert ray.get(results1) == num_calls * [1]
assert ray.get(results2) == num_calls * [2]
assert ray.get(results3) == num_calls * [3]
assert ray.get(results4) == num_calls * [4]
assert ray.get(results5) == num_calls * [5]
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g():
return 2
@ray.remote # noqa: F811
def g():
return 3
@ray.remote # noqa: F811
def g():
return 4
@ray.remote # noqa: F811
def g():
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
assert result_values == num_calls * [5]
def test_illegal_api_calls(shutdown_only):
ray.init(num_cpus=1)
# Verify that we cannot call put on an ObjectID.
x = ray.put(1)
with pytest.raises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with pytest.raises(Exception):
ray.get(3)
def test_multithreading(shutdown_only):
# This test requires at least 2 CPUs to finish since the worker does not
    # release resources when joining the threads.
ray.init(num_cpus=2)
def run_test_in_multi_threads(test_case, num_threads=20, num_repeats=50):
"""A helper function that runs test cases in multiple threads."""
def wrapper():
for _ in range(num_repeats):
test_case()
time.sleep(random.randint(0, 10) / 1000.0)
return "ok"
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = [executor.submit(wrapper) for _ in range(num_threads)]
for future in futures:
assert future.result() == "ok"
@ray.remote
def echo(value, delay_ms=0):
if delay_ms > 0:
time.sleep(delay_ms / 1000.0)
return value
@ray.remote
class Echo(object):
def echo(self, value):
return value
def test_api_in_multi_threads():
"""Test using Ray api in multiple threads."""
# Test calling remote functions in multiple threads.
def test_remote_call():
value = random.randint(0, 1000000)
result = ray.get(echo.remote(value))
assert value == result
run_test_in_multi_threads(test_remote_call)
# Test multiple threads calling one actor.
actor = Echo.remote()
def test_call_actor():
value = random.randint(0, 1000000)
result = ray.get(actor.echo.remote(value))
assert value == result
run_test_in_multi_threads(test_call_actor)
# Test put and get.
def test_put_and_get():
value = random.randint(0, 1000000)
result = ray.get(ray.put(value))
assert value == result
run_test_in_multi_threads(test_put_and_get)
# Test multiple threads waiting for objects.
num_wait_objects = 10
objects = [
echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
]
def test_wait():
ready, _ = ray.wait(
objects,
num_returns=len(objects),
timeout=1000,
)
assert len(ready) == num_wait_objects
assert ray.get(ready) == list(range(num_wait_objects))
run_test_in_multi_threads(test_wait, num_repeats=1)
# Run tests in a driver.
test_api_in_multi_threads()
# Run tests in a worker.
@ray.remote
def run_tests_in_worker():
test_api_in_multi_threads()
return "ok"
assert ray.get(run_tests_in_worker.remote()) == "ok"
# Test actor that runs background threads.
@ray.remote
class MultithreadedActor(object):
def __init__(self):
self.lock = threading.Lock()
self.thread_results = []
def background_thread(self, wait_objects):
try:
# Test wait
ready, _ = ray.wait(
wait_objects,
num_returns=len(wait_objects),
timeout=1000,
)
assert len(ready) == len(wait_objects)
for _ in range(50):
num = 20
# Test remote call
results = [echo.remote(i) for i in range(num)]
assert ray.get(results) == list(range(num))
# Test put and get
objects = [ray.put(i) for i in range(num)]
assert ray.get(objects) == list(range(num))
time.sleep(random.randint(0, 10) / 1000.0)
except Exception as e:
with self.lock:
self.thread_results.append(e)
else:
with self.lock:
self.thread_results.append("ok")
def spawn(self):
wait_objects = [echo.remote(i, delay_ms=10) for i in range(20)]
self.threads = [
threading.Thread(
target=self.background_thread, args=(wait_objects, ))
for _ in range(20)
]
[thread.start() for thread in self.threads]
def join(self):
[thread.join() for thread in self.threads]
assert self.thread_results == ["ok"] * len(self.threads)
return "ok"
actor = MultithreadedActor.remote()
actor.spawn.remote()
    assert ray.get(actor.join.remote()) == "ok"
def test_free_objects_multi_node(ray_start_cluster):
# This test will do following:
# 1. Create 3 raylets that each hold an actor.
# 2. Each actor creates an object which is the deletion target.
# 3. Invoke 64 methods on each actor to flush plasma client.
# 4. After flushing, the plasma client releases the targets.
# 5. Check that the deletion targets have been deleted.
# Caution: if remote functions are used instead of actor methods,
# one raylet may create more than one worker to execute the
# tasks, so the flushing operations may be executed in different
# workers and the plasma client holding the deletion target
# may not be flushed.
cluster = ray_start_cluster
config = json.dumps({"object_manager_repeated_push_delay_ms": 1000})
for i in range(3):
cluster.add_node(
num_cpus=1,
resources={"Custom{}".format(i): 1},
_internal_config=config)
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"Custom0": 1})
class ActorOnNode0(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"Custom1": 1})
class ActorOnNode1(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"Custom2": 1})
class ActorOnNode2(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
def create(actors):
a = actors[0].get.remote()
b = actors[1].get.remote()
c = actors[2].get.remote()
(l1, l2) = ray.wait([a, b, c], num_returns=3)
assert len(l1) == 3
assert len(l2) == 0
return (a, b, c)
def flush(actors):
# Flush the Release History.
        # The current plasma client cache maintains a 64-item release list.
        # If that number changes, this will fail.
logger.info("Start Flush!")
for i in range(64):
ray.get([actor.get.remote() for actor in actors])
logger.info("Flush finished!")
def run_one_test(actors, local_only):
(a, b, c) = create(actors)
# The three objects should be generated on different object stores.
assert ray.get(a) != ray.get(b)
assert ray.get(a) != ray.get(c)
assert ray.get(c) != ray.get(b)
ray.internal.free([a, b, c], local_only=local_only)
flush(actors)
return (a, b, c)
actors = [
ActorOnNode0.remote(),
ActorOnNode1.remote(),
ActorOnNode2.remote()
]
    # Case 1: run with local_only=False. All 3 objects will be deleted.
(a, b, c) = run_one_test(actors, False)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)
# All the objects are deleted.
assert len(l1) == 0
assert len(l2) == 3
    # Case 2: run with local_only=True. Only 1 object will be deleted.
(a, b, c) = run_one_test(actors, True)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)
# One object is deleted and 2 objects are not.
assert len(l1) == 2
assert len(l2) == 1
    # The deleted object is the one in the same plasma store as the driver.
local_return = ray.worker.global_worker.plasma_client.store_socket_name
for object_id in l1:
assert ray.get(object_id) != local_return
def test_local_mode(shutdown_only):
@ray.remote
def local_mode_f():
return np.array([0, 0])
@ray.remote
def local_mode_g(x):
x[0] = 1
return x
ray.init(local_mode=True)
@ray.remote
def f():
return np.ones([3, 4, 5])
xref = f.remote()
# Remote functions should return by value.
assert_equal(xref, np.ones([3, 4, 5]))
# Check that ray.get is the identity.
assert_equal(xref, ray.get(xref))
y = np.random.normal(size=[11, 12])
# Check that ray.put is the identity.
assert_equal(y, ray.put(y))
    # Make sure objects are immutable; this example is why we need to copy
    # arguments before passing them into remote functions in python mode.
aref = local_mode_f.remote()
assert_equal(aref, np.array([0, 0]))
bref = local_mode_g.remote(aref)
# Make sure local_mode_g does not mutate aref.
assert_equal(aref, np.array([0, 0]))
assert_equal(bref, np.array([1, 0]))
# wait should return the first num_returns values passed in as the
# first list and the remaining values as the second list
num_returns = 5
object_ids = [ray.put(i) for i in range(20)]
ready, remaining = ray.wait(
object_ids, num_returns=num_returns, timeout=None)
assert_equal(ready, object_ids[:num_returns])
assert_equal(remaining, object_ids[num_returns:])
# Test actors in LOCAL_MODE.
@ray.remote
class LocalModeTestClass(object):
def __init__(self, array):
self.array = array
def set_array(self, array):
self.array = array
def get_array(self):
return self.array
def modify_and_set_array(self, array):
array[0] = -1
self.array = array
test_actor = LocalModeTestClass.remote(np.arange(10))
# Remote actor functions should return by value
assert_equal(test_actor.get_array.remote(), np.arange(10))
test_array = np.arange(10)
# Remote actor functions should not mutate arguments
test_actor.modify_and_set_array.remote(test_array)
assert_equal(test_array, np.arange(10))
# Remote actor functions should keep state
test_array[0] = -1
assert_equal(test_array, test_actor.get_array.remote())
# Check that actor handles work in Python mode.
@ray.remote
def use_actor_handle(handle):
array = np.ones(10)
handle.set_array.remote(array)
assert np.alltrue(array == ray.get(handle.get_array.remote()))
ray.get(use_actor_handle.remote(test_actor))
def test_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=2)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
time_buffer = 0.3
# At most 10 copies of this can run at once.
@ray.remote(num_cpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(10)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(11)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_cpus=3)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_gpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(2)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_multi_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=10)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
@ray.remote(num_cpus=1, num_gpus=9)
def f(n):
time.sleep(n)
@ray.remote(num_cpus=9, num_gpus=1)
def g(n):
time.sleep(n)
time_buffer = 0.3
start_time = time.time()
ray.get([f.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_gpu_ids(shutdown_only):
num_gpus = 10
ray.init(num_cpus=10, num_gpus=num_gpus)
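    # Each task fN below requests N GPUs and checks that ray.get_gpu_ids()
    # is consistent with the CUDA_VISIBLE_DEVICES environment variable.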
@ray.remote(num_gpus=0)
def f0():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=1)
def f1():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=2)
def f2():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=3)
def f3():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 3
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=4)
def f4():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 4
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=5)
def f5():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 5
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
# Wait for all workers to start up.
@ray.remote
def f():
time.sleep(0.1)
return os.getpid()
start_time = time.time()
while True:
if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:
break
if time.time() > start_time + 10:
raise Exception("Timed out while waiting for workers to start "
"up.")
list_of_ids = ray.get([f0.remote() for _ in range(10)])
assert list_of_ids == 10 * [[]]
list_of_ids = ray.get([f1.remote() for _ in range(10)])
set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}
assert set_of_ids == {(i, ) for i in range(10)}
list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
assert set(all_ids) == set(range(10))
remaining = [f5.remote() for _ in range(20)]
for _ in range(10):
t1 = time.time()
ready, remaining = ray.wait(remaining, num_returns=2)
t2 = time.time()
# There are only 10 GPUs, and each task uses 2 GPUs, so there
# should only be 2 tasks scheduled at a given time, so if we wait
# for 2 tasks to finish, then it should take at least 0.1 seconds
# for each pair of tasks to finish.
assert t2 - t1 > 0.09
list_of_ids = ray.get(ready)
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
# Commenting out the below assert because it seems to fail a lot.
# assert set(all_ids) == set(range(10))
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@ray.remote
class Actor0(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
a0 = Actor0.remote()
ray.get(a0.test.remote())
a1 = Actor1.remote()
ray.get(a1.test.remote())
def test_zero_cpus(shutdown_only):
ray.init(num_cpus=0)
@ray.remote(num_cpus=0)
def f():
return 1
# The task should be able to execute.
ray.get(f.remote())
def test_zero_cpus_actor(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=2)
ray.init(redis_address=cluster.redis_address)
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote
class Foo(object):
def method(self):
return ray.worker.global_worker.plasma_client.store_socket_name
# Make sure tasks and actors run on the remote local scheduler.
a = Foo.remote()
assert ray.get(a.method.remote()) != local_plasma
def test_fractional_resources(shutdown_only):
ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})
@ray.remote(num_gpus=0.5)
class Foo1(object):
def method(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
return gpu_ids[0]
foos = [Foo1.remote() for _ in range(6)]
gpu_ids = ray.get([f.method.remote() for f in foos])
for i in range(3):
assert gpu_ids.count(i) == 2
del foos
@ray.remote
class Foo2(object):
def method(self):
pass
# Create an actor that requires 0.7 of the custom resource.
f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
ray.get(f1.method.remote())
# Make sure that we cannot create an actor that requires 0.7 of the
# custom resource. TODO(rkn): Re-enable this once ray.wait is
# implemented.
f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
ready, _ = ray.wait([f2.method.remote()], timeout=0.5)
assert len(ready) == 0
    # Make sure we can start an actor that requires only 0.3 of the custom
    # resource.
f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
ray.get(f3.method.remote())
del f1, f3
# Make sure that we get exceptions if we submit tasks that require a
# fractional number of resources greater than 1.
@ray.remote(num_cpus=1.5)
def test():
pass
with pytest.raises(ValueError):
test.remote()
with pytest.raises(ValueError):
Foo2._remote([], {}, resources={"Custom": 1.5})
def test_multiple_local_schedulers(ray_start_cluster):
# This test will define a bunch of tasks that can only be assigned to
# specific local schedulers, and we will check that they are assigned
# to the correct local schedulers.
cluster = ray_start_cluster
cluster.add_node(num_cpus=11, num_gpus=0)
cluster.add_node(num_cpus=5, num_gpus=5)
cluster.add_node(num_cpus=10, num_gpus=1)
ray.init(redis_address=cluster.redis_address)
cluster.wait_for_nodes()
# Define a bunch of remote functions that all return the socket name of
# the plasma store. Since there is a one-to-one correspondence between
# plasma stores and local schedulers (at least right now), this can be
# used to identify which local scheduler the task was assigned to.
# This must be run on the zeroth local scheduler.
@ray.remote(num_cpus=11)
def run_on_0():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first local scheduler.
@ray.remote(num_gpus=2)
def run_on_1():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the second local scheduler.
@ray.remote(num_cpus=6, num_gpus=1)
def run_on_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This can be run anywhere.
@ray.remote(num_cpus=0, num_gpus=0)
def run_on_0_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first or second local scheduler.
@ray.remote(num_gpus=1)
def run_on_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the zeroth or second local scheduler.
@ray.remote(num_cpus=8)
def run_on_0_2():
return ray.worker.global_worker.plasma_client.store_socket_name
def run_lots_of_tasks():
names = []
results = []
for i in range(100):
index = np.random.randint(6)
if index == 0:
names.append("run_on_0")
results.append(run_on_0.remote())
elif index == 1:
names.append("run_on_1")
results.append(run_on_1.remote())
elif index == 2:
names.append("run_on_2")
results.append(run_on_2.remote())
elif index == 3:
names.append("run_on_0_1_2")
results.append(run_on_0_1_2.remote())
elif index == 4:
names.append("run_on_1_2")
results.append(run_on_1_2.remote())
elif index == 5:
names.append("run_on_0_2")
results.append(run_on_0_2.remote())
return names, results
client_table = ray.global_state.client_table()
store_names = []
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 0
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 5
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 1
]
assert len(store_names) == 3
def validate_names_and_results(names, results):
for name, result in zip(names, ray.get(results)):
if name == "run_on_0":
assert result in [store_names[0]]
elif name == "run_on_1":
assert result in [store_names[1]]
elif name == "run_on_2":
assert result in [store_names[2]]
elif name == "run_on_0_1_2":
assert (result in [
store_names[0], store_names[1], store_names[2]
])
elif name == "run_on_1_2":
assert result in [store_names[1], store_names[2]]
elif name == "run_on_0_2":
assert result in [store_names[0], store_names[2]]
else:
raise Exception("This should be unreachable.")
assert set(ray.get(results)) == set(store_names)
names, results = run_lots_of_tasks()
validate_names_and_results(names, results)
# Make sure the same thing works when this is nested inside of a task.
@ray.remote
def run_nested1():
names, results = run_lots_of_tasks()
return names, results
@ray.remote
def run_nested2():
names, results = ray.get(run_nested1.remote())
return names, results
names, results = ray.get(run_nested2.remote())
validate_names_and_results(names, results)
def test_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=3, resources={"CustomResource": 0})
cluster.add_node(num_cpus=3, resources={"CustomResource": 1})
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def h():
ray.get([f.remote() for _ in range(5)])
return ray.worker.global_worker.plasma_client.store_socket_name
# The f tasks should be scheduled on both local schedulers.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The g tasks should be scheduled only on the second local scheduler.
local_scheduler_ids = set(ray.get([g.remote() for _ in range(50)]))
assert len(local_scheduler_ids) == 1
assert list(local_scheduler_ids)[0] != local_plasma
    # Make sure that resource bookkeeping works when a task that uses a
    # custom resource gets blocked.
ray.get([h.remote() for _ in range(5)])
def test_two_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 1,
"CustomResource2": 2
})
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 3,
"CustomResource2": 4
})
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"CustomResource1": 1})
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource2": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
def h():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 4})
def j():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource3": 1})
def k():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
# The f and g tasks should be scheduled on both local schedulers.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The h tasks should be scheduled only on the second local scheduler.
local_scheduler_ids = set(ray.get([h.remote() for _ in range(50)]))
assert len(local_scheduler_ids) == 1
assert list(local_scheduler_ids)[0] != local_plasma
# Make sure that tasks with unsatisfied custom resource requirements do
# not get scheduled.
ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)
assert ready_ids == []
def test_many_custom_resources(shutdown_only):
num_custom_resources = 10000
total_resources = {
str(i): np.random.randint(1, 7)
for i in range(num_custom_resources)
}
ray.init(num_cpus=5, resources=total_resources)
def f():
return 1
remote_functions = []
for _ in range(20):
num_resources = np.random.randint(0, num_custom_resources + 1)
permuted_resources = np.random.permutation(
num_custom_resources)[:num_resources]
random_resources = {
str(i): total_resources[str(i)]
for i in permuted_resources
}
remote_function = ray.remote(resources=random_resources)(f)
remote_functions.append(remote_function)
remote_functions.append(ray.remote(f))
remote_functions.append(ray.remote(resources=total_resources)(f))
results = []
for remote_function in remote_functions:
results.append(remote_function.remote())
results.append(remote_function.remote())
results.append(remote_function.remote())
ray.get(results)
@pytest.fixture
def save_gpu_ids_shutdown_only():
    # Record the current value of this environment variable so that we can
    # reset it after the test.
original_gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", None)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
# Reset the environment variable.
if original_gpu_ids is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = original_gpu_ids
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
def test_specific_gpus(save_gpu_ids_shutdown_only):
allowed_gpu_ids = [4, 5, 6]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in allowed_gpu_ids])
ray.init(num_gpus=3)
@ray.remote(num_gpus=1)
def f():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert gpu_ids[0] in allowed_gpu_ids
@ray.remote(num_gpus=2)
def g():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert gpu_ids[0] in allowed_gpu_ids
assert gpu_ids[1] in allowed_gpu_ids
ray.get([f.remote() for _ in range(100)])
ray.get([g.remote() for _ in range(100)])
def test_blocking_tasks(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(i, j):
return (i, j)
@ray.remote
def g(i):
# Each instance of g submits and blocks on the result of another
# remote task.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.get(object_ids)
@ray.remote
def h(i):
        # Each instance of h submits and blocks on the result of another
# remote task using ray.wait.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.wait(object_ids, num_returns=len(object_ids))
ray.get([h.remote(i) for i in range(4)])
@ray.remote
def _sleep(i):
time.sleep(0.01)
        return i
@ray.remote
def sleep():
# Each instance of sleep submits and blocks on the result of
# another remote task, which takes some time to execute.
ray.get([_sleep.remote(i) for i in range(10)])
ray.get(sleep.remote())
def test_max_call_tasks(shutdown_only):
ray.init(num_cpus=1)
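    # max_calls=N makes a worker process exit after executing N tasks, so
    # the pid observed below should eventually disappear.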
@ray.remote(max_calls=1)
def f():
return os.getpid()
pid = ray.get(f.remote())
ray.test.test_utils.wait_for_pid_to_exit(pid)
@ray.remote(max_calls=2)
def f():
return os.getpid()
pid1 = ray.get(f.remote())
pid2 = ray.get(f.remote())
assert pid1 == pid2
ray.test.test_utils.wait_for_pid_to_exit(pid1)
def attempt_to_load_balance(remote_function,
args,
total_tasks,
num_nodes,
minimum_count,
num_attempts=100):
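    """Submit total_tasks tasks repeatedly until they are spread over all
    num_nodes object stores with at least minimum_count tasks on each."""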
attempts = 0
while attempts < num_attempts:
locations = ray.get(
[remote_function.remote(*args) for _ in range(total_tasks)])
names = set(locations)
counts = [locations.count(name) for name in names]
logger.info("Counts are {}.".format(counts))
if (len(names) == num_nodes
and all(count >= minimum_count for count in counts)):
break
attempts += 1
assert attempts < num_attempts
def test_load_balancing(ray_start_cluster):
# This test ensures that tasks are being assigned to all local
# schedulers in a roughly equal manner.
cluster = ray_start_cluster
num_nodes = 3
num_cpus = 7
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_cpus)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f():
time.sleep(0.01)
return ray.worker.global_worker.plasma_client.store_socket_name
attempt_to_load_balance(f, [], 100, num_nodes, 10)
attempt_to_load_balance(f, [], 1000, num_nodes, 100)
def test_load_balancing_with_dependencies(ray_start_cluster):
# This test ensures that tasks are being assigned to all local
# schedulers in a roughly equal manner even when the tasks have
# dependencies.
cluster = ray_start_cluster
num_nodes = 3
for _ in range(num_nodes):
cluster.add_node(num_cpus=1)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
time.sleep(0.010)
return ray.worker.global_worker.plasma_client.store_socket_name
# This object will be local to one of the local schedulers. Make sure
# this doesn't prevent tasks from being scheduled on other local
# schedulers.
x = ray.put(np.zeros(1000000))
attempt_to_load_balance(f, [x], 100, num_nodes, 25)
def wait_for_num_tasks(num_tasks, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.task_table()) >= num_tasks:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.object_table()) >= num_objects:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_global_state_api(shutdown_only):
with pytest.raises(Exception):
ray.global_state.object_table()
with pytest.raises(Exception):
ray.global_state.task_table()
with pytest.raises(Exception):
ray.global_state.client_table()
with pytest.raises(Exception):
ray.global_state.function_table()
with pytest.raises(Exception):
ray.global_state.log_files()
ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1})
resources = {"CPU": 5, "GPU": 3, "CustomResource": 1}
assert ray.global_state.cluster_resources() == resources
assert ray.global_state.object_table() == {}
driver_id = ray.experimental.state.binary_to_hex(
ray.worker.global_worker.worker_id)
driver_task_id = ray.experimental.state.binary_to_hex(
ray.worker.global_worker.current_task_id.id())
# One task is put in the task table which corresponds to this driver.
wait_for_num_tasks(1)
task_table = ray.global_state.task_table()
assert len(task_table) == 1
assert driver_task_id == list(task_table.keys())[0]
task_spec = task_table[driver_task_id]["TaskSpec"]
assert task_spec["TaskID"] == driver_task_id
assert task_spec["ActorID"] == ray_constants.ID_SIZE * "ff"
assert task_spec["Args"] == []
assert task_spec["DriverID"] == driver_id
assert task_spec["FunctionID"] == ray_constants.ID_SIZE * "ff"
assert task_spec["ReturnObjectIDs"] == []
client_table = ray.global_state.client_table()
node_ip_address = ray.worker.global_worker.node_ip_address
assert len(client_table) == 1
assert client_table[0]["NodeManagerAddress"] == node_ip_address
@ray.remote
def f(*xs):
return 1
x_id = ray.put(1)
result_id = f.remote(1, "hi", x_id)
# Wait for one additional task to complete.
wait_for_num_tasks(1 + 1)
task_table = ray.global_state.task_table()
assert len(task_table) == 1 + 1
task_id_set = set(task_table.keys())
task_id_set.remove(driver_task_id)
task_id = list(task_id_set)[0]
function_table = ray.global_state.function_table()
task_spec = task_table[task_id]["TaskSpec"]
assert task_spec["ActorID"] == ray_constants.ID_SIZE * "ff"
assert task_spec["Args"] == [1, "hi", x_id]
assert task_spec["DriverID"] == driver_id
assert task_spec["ReturnObjectIDs"] == [result_id]
function_table_entry = function_table[task_spec["FunctionID"]]
assert function_table_entry["Name"] == "runtest.f"
assert function_table_entry["DriverID"] == driver_id
assert function_table_entry["Module"] == "runtest"
assert task_table[task_id] == ray.global_state.task_table(task_id)
# Wait for two objects, one for the x_id and one for result_id.
wait_for_num_objects(2)
def wait_for_object_table():
timeout = 10
start_time = time.time()
while time.time() - start_time < timeout:
object_table = ray.global_state.object_table()
tables_ready = (object_table[x_id]["ManagerIDs"] is not None and
object_table[result_id]["ManagerIDs"] is not None)
if tables_ready:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for object table to "
"update.")
object_table = ray.global_state.object_table()
assert len(object_table) == 2
assert object_table[x_id]["IsEviction"][0] is False
assert object_table[result_id]["IsEviction"][0] is False
assert object_table[x_id] == ray.global_state.object_table(x_id)
object_table_entry = ray.global_state.object_table(result_id)
assert object_table[result_id] == object_table_entry
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_log_file_api(shutdown_only):
ray.init(num_cpus=1, redirect_worker_output=True)
message = "unique message"
@ray.remote
def f():
logger.info(message)
# The call to sys.stdout.flush() seems to be necessary when using
# the system Python 2.7 on Ubuntu.
sys.stdout.flush()
ray.get(f.remote())
# Make sure that the message appears in the log files.
start_time = time.time()
found_message = False
while time.time() - start_time < 10:
log_files = ray.global_state.log_files()
for ip, innerdict in log_files.items():
for filename, contents in innerdict.items():
contents_str = "".join(contents)
if message in contents_str:
found_message = True
if found_message:
break
time.sleep(0.1)
assert found_message is True
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_workers(shutdown_only):
num_workers = 3
ray.init(redirect_worker_output=True, num_cpus=num_workers)
@ray.remote
def f():
return id(ray.worker.global_worker), os.getpid()
# Wait until all of the workers have started.
worker_ids = set()
while len(worker_ids) != num_workers:
worker_ids = set(ray.get([f.remote() for _ in range(10)]))
worker_info = ray.global_state.workers()
assert len(worker_info) >= num_workers
for worker_id, info in worker_info.items():
assert "node_ip_address" in info
assert "plasma_store_socket" in info
assert "stderr_file" in info
assert "stdout_file" in info
def test_specific_driver_id():
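    # Use a fixed 20-byte id so the driver id is deterministic across runs.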
dummy_driver_id = ray.ObjectID(b"00112233445566778899")
ray.init(driver_id=dummy_driver_id)
@ray.remote
def f():
return ray.worker.global_worker.task_driver_id.id()
assert_equal(dummy_driver_id.id(), ray.worker.global_worker.worker_id)
task_driver_id = ray.get(f.remote())
assert_equal(dummy_driver_id.id(), task_driver_id)
ray.shutdown()
@pytest.fixture
def shutdown_only_with_initialization_check():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
assert not ray.is_initialized()
def test_initialized(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0)
assert ray.is_initialized()
def test_initialized_local_mode(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0, local_mode=True)
assert ray.is_initialized()
def test_wait_reconstruction(shutdown_only):
ray.init(num_cpus=1, object_store_memory=10**8)
@ray.remote
def f():
return np.zeros(6 * 10**7, dtype=np.uint8)
x_id = f.remote()
ray.wait([x_id])
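    # Produce a second ~60 MB result; with only 10**8 bytes of object store
    # memory this should evict x_id.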
ray.wait([f.remote()])
assert not ray.worker.global_worker.plasma_client.contains(
ray.pyarrow.plasma.ObjectID(x_id.id()))
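    # Waiting on the evicted object should trigger its reconstruction.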
ready_ids, _ = ray.wait([x_id])
assert len(ready_ids) == 1
def test_ray_setproctitle(shutdown_only):
ray.init(num_cpus=2)
@ray.remote
class UniqueName(object):
def __init__(self):
assert setproctitle.getproctitle() == "ray_UniqueName:__init__()"
def f(self):
assert setproctitle.getproctitle() == "ray_UniqueName:f()"
@ray.remote
def unique_1():
assert setproctitle.getproctitle() == "ray_worker:runtest.unique_1()"
actor = UniqueName.remote()
ray.get(actor.f.remote())
ray.get(unique_1.remote())
def test_duplicate_error_messages(shutdown_only):
ray.init(num_cpus=0)
driver_id = ray.ray_constants.NIL_JOB_ID.id()
error_data = ray.gcs_utils.construct_error_message(driver_id, "test",
"message", 0)
# Push the same message to the GCS twice (they are the same because we
# do not include a timestamp).
r = ray.worker.global_worker.redis_client
r.execute_command("RAY.TABLE_APPEND", ray.gcs_utils.TablePrefix.ERROR_INFO,
ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id,
error_data)
# Before https://github.com/ray-project/ray/pull/3316 this would
# give an error
r.execute_command("RAY.TABLE_APPEND", ray.gcs_utils.TablePrefix.ERROR_INFO,
ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id,
error_data)
@pytest.mark.skipif(
os.getenv("TRAVIS") is None,
reason="This test should only be run on Travis.")
def test_ray_stack(shutdown_only):
ray.init(num_cpus=2)
def unique_name_1():
time.sleep(1000)
@ray.remote
def unique_name_2():
time.sleep(1000)
@ray.remote
def unique_name_3():
unique_name_1()
unique_name_2.remote()
unique_name_3.remote()
success = False
start_time = time.time()
while time.time() - start_time < 30:
# Attempt to parse the "ray stack" call.
output = ray.utils.decode(subprocess.check_output(["ray", "stack"]))
if ("unique_name_1" in output and "unique_name_2" in output
and "unique_name_3" in output):
success = True
break
if not success:
raise Exception("Failed to find necessary information with "
"'ray stack'")
| [] | [] | ["CUDA_VISIBLE_DEVICES", "RAY_USE_NEW_GCS", "TRAVIS"] | [] | ["CUDA_VISIBLE_DEVICES", "RAY_USE_NEW_GCS", "TRAVIS"] | python | 3 | 0 |
test/test_failover_integration.py | import logging
import os
import time
from kafka import SimpleClient, SimpleConsumer, KeyedProducer
from kafka.errors import (
FailedPayloadsError, ConnectionError, RequestTimedOutError,
NotLeaderForPartitionError)
from kafka.producer.base import Producer
from kafka.structs import TopicPartition
from test.fixtures import ZookeeperFixture, KafkaFixture
from test.testutil import KafkaIntegrationTestCase, random_string
log = logging.getLogger(__name__)
class TestFailover(KafkaIntegrationTestCase):
create_client = False
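    # The base test case would create a single client; this test builds its
    # own multi-broker SimpleClient in setUp instead.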
def setUp(self):
if not os.environ.get('KAFKA_VERSION'):
self.skipTest('integration test requires KAFKA_VERSION')
zk_chroot = random_string(10)
replicas = 3
partitions = 3
# mini zookeeper, 3 kafka brokers
self.zk = ZookeeperFixture.instance()
kk_kwargs = {'zk_chroot': zk_chroot, 'replicas': replicas,
'partitions': partitions}
self.brokers = [KafkaFixture.instance(i, self.zk, **kk_kwargs)
for i in range(replicas)]
hosts = ['%s:%d' % (b.host, b.port) for b in self.brokers]
self.client = SimpleClient(hosts, timeout=2)
super(TestFailover, self).setUp()
def tearDown(self):
super(TestFailover, self).tearDown()
if not os.environ.get('KAFKA_VERSION'):
return
self.client.close()
for broker in self.brokers:
broker.close()
self.zk.close()
def test_switch_leader(self):
topic = self.topic
partition = 0
# Testing the base Producer class here so that we can easily send
# messages to a specific partition, kill the leader for that partition
# and check that after another broker takes leadership the producer
# is able to resume sending messages
# require that the server commit messages to all in-sync replicas
# so that failover doesn't lose any messages on server-side
# and we can assert that server-side message count equals client-side
producer = Producer(self.client, async_send=False,
req_acks=Producer.ACK_AFTER_CLUSTER_COMMIT)
# Send 100 random messages to a specific partition
self._send_random_messages(producer, topic, partition, 100)
# kill leader for partition
self._kill_leader(topic, partition)
# expect failure, but don't wait more than 60 secs to recover
recovered = False
started = time.time()
timeout = 60
while not recovered and (time.time() - started) < timeout:
try:
log.debug("attempting to send 'success' message after leader killed")
producer.send_messages(topic, partition, b'success')
log.debug("success!")
recovered = True
except (FailedPayloadsError, ConnectionError, RequestTimedOutError,
NotLeaderForPartitionError):
log.debug("caught exception sending message -- will retry")
continue
# Verify we successfully sent the message
self.assertTrue(recovered)
# send some more messages to new leader
self._send_random_messages(producer, topic, partition, 100)
# count number of messages
# Should be equal to 100 before + 1 recovery + 100 after
# at_least=True because exactly once delivery isn't really a thing
self.assert_message_count(topic, 201, partitions=(partition,),
at_least=True)
def test_switch_leader_async(self):
topic = self.topic
partition = 0
# Test the base class Producer -- send_messages to a specific partition
producer = Producer(self.client, async_send=True,
batch_send_every_n=15,
batch_send_every_t=3,
req_acks=Producer.ACK_AFTER_CLUSTER_COMMIT,
async_log_messages_on_error=False)
# Send 10 random messages
self._send_random_messages(producer, topic, partition, 10)
self._send_random_messages(producer, topic, partition + 1, 10)
# kill leader for partition
self._kill_leader(topic, partition)
log.debug("attempting to send 'success' message after leader killed")
# in async mode, this should return immediately
producer.send_messages(topic, partition, b'success')
producer.send_messages(topic, partition + 1, b'success')
# send to new leader
self._send_random_messages(producer, topic, partition, 10)
self._send_random_messages(producer, topic, partition + 1, 10)
# Stop the producer and wait for it to shutdown
producer.stop()
started = time.time()
timeout = 60
while (time.time() - started) < timeout:
if not producer.thread.is_alive():
break
time.sleep(0.1)
else:
self.fail('timeout waiting for producer queue to empty')
# count number of messages
# Should be equal to 10 before + 1 recovery + 10 after
# at_least=True because exactly once delivery isn't really a thing
self.assert_message_count(topic, 21, partitions=(partition,),
at_least=True)
self.assert_message_count(topic, 21, partitions=(partition + 1,),
at_least=True)
def test_switch_leader_keyed_producer(self):
topic = self.topic
producer = KeyedProducer(self.client, async_send=False)
# Send 10 random messages
for _ in range(10):
key = random_string(3).encode('utf-8')
msg = random_string(10).encode('utf-8')
producer.send_messages(topic, key, msg)
# kill leader for partition 0
self._kill_leader(topic, 0)
recovered = False
started = time.time()
timeout = 60
while not recovered and (time.time() - started) < timeout:
try:
key = random_string(3).encode('utf-8')
msg = random_string(10).encode('utf-8')
producer.send_messages(topic, key, msg)
if producer.partitioners[topic].partition(key) == 0:
recovered = True
except (FailedPayloadsError, ConnectionError, RequestTimedOutError,
NotLeaderForPartitionError):
log.debug("caught exception sending message -- will retry")
continue
# Verify we successfully sent the message
self.assertTrue(recovered)
# send some more messages just to make sure no more exceptions
for _ in range(10):
key = random_string(3).encode('utf-8')
msg = random_string(10).encode('utf-8')
producer.send_messages(topic, key, msg)
def test_switch_leader_simple_consumer(self):
producer = Producer(self.client, async_send=False)
consumer = SimpleConsumer(self.client, None, self.topic, partitions=None, auto_commit=False, iter_timeout=10)
self._send_random_messages(producer, self.topic, 0, 2)
consumer.get_messages()
self._kill_leader(self.topic, 0)
consumer.get_messages()
def _send_random_messages(self, producer, topic, partition, n):
for j in range(n):
msg = 'msg {0}: {1}'.format(j, random_string(10))
log.debug('_send_random_message %s to %s:%d', msg, topic, partition)
while True:
try:
producer.send_messages(topic, partition, msg.encode('utf-8'))
except Exception:
log.exception('failure in _send_random_messages - retrying')
continue
else:
break
def _kill_leader(self, topic, partition):
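        # Find the broker currently leading this partition and shut it down.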
leader = self.client.topics_to_brokers[TopicPartition(topic, partition)]
broker = self.brokers[leader.nodeId]
broker.close()
return broker
def assert_message_count(self, topic, check_count, timeout=10,
partitions=None, at_least=False):
hosts = ','.join(['%s:%d' % (broker.host, broker.port)
for broker in self.brokers])
client = SimpleClient(hosts, timeout=2)
consumer = SimpleConsumer(client, None, topic,
partitions=partitions,
auto_commit=False,
iter_timeout=timeout)
started_at = time.time()
pending = -1
while pending < check_count and (time.time() - started_at < timeout):
try:
pending = consumer.pending(partitions)
except FailedPayloadsError:
pass
time.sleep(0.5)
consumer.stop()
client.close()
if pending < check_count:
self.fail('Too few pending messages: found %d, expected %d' %
(pending, check_count))
elif pending > check_count and not at_least:
self.fail('Too many pending messages: found %d, expected %d' %
(pending, check_count))
return True
| [] | [] | ["KAFKA_VERSION"] | [] | ["KAFKA_VERSION"] | python | 1 | 0 |
asdf.go | package main
import (
"os"
"path"
)
// GetAsdfDataPath returns the path for asdf
func GetAsdfDataPath() string {
dir := os.Getenv("ASDF_DATA_DIR")
if dir != "" {
return dir
}
return path.Join(os.Getenv("HOME"), ".asdf")
}
// GetPluginPath returns the path of the plugin
func GetPluginPath(plugin string) string {
return path.Join(GetAsdfDataPath(), "plugins", plugin)
}
| ["\"ASDF_DATA_DIR\"", "\"HOME\""] | [] | ["ASDF_DATA_DIR", "HOME"] | [] | ["ASDF_DATA_DIR", "HOME"] | go | 2 | 0 |
collipa/helpers.py | # coding: utf-8
import os
import errno
import re
import time
import random
import logging
import math
from datetime import datetime
from HTMLParser import HTMLParser
from collipa import config
from collipa.libs import xss
from collipa.libs.pil import Image
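# Collects the value of the data-username attribute from every <a> tag fed in.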
class UsernameParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.names = []
def handle_starttag(self, tag, attrs):
if tag != 'a':
return
for variable, value in attrs:
if variable != 'data-username':
continue
self.names.append(value)
def get_day(timestamp):
FORY = '%d'
os.environ["TZ"] = config.default_timezone
time.tzset()
    format_str = time.strftime(FORY, time.localtime(timestamp))
    return format_str
'''
def get_month(timestamp):
FORY = '%b'
os.environ["TZ"] = config.default_timezone
time.tzset()
str = time.strftime(FORY, time.localtime(timestamp))
return str
'''
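# Render a timestamp as a relative, human-friendly string. The literals are
# Chinese UI text: '刚刚' = "just now", '分钟前' = "minutes ago",
# '小时前' = "hours ago", '昨天' = "yesterday", '前天' = "the day before
# yesterday".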
def format_date(timestamp):
FORY = '%Y-%m-%d @ %H:%M'
FORM = '%m-%d @ %H:%M'
FORH = '%H:%M'
os.environ["TZ"] = config.default_timezone
time.tzset()
r_time = time.strftime(FORM, time.localtime(timestamp))
h_time = time.strftime(FORH, time.localtime(timestamp))
now = int(time.time())
t = now - timestamp
if t < 60:
format_str = '刚刚'
elif t < 60 * 60:
min = t / 60
format_str = '%d 分钟前' % min
elif t < 60 * 60 * 24:
h = t / (60 * 60)
format_str = '%d 小时前 %s' % (h, h_time)
elif t < 60 * 60 * 24 * 3:
d = t / (60 * 60 * 24)
if d == 1:
format_str = '昨天 ' + r_time
else:
format_str = '前天 ' + r_time
else:
format_str = time.strftime(FORY, time.localtime(timestamp))
return format_str
def format_date2(timestamp):
FORY = '%Y-%m-%d @ %H:%M'
os.environ["TZ"] = config.default_timezone
time.tzset()
format_str = time.strftime(FORY, time.localtime(timestamp))
return format_str
def get_year():
timestamp = int(time.time())
FORY = '%Y'
os.environ["TZ"] = config.default_timezone
time.tzset()
format_str = time.strftime(FORY, time.localtime(timestamp))
return format_str
def get_month():
timestamp = int(time.time())
FORY = '%m'
os.environ["TZ"] = config.default_timezone
time.tzset()
format_str = time.strftime(FORY, time.localtime(timestamp))
return format_str
def format_text(text):
floor = ur'#(\d+)楼\s'
for match in re.finditer(floor, text):
url = match.group(1)
floor = match.group(0)
nurl = '<a class="toreply" href="#;">#<span class="tofloor">%s</span>楼 </a>' % (url)
text = text.replace(floor, nurl)
return text
def reply_content(text):
return text[0:26]
def regex(pattern, data, flags=0):
if isinstance(pattern, basestring):
pattern = re.compile(pattern, flags)
return pattern.match(data)
def email(data):
pattern = r'^.+@[^.].*\.[a-z]{2,10}$'
return regex(pattern, data, re.IGNORECASE)
def url(data):
pattern = (
r'(?i)^((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}'
r'/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+'
r'|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))$')
return regex(pattern, data, re.IGNORECASE)
def username(data):
pattern = r'^[a-zA-Z0-9]+$'
return regex(pattern, data)
def get_mentions(content):
username_re = re.compile(r'@(?P<username>[A-Za-z0-9]+)(</p>| |\n|\s|$)')
match = username_re.finditer(content)
return [(m.start(), m.group('username')) for m in match] if match else []
def strip_tags(html):
if html:
html = html.strip()
html = html.strip("\n")
result = []
parse = HTMLParser()
parse.handle_data = result.append
parse.feed(html)
parse.close()
return "".join(result)
return ''
def strip_xss_tags(html):
return xss.parse_html(html)
def filter_img_tags(htmlstr):
re_img = re.compile('<\s*img[^>]*>', re.L)
re_br = re.compile('<br\s*?/?>')
s = re_img.sub('', htmlstr)
s = re_br.sub('', s)
return s
def get_img_list(text):
img_path = ur'\/static\/[^\s\"]*\.(jpg|jpeg|png|bmp|gif)'
path_list = []
for match in re.finditer(img_path, text):
path = match.group(0)
path_list += [path]
return path_list
def force_int(value, default=1):
try:
return int(value)
    except (TypeError, ValueError):
return default
class _Missing(object):
def __repr__(self):
return 'no value'
def __reduce__(self):
return '_missing'
_missing = _Missing()
class cached_property(object):
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj, type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
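# A minimal sketch of how cached_property is typically used (the class and
# method names below are hypothetical, not from this codebase): the wrapped
# method runs once per instance and the result is cached in the instance
# __dict__, shadowing the descriptor on later lookups.
#
#   class Topic(object):
#       @cached_property
#       def reply_count(self):
#           return expensive_count_query()  # computed once, cached afterwards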
def pattern_image_url(url):
ret = {}
    # include 'g' (gaussian) in the position class so the gaussian check below is reachable
    m = re.findall(r"(.*)\.thumb\.(\d+)_(\d+)[_]?([tcbg]?)\.(\w+)", url)
if m:
ret['thumb'] = True
ori, w, h, crop, suffix = m[0]
ret['resize'] = (int(w), int(h))
ret['width'] = int(w)
ret['height'] = int(h)
ret['crop'] = crop
ret['gaussian'] = True if crop == 'g' else False
ret['origin'] = '%s.%s' % (ori, suffix)
return ret
def gen_thumb_url(url, size, position='c'):
width, height = size
img_param = pattern_image_url(url)
if img_param:
url = img_param['origin']
m = re.findall(r"(.*)\.(\w+)$", url)
if not m:
return url
ori, suffix = m[0]
return '%s.thumb.%d_%d_%s.%s' % (ori, width, height, position, suffix)
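# For example (derived from the format string above):
#   gen_thumb_url('/static/a/b.png', (100, 80)) -> '/static/a/b.thumb.100_80_c.png'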
def save_image(image, path):
image.save(path)
def rcd(x):
return int(math.ceil(x))
def crop(url, size, position='c', force=False):
url = "%s/%s" % (config.root_path, url.lstrip('/'))
path = gen_thumb_url(url, size, position=position)
width, height = size
try:
image = Image.open(url)
except IOError:
logging.error('cannot open %s' % url)
return
w, h = image.size
if (w, h) == (width, height):
return save_image(image, path)
if force and (width >= w or height >= h):
return save_image(image, path)
hr = height * 1.0 / h
wr = width * 1.0 / w
if hr > wr:
wf = rcd(w * hr)
hf = height
else:
wf = width
hf = rcd(h * wr)
resize = (wf, hf)
image = image.resize(resize, Image.ANTIALIAS)
if width * height == 0:
return save_image(image, path)
coo = None
if wf > width:
if position == 't':
coo = (0, 0, width, height)
elif position == 'b':
coo = (wf - width, 0, wf, height)
else:
coo = (rcd((wf - width) / 2.0), 0, rcd((wf + width) / 2.0), height)
elif hf > height:
if position == 't':
coo = (0, 0, width, height)
elif position == 'b':
coo = (0, hf - height, width, hf)
else:
coo = (0, rcd((hf - height) / 2.0), width, rcd((hf + height) / 2.0))
if coo:
image = image.crop(coo)
return save_image(image, path)
def gen_random_str(n=6):
return ''.join(random.sample('ZYXWVUTSRQPONMLKJIHGFEDCBAzyxwvutsrqponmlkjihgfedcba', n))
def gen_upload_dir():
now = datetime.now()
upload_dir = os.path.join(config.root_path, 'static/upload/image', now.strftime("%Y/%m/"))
if not os.path.exists(upload_dir):
os.makedirs(upload_dir)
return upload_dir
def gen_filename(suffix='jpeg'):
timestamp = int(time.time())
filename = '%d_%s.%s' % (timestamp, gen_random_str(), suffix)
return filename
def gen_upload_path(suffix='jpeg'):
upload_dir = gen_upload_dir()
filename = gen_filename(suffix)
upload_path = os.path.join(upload_dir, filename)
return upload_path
def get_relative_path(absolute_path):
return os.path.relpath(absolute_path, config.root_path)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def get_asset_path(relative_path):
relative_path = relative_path.lstrip('/')
if relative_path.startswith('static/'):
return os.path.join(config.root_path, relative_path)
return os.path.join(config.static_path, relative_path)
def remove_file(file_path):
if not os.path.isfile(file_path):
return
os.remove(file_path)
def collect_items_from_query(query, from_id, limit, attr_name=None):
can_append = False
items = []
i = 0
for item in query:
i += 1
if i > 1000:
break
if len(items) >= limit:
break
if can_append:
items.append(item)
continue
        # compare item.<attr_name> when attr_name is given, otherwise the item itself
        if (getattr(item, attr_name) if attr_name else item) == from_id:
can_append = True
return items
def extract_urls(content):
iter_m = re.finditer('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', content)
return (m.group() for m in iter_m)
def process_content(content):
content = process_music(content)
content = process_video(content)
return content
def process_music(content):
content = process_163music(content)
return content
def process_video(content):
content = process_youtube(content)
return content
def process_163music(content):
embed_tpl = '<div class="music-wrapper"><iframe frameborder="no" border="0" marginwidth="0" marginheight="0" width="330" height="86" src="http://music.163.com/outchain/player?type=2&id={music_id}&auto=0&height=66"></iframe></div>'
match = re.findall(r'(https?://music\.163\.com/#/song/(\d+)/?)', content)
for url, music_id in match:
content = content.replace(url, embed_tpl.format(music_id=music_id))
return content
def process_youtube(content):
embed_tpl = '<div class="video-wrapper youtube"><iframe width="560" height="315" src="https://www.youtube.com/embed/{video_id}" frameborder="0" allowfullscreen></iframe></div>'
for url in extract_urls(content):
match = re.search(r'http[s]?://youtu.be/(?P<video_id>[^/]+)', url)
if match:
content = content.replace(url, embed_tpl.format(**match.groupdict()))
match = re.search(r'http[s]?://www\.youtube\.com/watch\?(|.*&)v=(?P<video_id>[^&]+)', url)
if match:
content = content.replace(url, embed_tpl.format(**match.groupdict()))
return content
| [] | [] | ["TZ"] | [] | ["TZ"] | python | 1 | 0 |
src/pm/mpd/test/test1.py | #!/usr/bin/env python
#
# (C) 2001 by Argonne National Laboratory.
# See COPYRIGHT in top-level directory.
#
# Note that I repeat code for each test just in case I want to
# run one separately. I can simply copy it out of here and run it.
# A single test can typically be changed simply by altering its value(s)
# for one or more of:
# PYEXT, NMPDS, HFILE
import os, sys, commands, time
sys.path += [os.getcwd()] # do this once
print "mpd tests---------------------------------------------------"
clusterHosts = [ 'bp4%02d' % (i) for i in range(0,8) ]
print "clusterHosts=", clusterHosts
# test: simple with 1 mpd (mpdboot uses mpd's -e and -d options)
print "TEST -e -d"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = 'hello\nhello\nhello\n'
mpdtest.run(cmd="mpiexec%s -n 3 /bin/echo hello" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: simple with 2 mpds on same machine (mpdboot uses mpd's -n option)
print "TEST -n"
PYEXT = '.py'
NMPDS = 2
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
temph = open(HFILE,'w')
for i in range(NMPDS): print >>temph, '%s' % (socket.gethostname())
temph.close()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = 'hello\nhello\n'
mpdtest.run(cmd="mpiexec%s -n 2 /bin/echo hello" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: simple with 3 mpds on 3 machines
print "TEST simple hello msg on 3 nodes"
PYEXT = '.py'
NMPDS = 3
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = 'hello\nhello\nhello\n'
mpdtest.run(cmd="mpiexec%s -n 3 /bin/echo hello" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: simple 2 mpds on local machine (-l, -h, and -p option)
print "TEST -l, -h, and -p"
PYEXT = '.py'
NMPDS = 3
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpd%s -d -l 12345" % (PYEXT) )
os.system("mpd%s -d -n -h %s -p 12345" % (PYEXT,socket.gethostname()) )
expout = 'hello\nhello\nhello\n'
mpdtest.run(cmd="mpiexec%s -n 3 /bin/echo hello" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: simple with 2 mpds on 2 machines (--ncpus option)
print "TEST --ncpus"
PYEXT = '.py'
NMPDS = 2
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, "%s:2" % (host)
temph.close()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -f %s -n %d --ncpus=2" % (PYEXT,HFILE,NMPDS) )
myHost = socket.gethostname()
expout = '0: %s\n1: %s\n2: %s\n3: %s\n' % (myHost,myHost,clusterHosts[0],clusterHosts[0])
mpdtest.run(cmd="mpiexec%s -l -n 4 /bin/hostname" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: simple with 2 mpds on 2 machines (--ifhn option)
# this is not a great test, but shows working with real ifhn, then failure with 127.0.0.1
print "TEST minimal use of --ifhn"
PYEXT = '.py'
NMPDS = 2
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
temph = open(HFILE,'w')
for host in clusterHosts:
hostinfo = socket.gethostbyname_ex(host)
IP = hostinfo[2][0]
print >>temph, '%s ifhn=%s' % (host,IP)
temph.close()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
hostinfo = socket.gethostbyname_ex(socket.gethostname())
IP = hostinfo[2][0]
os.system("mpdboot%s -f %s -n %d --ifhn=%s" % (PYEXT,HFILE,NMPDS,IP) )
expout = 'hello\nhello\n'
mpdtest.run(cmd="mpiexec%s -n 2 /bin/echo hello" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
## redo the above test with a local ifhn that should cause failure
lines = commands.getoutput("mpdboot%s -f %s -n %d --ifhn=127.0.0.1" % (PYEXT,HFILE,NMPDS) )
if len(lines) > 0:
if lines.find('failed to ping') < 0:
print "probable error in ifhn test using 127.0.0.1; printing lines of output next:"
print lines
sys.exit(-1)
# test:
print "TEST MPD_CON_INET_HOST_PORT"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.environ['MPD_CON_INET_HOST_PORT'] = 'localhost:4444'
os.system("mpd.py &")
time.sleep(1) ## time to get going
expout = ['0: hello']
rv = mpdtest.run(cmd="mpiexec%s -l -n 1 echo hello" % (PYEXT), expOut=expout,grepOut=1)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
| [] | [] | ["MPD_CON_EXT", "MPD_CON_INET_HOST_PORT"] | [] | ["MPD_CON_EXT", "MPD_CON_INET_HOST_PORT"] | python | 2 | 0 |
awshoney.go | package awshoney
import (
"net/http"
"os"
"time"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/honeycombio/beeline-go/client"
"github.com/honeycombio/libhoney-go"
)
// AddFieldsToClient adds aws.* fields to all traces and spans recorded by c.
// If c is nil, the default client will be used. Usually you will invoke this
// right after beeline.Init().
func AddFieldsToClient(c *libhoney.Client) {
if c == nil {
c = client.Get()
}
c.Add(Map())
}
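// A minimal usage sketch (assumed beeline-go setup; the WriteKey/Dataset
// values are placeholders, not part of this package):
//
//	beeline.Init(beeline.Config{WriteKey: "...", Dataset: "my-service"})
//	AddFieldsToClient(nil) // nil falls back to the default libhoney client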
func execEnv() string {
if os.Getenv("ECS_CONTAINER_METADATA_URI") != "" {
return "ecs"
} else if os.Getenv("AWS_LAMBDA_FUNCTION_NAME") != "" {
return "lambda"
}
sess, err := session.NewSession()
if err != nil {
return "unknown"
}
metadata := ec2metadata.New(sess)
if metadata.Available() {
return "ec2"
}
return "unknown"
}
func Map() map[string]string {
m := map[string]string{}
env := execEnv()
switch env {
case "ecs":
m = ecsMap()
case "lambda":
m = lambdaMap()
case "ec2":
m = ec2Map()
}
m["aws.env"] = env
return m
}
var MetadataClient = &http.Client{
Timeout: 3 * time.Second,
}
| ["\"ECS_CONTAINER_METADATA_URI\"", "\"AWS_LAMBDA_FUNCTION_NAME\""] | [] | ["ECS_CONTAINER_METADATA_URI", "AWS_LAMBDA_FUNCTION_NAME"] | [] | ["ECS_CONTAINER_METADATA_URI", "AWS_LAMBDA_FUNCTION_NAME"] | go | 2 | 0 |
tools/prow-jobs-syncer/main.go | /*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// prow-jobs-syncer fetches release branches,
// and creates PRs updating them in knative/test-infra
package main
import (
"flag"
"fmt"
"log"
"os"
"path"
"strings"
"knative.dev/test-infra/pkg/cmd"
"knative.dev/test-infra/pkg/ghutil"
"knative.dev/test-infra/pkg/git"
)
func main() {
githubAccount := flag.String("github-account", "", "Token file for Github authentication")
gitUserID := flag.String("git-userid", "", "The github ID of user for hosting fork, i.e. Github ID of bot")
gitUserName := flag.String("git-username", "", "The username to use on the git commit. Requires --git-email")
gitEmail := flag.String("git-email", "", "The email to use on the git commit. Requires --git-username")
label := flag.String("label", "", "The label to add on the PR")
dryrun := flag.Bool("dry-run", false, "dry run switch")
flag.Parse()
if *dryrun {
log.Println("Running in [dry run mode]")
}
gopath := os.Getenv("GOPATH")
configgenArgs := []string{
"--prow-jobs-config-output",
path.Join(gopath, repoPath, jobConfigPath),
"--testgrid-config-output",
path.Join(gopath, repoPath, testgridConfigPath),
"--upgrade-release-branches",
"--github-token-path",
*githubAccount,
path.Join(gopath, repoPath, templateConfigPath),
}
configgenFullPath := path.Join(gopath, repoPath, configGenPath)
log.Print(cmd.RunCommand(fmt.Sprintf("go run %s %s",
configgenFullPath, strings.Join(configgenArgs, " "))))
	// The code gen above updates the template file, which might not be
	// sufficient for generating all prow/testgrid configs, so rerun the
	// config gen script to bring everything up to date.
log.Print(cmd.RunCommand(configGenScript))
gc, err := ghutil.NewGithubClient(*githubAccount)
if err != nil {
log.Fatalf("cannot authenticate to github: %v", err)
}
targetGI := git.Info{
Org: org,
Repo: repo,
Head: PRHead,
Base: PRBase,
UserID: *gitUserID,
UserName: *gitUserName,
Email: *gitEmail,
}
gcw := &GHClientWrapper{gc}
if err = createOrUpdatePR(gcw, targetGI, *label, *dryrun); err != nil {
log.Fatalf("failed creating pullrequest: '%v'", err)
}
}
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 |
q2/main.py | '''
A program that computes the discretization error.
An assignment for evaluating the error.
'''
import math
ENABLE_PRINT = True
ENABLE_DEBUG = False
def display_a(A):  # utility function
    # display
print("*"*30, "Display [begin]", "*"*30)
for _a in A:
for _a2 in _a:
print(int(_a2), end="\t")
print()
print("*"*30, "Display [end]", "*"*30)
def solver(A, b, n):
    ''' Utility function '''
def _display_row():
for x2 in range(n-1):
print(int(A[y][x2]), end="\t")
print()
    ''' Computation '''
    for x in range(n-1):  # fix pivot column x
        if ENABLE_DEBUG:
            print("\n", "="*40, f"start x={x}", "="*40, "\n")
        for y in range(n-1):  # select row y
            if x == y:
                continue
            if ENABLE_DEBUG:
                print("-"*30, f"selected y={y}", "-"*30)
                _display_row()
            # compute the elimination coefficient
try:
# A[x][x] = A[0,0], A[1,1]
coeff = A[y][x]/A[x][x]
if ENABLE_DEBUG:
print("coeff =", coeff)
            except ZeroDivisionError:  # zero pivot; skip this row
continue
            # update the row data
for x2 in range(n-1):
if ENABLE_DEBUG:
print(f"[{y},{x2}]-({str(coeff)[:4]})*[{x},{x2}]=",
int(A[y][x2]), "-", int(coeff), "*", int(A[x][x2]), "=",
int(A[y][x2] - coeff * A[x][x2]))
A[y][x2] = A[y][x2] - coeff * A[x][x2]
b[y] = b[y] - coeff * b[x]
            # display the row data
            if ENABLE_DEBUG:
                _display_row()
        # display the whole matrix
if ENABLE_DEBUG:
display_a(A)
# break
# if x == 1:
# break
xs = [b[i] / A[i][i] for i in range(n-1)]
return xs
"""
    # display
if ENABLE_PRINT:
print("*"*30, "Debug [begin]", "*"*30)
for _a in A:
for _a2 in _a:
print(int(_a2), end="\t")
print()
print("*"*30, "Debug [end]", "*"*30)
    # display
if ENABLE_PRINT:
print("b=", end="")
for _b in b:
print(int(_b), end="\t")
print()
"""
def main():
import os
d = int(os.getenv("Q2_D", 10000))
n = int(os.getenv("Q2_N", 100))
h = (math.pi / 2 - 0) / d
alpha = 1 - 2 / h**2
beta = 1 / h**2
if ENABLE_PRINT:
print(f"n={n}, d={d}, h={h}, alpha={alpha}, beta={beta}")
    ''' Coefficient matrix A '''
    A = [[0 for _ in range(n-1)] for _ in range(n-1)]
    # initialize the diagonal with alpha
    for i in range(n-1):
        A[i][i] = alpha
    # initialize the off-diagonals with beta
    for i in range(1, n-1):
        A[i][i-1] = beta  # sub-diagonal
        A[i-1][i] = beta  # super-diagonal
    ''' Known vector b '''
b = [0 for _ in range(n-1)]
b[-1] = beta
    # measure the execution time and run
import time
elapsed_times = set()
for _ in range(10):
begin = time.time()
solver(A, b, n)
elapsed_times.add(time.time() - begin)
# print(time.time() - begin)
# display_a(A2)
print("time::", n, ",", sum(elapsed_times) / len(elapsed_times))
    # exact solution y
    xs = [h*i for i in range(1, d)]
    ys = [math.sin(x) for x in xs]
    # numerical solution y2
ys2 = solver(A, b, n)
fraction_top = max(abs(y2 - y) for y2,y in zip(ys2, ys))
fraction_bottom = max(abs(y) for y in ys)
print("error::", d, ",", fraction_top / fraction_bottom)
if __name__ == "__main__":
main()
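# Example invocation (assumed shell usage; Q2_D and Q2_N are the environment
# variables read in main() above):
#   Q2_D=10000 Q2_N=100 python q2/main.py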
| [] | [] | ["Q2_D", "Q2_N"] | [] | ["Q2_D", "Q2_N"] | python | 2 | 0 |
common/resolvers.go | package common
import (
"github.com/aws/aws-sdk-go/aws"
"strings"
"os"
"path/filepath"
"errors"
"net"
"github.com/aws/aws-sdk-go/service/ec2"
"os/user"
)
// TODO: return a lazily initialized *ec2.Instance search. We may not actually need it.
func resolveHost(at, explicitHost, instanceId, group, tag string, private bool) (string, *ec2.Instance, error) {
if at != "" || explicitHost != "" {
if at != "" {
inff("Using explicitly given EC2 host in user@host arg: %s", at)
explicitHost = strings.Split(at, "@")[1]
} else {
inff("Using explicitly given EC2 host: %s", explicitHost)
}
dbgf("Is it already an ip address? %s", explicitHost)
ipAddr := net.ParseIP(explicitHost)
if ipAddr == nil {
dbgf("Turn it into ip for public ip address EC2 filter.")
ipAddrs, err := net.LookupIP(explicitHost)
if err == nil {
ipAddr = ipAddrs[0]
} else {
dbgf("It may be an alias. Not much else we can do.")
}
}
ipAttr := "ip-address"
if private {
ipAttr = "private-ip-address"
}
dbgf("Finding EC2 instance by %s=%s", ipAttr, ipAddr)
ec2_, err := findEc2(&ec2.DescribeInstancesInput{
Filters: []*ec2.Filter{
{Name: aws.String("instance-state-name"), Values: []*string{aws.String("running")}},
{Name: aws.String(ipAttr), Values: []*string{aws.String(ipAddr.String())}},
},
})
return explicitHost, ec2_, err
}
if instanceId != "" {
inff("Finding EC2 by instance id: %s", instanceId)
ec2_, err := findEc2(&ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String(instanceId)},
})
if err != nil {
return "", nil, err
}
return firstAddress(ec2_, private), ec2_, err
}
if group != "" {
inff("Finding EC2 by auto-scaling group: %s", group)
ec2_, err := findEc2(&ec2.DescribeInstancesInput{
Filters: []*ec2.Filter{
{Name: aws.String("instance-state-name"), Values: []*string{aws.String("running")}},
{Name: aws.String("tag:aws:autoscaling:groupName"), Values: []*string{aws.String(group)}},
},
})
if err != nil {
return "", nil, err
}
return firstAddress(ec2_, private), ec2_, err
}
if tag != "" {
inff("Finding EC2 by tag: %s", tag)
tagParts := strings.Split(tag, "=")
ec2_, err := findEc2(&ec2.DescribeInstancesInput{
Filters: []*ec2.Filter{
{Name: aws.String("instance-state-name"), Values: []*string{aws.String("running")}},
{Name: aws.String("tag:" + tagParts[0]), Values: []*string{aws.String(tagParts[1])}},
},
})
if err != nil {
return "", nil, err
}
return firstAddress(ec2_, private), ec2_, err
}
return "", nil, errors.New("Unable locate suitable EC2 instance.")
}
func firstAddress(ec2_ *ec2.Instance, private bool) string {
if private && *ec2_.PrivateDnsName != "" {
dbgf("Using PrivateDnsName.")
return *ec2_.PrivateDnsName
} else if *ec2_.PublicDnsName != "" {
dbgf("Using PublicDnsName.")
return *ec2_.PublicDnsName
// TODO This one is problematic because it is omitted from raw JSON when the instance has none
//} else if *ec2_.PublicIpAddress != "" {
// dbgf("Using PublicIpAddress.")
// return *ec2_.PublicIpAddress
} else {
dbgf("Using PrivateDnsName.")
return *ec2_.PrivateDnsName
}
}
func resolveSshConfig(configFilePath string) (*SshConfig, error) {
var paths []string
if configFilePath != "" {
paths = []string{configFilePath}
} else {
var u, err = user.Current()
if err != nil {
return nil, err
}
// Should work for Windows and Unix
windowsCompatHomeDir := strings.Replace(u.HomeDir, "\\", "/", -1)
paths = []string{windowsCompatHomeDir + "/.ssh/config", "/etc/ssh/ssh_config"}
}
return ParseSshConfig(paths, true)
}
func resolveUser(at, explicitUser, resolvedHost string, instance *ec2.Instance, sshConfig *SshConfig) (string, error) {
if at != "" {
explicitUser = strings.Split(at, "@")[0]
inff("Authenticating as given user in user@host arg: %s", at)
return explicitUser, nil
}
if explicitUser != "" {
inff("Authenticating as given user: %s", explicitUser)
return explicitUser, nil
}
resolvedUser, err := sshConfig.GetConfigValue(resolvedHost, "User")
if err != nil {
return "", err
}
if resolvedUser != "" {
dbgf("ssh config specifies user %s for host %s. Will not infer user from EC2 metadata.", resolvedUser, resolvedHost)
return resolvedUser, nil
}
dbgf("Reading details of %s", *instance.ImageId)
ami, err := findAmi(&ec2.DescribeImagesInput{
ImageIds:[]*string{aws.String(*instance.ImageId)},
})
if err != nil {
return "", err
}
switch {
case ami == nil:
inff("Could not find %s. Can't guess user.", *instance.ImageId)
case strings.HasPrefix(*ami.Name, "amzn-"):
explicitUser = "ec2-user"
case strings.HasPrefix(*ami.Name, "ubuntu/"):
explicitUser = "ubuntu"
case strings.HasPrefix(*ami.Name, "debian-"):
explicitUser = "admin"
}
if explicitUser != "" {
inff("Authenticating based on %s as user: %s", *ami.ImageId, explicitUser)
}
return explicitUser, nil
}
func resolveIdent(identity string, useKms bool, ec2 *ec2.Instance, sshConfig *SshConfig) (string, error) {
if identity != "" {
// Is it a valid path already?
_, err := os.Stat(identity)
if err == nil {
inff("Identifying by given file: %s", identity)
return identity, nil
}
if !os.IsNotExist(err) {
// If some error other than not existing...
return "", err
}
// Otherwise we're going to look some more.
// Is it the name of a private key in ~/.ssh/ ?
expandedPath := filepath.Join(os.Getenv("HOME"), ".ssh", identity)
_, err = os.Stat(expandedPath)
if err == nil {
inff("Identifying by file: %s", expandedPath)
return expandedPath, nil
}
if !os.IsNotExist(err) {
// If some error other than not existing...
return "", err
}
// Otherwise we're going to look some more.
// Is it the name of a private key in ~/.ssh/ without the pem suffix?
expandedPath += ".pem"
_, err = os.Stat(expandedPath)
if err == nil {
inff("Identifying by file: %s", expandedPath)
return expandedPath, nil
}
return "", errors.New("Failed to resolve given identity: " + identity)
}
if useKms {
// TODO read key of ec2 instance from kms
return "", nil
}
// TODO If not already covered by ssh config, find key of ec2 instance locally in ~/.ssh/
return "", nil
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 |
main.py | import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from model.model_Tatarchenko15_attention import ModelTatarchenko15Attention
from model.model_Zhou16_attention import ModelZhou16Attention
from model.model_interface import ModelInterface
from data_container import *
import json
import multiprocessing
import os
import glob
import collections
import pandas as pd
from test_utils import *
dataset = None
current_test_input_images = None
current_test_target_images = None
current_test_poses = None
def initialize_tensorflow():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
config.log_device_placement = True # to log device placement (on which device the operation ran)
# (nothing gets printed in Jupyter, only if you run it standalone)
sess = tf.Session(config=config)
set_session(sess)
def build_model_from_dictionary(data: DataLoader, **kwargs):
model_type = kwargs["model_type"]
model_class = ModelInterface
if model_type == 't':
model_class = ModelTatarchenko15Attention
elif model_type == 'z':
model_class = ModelZhou16Attention
attention_strategy = kwargs.get("attention_strategy", None)
attention_strategy_details = kwargs.get("attention_strategy_details", None)
random_seed_index = kwargs.get("random_seed_index", None)
image_size = kwargs.get("image_size", 256)
k = kwargs.get("k", 2)
pose_input_size = None
if data.name == 'kitti' or data.name == 'synthia':
pose_input_size = data.pose_size
model = model_class(
image_size=image_size,
attention_strategy=attention_strategy,
attention_strategy_details=attention_strategy_details,
additional_name=random_seed_index,
pose_input_size=pose_input_size,
k=k
)
return model
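# An illustrative model_info dict for build_model_from_dictionary (the keys
# match the kwargs read above; the values are placeholders, not a known-good
# configuration):
#
#   {"model_type": "z", "attention_strategy": "...", "image_size": 256, "k": 2}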
def find_load_model_in_folder(model, parent_folder, dataset_name):
print(model.name)
target_name = "%s/%s_%s*/*.h5" % (parent_folder, model.name, dataset_name)
files = glob.glob(target_name)
print(target_name)
if len(files) > 1:
min_file = None
min_len = 100000
for f in files:
s = len(f.split("_"))
if s < min_len:
min_len = s
min_file = f
load_file = min_file
else:
load_file = files[0]
return load_file
def load_dataset_from_config(**kwargs):
dataset_name = kwargs["dataset"]
dataset_format = kwargs["dataset_format"]
image_size = kwargs.get("image_size", 256)
is_pose_matrix = kwargs.get("is_pose_matrix", False)
train_or_test = kwargs.get("train_or_test", "train")
if dataset_name == "kitti" or dataset_name == "synthia":
return SceneDataLoaderNumpy(dataset_name, use_pose_matrix=is_pose_matrix, image_size=image_size)
elif dataset_name == "car" or dataset_name == "chair":
return ObjectDataLoaderNumpy(dataset_name, image_size=image_size, train_or_test=train_or_test)
def train_single_model(x):
i, gpu_id, config_file_name = x
kwargs = json.load(open(config_file_name))
ith_model_info = kwargs["model_list"][i]
model = build_model_from_dictionary(dataset, **ith_model_info)
print("model constructed!")
additional_name = kwargs.get("additional_name", None)
if additional_name is not None:
random.seed(additional_name * 4219 + 123)
np.random.seed(additional_name * 4219 + 123)
else:
random.seed(1000)
np.random.seed(1000)
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
initialize_tensorflow()
model.train(dataset, **kwargs)
def train_all_using_multiprocessing(config_file_name):
global dataset
print("start to load dataset")
config = json.load(open(config_file_name))
model_counts = len(config["model_list"])
dataset = load_dataset_from_config(**config)
print("dataset loading finished")
available_gpu_ids = config["available_gpu_ids"]
gpu_ids = [available_gpu_ids[i % len(available_gpu_ids)] for i in range(model_counts)]
train_infos = [(i, gpu_ids[i], config_file_name) for i in range(model_counts)]
i = 0
k = config.get("multiprocess_max", model_counts)
print("start multiprocessing training")
while i < model_counts:
with multiprocessing.Pool(k) as p:
p.map(train_single_model, train_infos[i:min(i + k, model_counts)], chunksize=1)
i += k
def test_single_model(x):
i, gpu_id, config_file_name = x
kwargs = json.load(open(config_file_name))
ith_model_info = kwargs["model_list"][i]
model = build_model_from_dictionary(dataset, **ith_model_info)
try:
print("model constructed!")
random.seed(883222)
np.random.seed(883222)
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
initialize_tensorflow()
parent_folder = kwargs["parent_folder"]
load_file = kwargs.get("load_file", find_load_model_in_folder(model, parent_folder, dataset.name))
model.build_model()
model.load_model(load_file)
batch_size = kwargs.get("batch_size", 16)
test_method = kwargs.get("test_method", "exhaustive")
mae_all = None
ssim_all = None
# scene
if dataset.name == 'kitti' or dataset.name == 'synthia':
if test_method == 'exhaustive':
mae, ssim, mae_all, ssim_all = test_for_all_scenes(dataset, model, batch_size=batch_size)
else:
mae, ssim = test_for_random_scene(dataset, model, N=kwargs.get("max_iter", 20000), batch_size=batch_size)
# object
else:
if test_method == 'exhaustive':
mae, ssim, mae_all, ssim_all = test_for_all_objects(dataset, model, batch_size=batch_size)
else:
mae, ssim = test_for_random_scene(dataset, model, N=kwargs.get("max_iter", 20000), batch_size=batch_size)
return mae, ssim, mae_all, ssim_all, model.name
except Exception as ex:
print(ex)
return 0, 0, None, None, model.name
def test_all_using_multiprocessing(config_file_name):
global dataset
config = json.load(open(config_file_name))
model_counts = len(config["model_list"])
config["train_or_test"] = "test"
dataset = load_dataset_from_config(**config)
print("dataset loading finished")
available_gpu_ids = config["available_gpu_ids"]
gpu_ids = [available_gpu_ids[i % len(available_gpu_ids)] for i in range(model_counts)]
train_infos = [(i, gpu_ids[i], config_file_name) for i in range(model_counts)]
k = config.get("multiprocess_max", model_counts)
with multiprocessing.Pool(k) as p:
results = p.map(test_single_model, train_infos, chunksize=1)
maes, ssims, mae_alls, ssim_alls, names = zip(*results)
raw_data = collections.OrderedDict()
raw_data['name'] = names
raw_data['mae'] = maes
raw_data['ssim'] = ssims
df = pd.DataFrame(raw_data)
df = df.set_index("name")
mae_alls = np.array(mae_alls)
ssim_alls = np.array(ssim_alls)
diff_N = mae_alls.shape[1]
mae_all_df = pd.DataFrame(mae_alls, index=names, columns=[i - (diff_N // 2) for i in range(diff_N)])
ssim_all_df = pd.DataFrame(ssim_alls, index=names, columns=[i - (diff_N // 2) for i in range(diff_N)])
result_export_folder = config["result_export_folder"]
if not os.path.exists(result_export_folder):
os.makedirs(result_export_folder)
started_time_date = time.strftime("%Y%m%d_%H%M%S")
df.to_csv("%s/%s_%s.csv" % (result_export_folder, "total_result", started_time_date))
mae_all_df.to_csv("%s/%s_%s.csv" % (result_export_folder, "total_result_mae", started_time_date))
ssim_all_df.to_csv("%s/%s_%s.csv" % (result_export_folder, "total_result_ssim", started_time_date))
def test_and_export_picture_for_single_model(x):
i, gpu_id, config_file_name = x
kwargs = json.load(open(config_file_name))
ith_model_info = kwargs["model_list"][i]
model = build_model_from_dictionary(dataset, **ith_model_info)
try:
print("model constructed!")
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
initialize_tensorflow()
parent_folder = kwargs["parent_folder"]
load_file = kwargs.get("load_file", find_load_model_in_folder(model, parent_folder, dataset.name))
model.build_model()
model.load_model(load_file)
poseinfo_processed = model.process_pose_info(dataset, current_test_poses)
pred_images = model.get_predicted_image((current_test_input_images, poseinfo_processed))
#pred_image_tensor = tf.convert_to_tensor(pred_images, dtype=tf.float32)
#target_image_original_tensor = tf.convert_to_tensor(target_image_original, dtype=tf.float32)
#ssim_values = K.eval(ssim_custom(pred_image_tensor, target_image_original_tensor))
#mae_values = K.eval(mae_custom(pred_image_tensor, target_image_original_tensor))
return pred_images, None, None, model.name
except Exception as ex:
print(ex)
return None, None, None, model.name
def test_and_export_picture_for_models_using_multiprocessing(config_file_name):
global dataset, current_test_input_images, current_test_target_images, current_test_poses
config = json.load(open(config_file_name))
model_counts = len(config["model_list"])
config["train_or_test"] = "test"
dataset = load_dataset_from_config(**config)
print("dataset loading finished")
available_gpu_ids = config["available_gpu_ids"]
gpu_ids = [available_gpu_ids[i % len(available_gpu_ids)] for i in range(model_counts)]
train_infos = [(i, gpu_ids[i], config_file_name) for i in range(model_counts)]
k = config.get("multiprocess_max", model_counts)
target_scene_infos = config.get("target_scene_infos", None)
target_scene_n = config.get("target_scene_n", 5)
result_export_folder = config.get("result_export_folder", None)
index_info = None
if target_scene_infos is None:
test_data, index_info = dataset.get_batched_data(target_scene_n, single_model=False, return_info=True, is_train=False)
print(index_info)
else:
test_data = dataset.get_specific_data(target_scene_infos)
current_test_input_images, current_test_target_images, current_test_poses = test_data
with multiprocessing.Pool(k) as p:
results = p.map(test_and_export_picture_for_single_model, train_infos, chunksize=1)
images, maes, ssims, names = zip(*results)
# 1. export images
xs = []
xs.append(np.concatenate(current_test_input_images, axis=0))
xs.append(np.concatenate(current_test_target_images, axis=0))
    pred_image_temp = None
    for pred_image in images:
        if pred_image is not None:
            xs.append(np.concatenate(pred_image, axis=0))
            # remember the last successful prediction so that failed models
            # can be padded with a zero image of the same shape
            pred_image_temp = pred_image
        elif pred_image_temp is not None:
            xs.append(np.concatenate(np.zeros_like(pred_image_temp), axis=0))
total_image = np.concatenate(tuple(xs), axis=1)
if not os.path.exists(result_export_folder):
os.makedirs(result_export_folder)
started_time_date = time.strftime("%Y%m%d_%H%M%S")
save_pred_images(total_image, "%s/%s_%s" % (result_export_folder, "total_images", started_time_date))
# export model names
raw_data = collections.OrderedDict()
raw_data['name'] = names
df = pd.DataFrame(raw_data)
df = df.set_index("name")
df.to_csv("%s/%s_%s.csv" % (result_export_folder, "total_images_models", started_time_date))
if index_info is not None:
if dataset.name == 'kitti' or dataset.name == 'synthia':
scene_ids, input_ids, target_ids = zip(*index_info)
raw_data = collections.OrderedDict()
raw_data['scene_id'] = scene_ids
raw_data['input_id'] = input_ids
raw_data['target_id'] = target_ids
df = pd.DataFrame(raw_data)
df.to_csv("%s/%s_%s.csv" % (result_export_folder, "tested_samples_index_info", started_time_date), index=False)
def test_and_export_feature_map_for_single_model(x):
i, gpu_id, config_file_name = x
kwargs = json.load(open(config_file_name))
ith_model_info = kwargs["model_list"][i]
model = build_model_from_dictionary(dataset, **ith_model_info)
print("model constructed!")
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
initialize_tensorflow()
parent_folder = ith_model_info["parent_folder"]
result_export_folder = kwargs["result_export_folder"]
load_file = kwargs.get("load_file", find_load_model_in_folder(model, parent_folder, dataset.name))
model.build_model()
model.load_model(load_file)
poseinfo_processed = model.process_pose_info(dataset, current_test_poses)
current_test_data = (current_test_input_images, current_test_target_images, poseinfo_processed)
feature_map = show_feature_map(current_test_data, model)
started_time_date = time.strftime("%Y%m%d_%H%M%S")
print(feature_map.shape)
save_pred_images(feature_map, "%s/%s_%s" % (result_export_folder, model.name, started_time_date))
def test_and_export_feature_map_for_models_using_multiprocessing(config_file_name):
global dataset, current_test_input_images, current_test_target_images, current_test_poses
config = json.load(open(config_file_name))
model_counts = len(config["model_list"])
dataset = load_dataset_from_config(**config)
print("dataset loading finished")
available_gpu_ids = config["available_gpu_ids"]
gpu_ids = [available_gpu_ids[i % len(available_gpu_ids)] for i in range(model_counts)]
train_infos = [(i, gpu_ids[i], config_file_name) for i in range(model_counts)]
k = config.get("multiprocess_max", model_counts)
target_scene_infos = config.get("target_scene_infos", None)
if target_scene_infos is None:
test_data, index_info = dataset.get_batched_data(1, single_model=False, return_info=True)
print(index_info)
else:
test_data = dataset.get_specific_data(target_scene_infos)
current_test_input_images, current_test_target_images, current_test_poses = test_data
with multiprocessing.Pool(k) as p:
p.map(test_and_export_feature_map_for_single_model, train_infos, chunksize=1)
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 |
modules/runx/runx.py | import os
import platform
import sys
from datetime import datetime
from typing import TYPE_CHECKING, Optional, Tuple
import boto3
import click
import requests
from botocore.config import Config
from getmac import get_mac_address
from git.config import GitConfigParser
from google.api_core.exceptions import NotFound
from google.cloud import secretmanager
from mypy_boto3_ssm.client import SSMClient
from modules.base import ModuleProcessor
from opta.constants import VERSION
from opta.core.gcp import GCP
from opta.exceptions import UserErrors
from opta.utils import logger
if TYPE_CHECKING:
from opta.layer import Layer
from opta.module import Module
if os.environ.get("OPTA_STAGING"):
OPTA_DOMAIN = "api.staging.runx.dev"
else:
OPTA_DOMAIN = "api.app.runx.dev"
class RunxProcessor(ModuleProcessor):
def __init__(self, module: "Module", layer: "Layer"):
if module.data["type"] != "runx":
raise Exception(f"The module {module.name} was expected to be of type runx")
self.user_id = GitConfigParser().get_value("user", "email", "no_user")
self.device_id = get_mac_address()
self.os_name = os.name
self.platform = platform.system()
self.os_version = platform.version()
super(RunxProcessor, self).__init__(module, layer)
def process(self, module_idx: int) -> None:
logger.debug("Checking for runx api key secret")
current_api_key = self.fetch_secret()
if current_api_key is None:
self.set_secret()
else:
self.fetch_jwt(current_api_key)
def fetch_secret(self) -> Optional[str]:
if self.layer.cloud == "aws":
return self._fetch_aws_secret()
elif self.layer.cloud == "google":
return self._fetch_gcp_secret()
else:
raise Exception("Can not handle secrets of type")
def _fetch_aws_secret(self) -> Optional[str]:
providers = self.layer.gen_providers(0)
region = providers["provider"]["aws"]["region"]
ssm_client: SSMClient = boto3.client("ssm", config=Config(region_name=region))
try:
parameter = ssm_client.get_parameter(
Name=f"/opta-{self.layer.get_env()}/runx-api-key", WithDecryption=True
)
return parameter["Parameter"]["Value"]
except ssm_client.exceptions.ParameterNotFound:
return None
def _fetch_gcp_secret(self) -> Optional[str]:
credentials, project_id = GCP.get_credentials()
sm_client = secretmanager.SecretManagerServiceClient(credentials=credentials)
name = f"projects/{project_id}/secrets/opta-{self.layer.get_env()}-runx-api-key/versions/1"
try:
# Access the secret version.
response = sm_client.access_secret_version(
request=secretmanager.AccessSecretVersionRequest({"name": name})
)
return response.payload.data.decode("UTF-8")
except NotFound:
return None
def set_secret(self) -> None:
while True:
value = click.prompt("Please enter your runx api key", type=click.STRING,)
try:
self.fetch_jwt(value)
except UserErrors:
logger.warn(
"The api key which you passed was invalid, please provide a valid api key from runx"
)
else:
break
if self.layer.cloud == "aws":
return self._set_aws_secret(value)
elif self.layer.cloud == "google":
return self._set_gcp_secret(value)
else:
raise Exception("Can not handle secrets of type")
def _set_aws_secret(self, secret: str) -> None:
providers = self.layer.gen_providers(0)
region = providers["provider"]["aws"]["region"]
ssm_client: SSMClient = boto3.client("ssm", config=Config(region_name=region))
ssm_client.put_parameter(
Name=f"/opta-{self.layer.get_env()}/runx-api-key",
Value=secret,
Type="SecureString",
)
def _set_gcp_secret(self, secret: str) -> None:
credentials, project_id = GCP.get_credentials()
sm_client = secretmanager.SecretManagerServiceClient(credentials=credentials)
sm_secret = sm_client.create_secret(
request=secretmanager.CreateSecretRequest(
{
"parent": f"projects/{project_id}",
"secret_id": f"opta-{self.layer.get_env()}-runx-api-key",
"secret": {"replication": {"automatic": {}}},
}
)
)
sm_client.add_secret_version(
request=secretmanager.AddSecretVersionRequest(
{"parent": sm_secret.name, "payload": {"data": secret.encode("utf-8")}}
)
)
def post_hook(self, module_idx: int, exception: Optional[Exception]) -> None:
api_key = self.fetch_secret()
if api_key is None:
raise Exception(
"The api key seems to have just disappeared from the secret storage"
)
validation_data, jwt = self.fetch_jwt(api_key)
is_environment = self.layer.parent is None
url_path = "/config/environments" if is_environment else "/config/services"
body = {
"org_id": validation_data["org_id"],
"name": self.layer.name,
"opta_version": VERSION,
"status": "SUCCESS" if exception is None else "FAILURE",
"spec": self.layer.original_spec,
"metadata": {
"user_id": self.user_id,
"device_id": self.device_id,
"os_name": self.os_name,
"platform": self.platform,
"os_version": self.os_version,
"active_variables": self.layer.variables,
"module_idx": module_idx,
"argv": sys.argv[:],
},
"time": datetime.utcnow().isoformat(),
}
if not is_environment:
body["environment_name"] = self.layer.parent.name # type: ignore
logger.debug("Sending layer deployment data over to opta backend")
resp = requests.post(
f"https://{OPTA_DOMAIN}{url_path}", json=body, headers={"opta": jwt}
)
if resp.status_code != 201:
raise Exception(
f"Invalid response when attempting to send data to backend: {resp.json()}"
)
def fetch_jwt(self, api_key: str) -> Tuple[dict, str]:
resp = requests.post(
f"https://{OPTA_DOMAIN}/user/apikeys/validate", json={"api_key": api_key}
)
if resp.status_code == 404:
raise UserErrors(
f"Looks like it was an invalid api key: {resp.json()['message']}"
)
if resp.status_code != 200:
raise Exception(
f"Invalid response when attempting to validate the api token: {resp.json()}"
)
jwt = resp.headers.get("opta")
if jwt is None:
raise Exception(f"Got an invalid jwt back: {jwt}")
return resp.json(), jwt
| [] | [] | ["OPTA_STAGING"] | [] | ["OPTA_STAGING"] | python | 1 | 0 |
dltb/thirdparty/caffe/tests/test_caffe_network.py | from .conf import MODELS_DIRECTORY
from unittest import TestCase
import os
import numpy as np
from keras.datasets import mnist
os.environ['GLOG_minloglevel'] = '2' # Suppress verbose output from caffe.
## The following lines allow the test to be run from within the test
## directory (and provide the MODELS_DIRECTORY):
# if not __package__: import __init__
# if __package__: from . import MODELS_DIRECTORY
# else: from __init__ import MODELS_DIRECTORY
from network.caffe import Network as CaffeNetwork
from network.layers import caffe_layers
class TestCaffeNetwork(TestCase):
@classmethod
def setUpClass(cls):
model_def = os.path.join(MODELS_DIRECTORY,
'example_caffe_network_deploy.prototxt')
model_weights = os.path.join(MODELS_DIRECTORY,'mnist.caffemodel')
cls.loaded_network = CaffeNetwork(model_def=model_def,
model_weights=model_weights)
# Load the images from the test set and normalize.
cls.data = mnist.load_data()[1][0]
cls.data = cls.data / cls.data.max()
# Test layer properties from layer dict.
def test_layer_dict(self):
self.assertEqual(
list(self.loaded_network.layer_dict.keys()),
['conv2d_1',
'max_pooling2d_1',
'conv2d_2',
'dropout_1',
'flatten_1',
'dense_1',
'dropout_2',
'dense_2']
)
# Check that the right types where selected.
self.assertTrue(isinstance(self.loaded_network.layer_dict['conv2d_1'], caffe_layers.CaffeConv2D))
self.assertTrue(isinstance(self.loaded_network.layer_dict['max_pooling2d_1'], caffe_layers.CaffeMaxPooling2D))
self.assertTrue(isinstance(self.loaded_network.layer_dict['conv2d_2'], caffe_layers.CaffeConv2D))
self.assertTrue(isinstance(self.loaded_network.layer_dict['dropout_1'], caffe_layers.CaffeDropout))
self.assertTrue(isinstance(self.loaded_network.layer_dict['flatten_1'], caffe_layers.CaffeFlatten))
self.assertTrue(isinstance(self.loaded_network.layer_dict['dense_1'], caffe_layers.CaffeDense))
self.assertTrue(isinstance(self.loaded_network.layer_dict['dropout_2'], caffe_layers.CaffeDropout))
self.assertTrue(isinstance(self.loaded_network.layer_dict['dense_2'], caffe_layers.CaffeDense))
# Testing the layer properties.
def test_input_shape(self):
self.assertEqual((None, 13, 13, 32), self.loaded_network.layer_dict['conv2d_2'].input_shape)
def test_output_shape(self):
self.assertEqual((None, 11, 11, 32), self.loaded_network.layer_dict['conv2d_2'].output_shape)
def test_num_parameters(self):
self.assertEqual(9248, self.loaded_network.layer_dict['conv2d_2'].num_parameters)
def test_weights(self):
self.assertTrue(
np.allclose(
np.array([[0.00441102, 0.03252346, 0.03093702],
[-0.02963322, -0.01514516, 0.00517636],
[-0.04767472, -0.05382977, -0.00228736]], dtype='float32'),
self.loaded_network.layer_dict['conv2d_2'].weights[0, 0]
)
)
def test_bias(self):
# layer.get_weights() gives a list containing weights and bias.
self.assertAlmostEqual(
-0.089870423,
self.loaded_network.layer_dict['conv2d_2'].bias[0]
)
def test_strides(self):
self.assertEqual((1, 1),
self.loaded_network.layer_dict['conv2d_2'].strides)
self.assertEqual((2, 2),
self.loaded_network.layer_dict['max_pooling2d_1'].strides)
with self.assertRaises(AttributeError):
self.loaded_network.layer_dict['dense_1'].strides
def test_kernel_size(self):
self.assertEqual((3, 3),
self.loaded_network.layer_dict['conv2d_2'].kernel_size)
with self.assertRaises(AttributeError):
self.loaded_network.layer_dict['dense_1'].kernel_size
with self.assertRaises(AttributeError):
self.loaded_network.layer_dict['max_pooling2d_1'].kernel_size
def test_filters(self):
self.assertEqual(32,
self.loaded_network.layer_dict['conv2d_2'].filters)
with self.assertRaises(AttributeError):
self.loaded_network.layer_dict['dense_1'].filters
with self.assertRaises(AttributeError):
self.loaded_network.layer_dict['max_pooling2d_1'].filters
def test_pool_size(self):
self.assertEqual((3, 3),
self.loaded_network.layer_dict['max_pooling2d_1'].pool_size)
with self.assertRaises(AttributeError):
self.loaded_network.layer_dict['dense_1'].pool_size
with self.assertRaises(AttributeError):
self.loaded_network.layer_dict['conv2d_2'].pool_size
# Test loaded_network functions.
def test_get_activations(self):
input_image = self.data[0:1, :, :, np.newaxis]
activations = \
self.loaded_network.get_activations(input_image, 'dense_2')
prediction = np.array([ 3.51925933e-09, 2.81613372e-10, 2.09109629e-07,
1.37495732e-07, 3.44873262e-11, 7.33259398e-10,
2.16445026e-13, 9.99998212e-01, 1.22597754e-09,
1.40018460e-06], dtype='float32')
        # Increase absolute tolerance a little to make it work.
self.assertTrue(
np.allclose(prediction, activations, atol=1e-6)
)
def test_get_net_input(self):
input_image = self.data[0:1, :, :, np.newaxis]
activations = self.loaded_network.get_net_input('dense_2', input_image)
prediction = np.array([-1.36028111, -3.88575125, 2.72432756, 2.30506134,
-5.98569441, -2.92878628, -11.05670547, 18.10473251,
-2.4147923, 4.62582827], dtype='float32')
# Increase absolute tolerance a little to make it work.
self.assertTrue(
np.allclose(prediction, activations, atol=1e-6)
)
def test_get_layer_input_shape(self):
self.assertEqual((None, 13, 13, 32), self.loaded_network.get_layer_input_shape('conv2d_2'))
def test_get_layer_output_shape(self):
self.assertEqual((None, 11, 11, 32), self.loaded_network.get_layer_output_shape('conv2d_2'))
def test_get_layer_weights(self):
weights = self.loaded_network.get_layer_weights('conv2d_2')[0, 0]
self.assertTrue(
np.allclose(
weights,
np.array([[0.00441102, 0.03252346, 0.03093702],
[-0.02963322, -0.01514516, 0.00517636],
[-0.04767472, -0.05382977, -0.00228736]], dtype='float32')
)
)
| [] | [] | ["GLOG_minloglevel"] | [] | ["GLOG_minloglevel"] | python | 1 | 0 |
vendor/github.com/operator-framework/api/pkg/validation/internal/operatorhub.go | package internal
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/mail"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/blang/semver"
"github.com/operator-framework/api/pkg/manifests"
"github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/api/pkg/validation/errors"
interfaces "github.com/operator-framework/api/pkg/validation/interfaces"
)
var OperatorHubValidator interfaces.Validator = interfaces.ValidatorFunc(validateOperatorHub)
var validCapabilities = map[string]struct{}{
"Basic Install": struct{}{},
"Seamless Upgrades": struct{}{},
"Full Lifecycle": struct{}{},
"Deep Insights": struct{}{},
"Auto Pilot": struct{}{},
}
var validMediatypes = map[string]struct{}{
"image/gif": struct{}{},
"image/jpeg": struct{}{},
"image/png": struct{}{},
"image/svg+xml": struct{}{},
}
var validCategories = map[string]struct{}{
"AI/Machine Learning": struct{}{},
"Application Runtime": struct{}{},
"Big Data": struct{}{},
"Cloud Provider": struct{}{},
"Developer Tools": struct{}{},
"Database": struct{}{},
"Integration & Delivery": struct{}{},
"Logging & Tracing": struct{}{},
"Monitoring": struct{}{},
"Networking": struct{}{},
"OpenShift Optional": struct{}{},
"Security": struct{}{},
"Storage": struct{}{},
"Streaming & Messaging": struct{}{},
}
func validateOperatorHub(objs ...interface{}) (results []errors.ManifestResult) {
for _, obj := range objs {
switch v := obj.(type) {
case *manifests.Bundle:
results = append(results, validateBundleOperatorHub(v))
}
}
return results
}
func validateBundleOperatorHub(bundle *manifests.Bundle) errors.ManifestResult {
	// Check for nil before touching bundle.Name, otherwise this would panic.
	if bundle == nil {
		result := errors.ManifestResult{}
		result.Add(errors.ErrInvalidBundle("Bundle is nil", nil))
		return result
	}
	result := errors.ManifestResult{Name: bundle.Name}
if bundle.CSV == nil {
result.Add(errors.ErrInvalidBundle("Bundle csv is nil", bundle.Name))
return result
}
errs := validateHubCSVSpec(*bundle.CSV)
for _, err := range errs {
result.Add(errors.ErrInvalidCSV(err.Error(), bundle.CSV.GetName()))
}
return result
}
func validateHubCSVSpec(csv v1alpha1.ClusterServiceVersion) []error {
var errs []error
if csv.Spec.Provider.Name == "" {
errs = append(errs, fmt.Errorf("csv.Spec.Provider.Name not specified"))
}
for _, maintainer := range csv.Spec.Maintainers {
if maintainer.Name == "" || maintainer.Email == "" {
errs = append(errs, fmt.Errorf("csv.Spec.Maintainers elements should contain both name and email"))
}
if maintainer.Email != "" {
_, err := mail.ParseAddress(maintainer.Email)
if err != nil {
errs = append(errs, fmt.Errorf("csv.Spec.Maintainers email %s is invalid: %v", maintainer.Email, err))
}
}
}
for _, link := range csv.Spec.Links {
if link.Name == "" || link.URL == "" {
errs = append(errs, fmt.Errorf("csv.Spec.Links elements should contain both name and url"))
}
if link.URL != "" {
_, err := url.ParseRequestURI(link.URL)
if err != nil {
errs = append(errs, fmt.Errorf("csv.Spec.Links url %s is invalid: %v", link.URL, err))
}
}
}
if csv.GetAnnotations() == nil {
csv.SetAnnotations(make(map[string]string))
}
if capability, ok := csv.ObjectMeta.Annotations["capabilities"]; ok {
if _, ok := validCapabilities[capability]; !ok {
errs = append(errs, fmt.Errorf("csv.Metadata.Annotations.Capabilities %s is not a valid capabilities level", capability))
}
}
// spec.Version needs to be set
emptyVersion, _ := semver.New("0.0.0")
if csv.Spec.Version.Equals(*emptyVersion) {
errs = append(errs, fmt.Errorf("csv.Spec.Version is not set"))
}
if csv.Spec.Icon != nil {
// only one icon is allowed
if len(csv.Spec.Icon) != 1 {
errs = append(errs, fmt.Errorf("csv.Spec.Icon should only have one element"))
}
icon := csv.Spec.Icon[0]
if icon.MediaType == "" || icon.Data == "" {
errs = append(errs, fmt.Errorf("csv.Spec.Icon elements should contain both data and mediatype"))
}
if icon.MediaType != "" {
if _, ok := validMediatypes[icon.MediaType]; !ok {
errs = append(errs, fmt.Errorf("csv.Spec.Icon %s does not have a valid mediatype", icon.MediaType))
}
}
} else {
errs = append(errs, fmt.Errorf("csv.Spec.Icon not specified"))
}
if categories, ok := csv.ObjectMeta.Annotations["categories"]; ok {
categorySlice := strings.Split(categories, ",")
// use custom categories for validation if provided
customCategoriesPath := os.Getenv("OPERATOR_BUNDLE_CATEGORIES")
if customCategoriesPath != "" {
customCategories, err := extractCategories(customCategoriesPath)
if err != nil {
errs = append(errs, fmt.Errorf("could not extract custom categories from categories %#v: %s", customCategories, err))
return errs
}
for _, category := range categorySlice {
if _, ok := customCategories[category]; !ok {
errs = append(errs, fmt.Errorf("csv.Metadata.Annotations.Categories %s is not a valid custom category", category))
}
}
} else {
// use default categories
for _, category := range categorySlice {
if _, ok := validCategories[category]; !ok {
errs = append(errs, fmt.Errorf("csv.Metadata.Annotations.Categories %s is not a valid category", category))
}
}
}
}
return errs
}
type categories struct {
Contents []string `json:"categories"`
}
// extractCategories reads a custom categories file and returns the contents in a map[string]struct{}
func extractCategories(path string) (map[string]struct{}, error) {
path, err := filepath.Abs(path)
if err != nil {
return nil, fmt.Errorf("finding category file: %w", err)
}
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("reading category file: %w", err)
}
cat := categories{}
err = json.Unmarshal(data, &cat)
if err != nil {
return nil, fmt.Errorf("unmarshaling category file: %w", err)
}
customCategories := make(map[string]struct{})
for _, c := range cat.Contents {
customCategories[c] = struct{}{}
}
return customCategories, nil
}
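// The expected file shape, inferred from the categories struct above
// (the category names are illustrative):
//
//	{"categories": ["My Custom Category", "Another Category"]}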
| ["\"OPERATOR_BUNDLE_CATEGORIES\""] | [] | ["OPERATOR_BUNDLE_CATEGORIES"] | [] | ["OPERATOR_BUNDLE_CATEGORIES"] | go | 1 | 0 |
tsdb/store_test.go | package tsdb_test
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math"
"math/rand"
"os"
"path/filepath"
"reflect"
"regexp"
"sort"
"strings"
"testing"
"time"
"github.com/influxdata/influxdb/tsdb/index/inmem"
"github.com/davecgh/go-spew/spew"
"github.com/influxdata/influxdb/internal"
"github.com/influxdata/influxdb/logger"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/pkg/deep"
"github.com/influxdata/influxdb/query"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxql"
)
// Ensure the store can delete a retention policy and all shards under
// it.
func TestStore_DeleteRetentionPolicy(t *testing.T) {
t.Parallel()
test := func(index string) {
s := MustOpenStore(index)
defer s.Close()
// Create a new shard and verify that it exists.
if err := s.CreateShard("db0", "rp0", 1, true); err != nil {
t.Fatal(err)
} else if sh := s.Shard(1); sh == nil {
t.Fatalf("expected shard")
}
// Create a new shard under the same retention policy, and verify
// that it exists.
if err := s.CreateShard("db0", "rp0", 2, true); err != nil {
t.Fatal(err)
} else if sh := s.Shard(2); sh == nil {
t.Fatalf("expected shard")
}
// Create a new shard under a different retention policy, and
// verify that it exists.
if err := s.CreateShard("db0", "rp1", 3, true); err != nil {
t.Fatal(err)
} else if sh := s.Shard(3); sh == nil {
t.Fatalf("expected shard")
}
// Deleting the rp0 retention policy does not return an error.
if err := s.DeleteRetentionPolicy("db0", "rp0"); err != nil {
t.Fatal(err)
}
// It deletes the shards under that retention policy.
if sh := s.Shard(1); sh != nil {
t.Errorf("shard 1 was not deleted")
}
if sh := s.Shard(2); sh != nil {
t.Errorf("shard 2 was not deleted")
}
// It deletes the retention policy directory.
if got, exp := dirExists(filepath.Join(s.Path(), "db0", "rp0")), false; got != exp {
t.Error("directory exists, but should have been removed")
}
// It deletes the WAL retention policy directory.
if got, exp := dirExists(filepath.Join(s.EngineOptions.Config.WALDir, "db0", "rp0")), false; got != exp {
t.Error("directory exists, but should have been removed")
}
// Reopen other shard and check it still exists.
if err := s.Reopen(); err != nil {
t.Error(err)
} else if sh := s.Shard(3); sh == nil {
t.Errorf("shard 3 does not exist")
}
// It does not delete other retention policy directories.
if got, exp := dirExists(filepath.Join(s.Path(), "db0", "rp1")), true; got != exp {
t.Error("directory does not exist, but should")
}
if got, exp := dirExists(filepath.Join(s.EngineOptions.Config.WALDir, "db0", "rp1")), true; got != exp {
t.Error("directory does not exist, but should")
}
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) { test(index) })
}
}
// Ensure the store can create a new shard.
func TestStore_CreateShard(t *testing.T) {
t.Parallel()
test := func(index string) {
s := MustOpenStore(index)
defer s.Close()
// Create a new shard and verify that it exists.
if err := s.CreateShard("db0", "rp0", 1, true); err != nil {
t.Fatal(err)
} else if sh := s.Shard(1); sh == nil {
t.Fatalf("expected shard")
}
// Create another shard and verify that it exists.
if err := s.CreateShard("db0", "rp0", 2, true); err != nil {
t.Fatal(err)
} else if sh := s.Shard(2); sh == nil {
t.Fatalf("expected shard")
}
// Reopen shard and recheck.
if err := s.Reopen(); err != nil {
t.Fatal(err)
} else if sh := s.Shard(1); sh == nil {
t.Fatalf("expected shard(1)")
} else if sh = s.Shard(2); sh == nil {
t.Fatalf("expected shard(2)")
}
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) { test(index) })
}
}
// Ensure the store does not return an error when deleting from a non-existent db.
func TestStore_DeleteSeries_NonExistentDB(t *testing.T) {
t.Parallel()
test := func(index string) {
s := MustOpenStore(index)
defer s.Close()
if err := s.DeleteSeries("db0", nil, nil); err != nil {
t.Fatal(err.Error())
}
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) { test(index) })
}
}
// Ensure the store can delete an existing shard.
func TestStore_DeleteShard(t *testing.T) {
t.Parallel()
test := func(index string) error {
s := MustOpenStore(index)
defer s.Close()
// Create a new shard and verify that it exists.
if err := s.CreateShard("db0", "rp0", 1, true); err != nil {
return err
} else if sh := s.Shard(1); sh == nil {
return fmt.Errorf("expected shard")
}
// Create another shard.
if err := s.CreateShard("db0", "rp0", 2, true); err != nil {
return err
} else if sh := s.Shard(2); sh == nil {
return fmt.Errorf("expected shard")
}
// and another, but in a different db.
if err := s.CreateShard("db1", "rp0", 3, true); err != nil {
return err
} else if sh := s.Shard(3); sh == nil {
return fmt.Errorf("expected shard")
}
// Write series data to the db0 shards.
s.MustWriteToShardString(1, "cpu,servera=a v=1", "cpu,serverb=b v=1", "mem,serverc=a v=1")
s.MustWriteToShardString(2, "cpu,servera=a v=1", "mem,serverc=a v=1")
// Write similar data to db1 database
s.MustWriteToShardString(3, "cpu,serverb=b v=1")
// Reopen the store and check all shards still exist
if err := s.Reopen(); err != nil {
return err
}
for i := uint64(1); i <= 3; i++ {
if sh := s.Shard(i); sh == nil {
return fmt.Errorf("shard %d missing", i)
}
}
// Remove the first shard from the store.
if err := s.DeleteShard(1); err != nil {
return err
}
// cpu,serverb=b should be removed from the series file for db0 because
// shard 1 was the only owner of that series.
// Verify by getting all tag keys.
keys, err := s.TagKeys(nil, []uint64{2}, nil)
if err != nil {
return err
}
expKeys := []tsdb.TagKeys{
{Measurement: "cpu", Keys: []string{"servera"}},
{Measurement: "mem", Keys: []string{"serverc"}},
}
if got, exp := keys, expKeys; !reflect.DeepEqual(got, exp) {
return fmt.Errorf("got keys %v, expected %v", got, exp)
}
// Verify that the same series was not removed from other databases'
// series files.
if keys, err = s.TagKeys(nil, []uint64{3}, nil); err != nil {
return err
}
expKeys = []tsdb.TagKeys{{Measurement: "cpu", Keys: []string{"serverb"}}}
if got, exp := keys, expKeys; !reflect.DeepEqual(got, exp) {
return fmt.Errorf("got keys %v, expected %v", got, exp)
}
return nil
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
if err := test(index); err != nil {
t.Error(err)
}
})
}
}
// Ensure the store can create a snapshot of a shard.
func TestStore_CreateShardSnapShot(t *testing.T) {
t.Parallel()
test := func(index string) {
s := MustOpenStore(index)
defer s.Close()
// Create a new shard and verify that it exists.
if err := s.CreateShard("db0", "rp0", 1, true); err != nil {
t.Fatal(err)
} else if sh := s.Shard(1); sh == nil {
t.Fatalf("expected shard")
}
dir, e := s.CreateShardSnapshot(1)
if e != nil {
t.Fatal(e)
}
if dir == "" {
t.Fatal("empty directory name")
}
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) { test(index) })
}
}
func TestStore_Open(t *testing.T) {
t.Parallel()
test := func(index string) {
s := NewStore(index)
defer s.Close()
if err := os.MkdirAll(filepath.Join(s.Path(), "db0", "rp0", "2"), 0777); err != nil {
t.Fatal(err)
}
if err := os.MkdirAll(filepath.Join(s.Path(), "db0", "rp2", "4"), 0777); err != nil {
t.Fatal(err)
}
if err := os.MkdirAll(filepath.Join(s.Path(), "db1", "rp0", "1"), 0777); err != nil {
t.Fatal(err)
}
// Store should open both databases and all three shards.
if err := s.Open(); err != nil {
t.Fatal(err)
} else if n := len(s.Databases()); n != 2 {
t.Fatalf("unexpected database index count: %d", n)
} else if n := s.ShardN(); n != 3 {
t.Fatalf("unexpected shard count: %d", n)
}
expDatabases := []string{"db0", "db1"}
gotDatabases := s.Databases()
sort.Strings(gotDatabases)
if got, exp := gotDatabases, expDatabases; !reflect.DeepEqual(got, exp) {
t.Fatalf("got %#v, expected %#v", got, exp)
}
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) { test(index) })
}
}
// Ensure the store ignores a database path that is a file rather than a directory.
func TestStore_Open_InvalidDatabaseFile(t *testing.T) {
t.Parallel()
test := func(index string) {
s := NewStore(index)
defer s.Close()
// Create a file instead of a directory for a database.
if _, err := os.Create(filepath.Join(s.Path(), "db0")); err != nil {
t.Fatal(err)
}
// Store should ignore database since it's a file.
if err := s.Open(); err != nil {
t.Fatal(err)
} else if n := len(s.Databases()); n != 0 {
t.Fatalf("unexpected database index count: %d", n)
}
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) { test(index) })
}
}
// Ensure the store ignores a retention policy path that is a file rather than a directory.
func TestStore_Open_InvalidRetentionPolicy(t *testing.T) {
t.Parallel()
test := func(index string) {
s := NewStore(index)
defer s.Close()
// Create an RP file instead of a directory.
if err := os.MkdirAll(filepath.Join(s.Path(), "db0"), 0777); err != nil {
t.Fatal(err)
} else if _, err := os.Create(filepath.Join(s.Path(), "db0", "rp0")); err != nil {
t.Fatal(err)
}
// Store should ignore retention policy since it's a file, and there should
// be no indices created.
if err := s.Open(); err != nil {
t.Fatal(err)
} else if n := len(s.Databases()); n != 0 {
t.Log(s.Databases())
t.Fatalf("unexpected database index count: %d", n)
}
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) { test(index) })
}
}
// Ensure the store ignores a shard that does not have a numeric name.
func TestStore_Open_InvalidShard(t *testing.T) {
t.Parallel()
test := func(index string) {
s := NewStore(index)
defer s.Close()
// Create a non-numeric shard file.
if err := os.MkdirAll(filepath.Join(s.Path(), "db0", "rp0"), 0777); err != nil {
t.Fatal(err)
} else if _, err := os.Create(filepath.Join(s.Path(), "db0", "rp0", "bad_shard")); err != nil {
t.Fatal(err)
}
// Store should ignore shard since it does not have a numeric name.
if err := s.Open(); err != nil {
t.Fatal(err)
} else if n := len(s.Databases()); n != 0 {
t.Fatalf("unexpected database index count: %d", n)
} else if n := s.ShardN(); n != 0 {
t.Fatalf("unexpected shard count: %d", n)
}
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) { test(index) })
}
}
// Ensure shards can create iterators.
func TestShards_CreateIterator(t *testing.T) {
t.Parallel()
test := func(index string) {
s := MustOpenStore(index)
defer s.Close()
// Create shard #0 with data.
s.MustCreateShardWithData("db0", "rp0", 0,
`cpu,host=serverA value=1 0`,
`cpu,host=serverA value=2 10`,
`cpu,host=serverB value=3 20`,
)
// Create shard #1 with data.
s.MustCreateShardWithData("db0", "rp0", 1,
`cpu,host=serverA value=1 30`,
`mem,host=serverA value=2 40`, // skip: wrong source
`cpu,host=serverC value=3 60`,
)
// Retrieve shard group.
shards := s.ShardGroup([]uint64{0, 1})
// Create iterator.
m := &influxql.Measurement{Name: "cpu"}
itr, err := shards.CreateIterator(context.Background(), m, query.IteratorOptions{
Expr: influxql.MustParseExpr(`value`),
Dimensions: []string{"host"},
Ascending: true,
StartTime: influxql.MinTime,
EndTime: influxql.MaxTime,
})
if err != nil {
t.Fatal(err)
}
defer itr.Close()
fitr := itr.(query.FloatIterator)
// Read values from iterator. The host=serverA points should come first.
if p, err := fitr.Next(); err != nil {
t.Fatalf("unexpected error(0): %s", err)
} else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverA"), Time: time.Unix(0, 0).UnixNano(), Value: 1}) {
t.Fatalf("unexpected point(0): %s", spew.Sdump(p))
}
if p, err := fitr.Next(); err != nil {
t.Fatalf("unexpected error(1): %s", err)
} else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverA"), Time: time.Unix(10, 0).UnixNano(), Value: 2}) {
t.Fatalf("unexpected point(1): %s", spew.Sdump(p))
}
if p, err := fitr.Next(); err != nil {
t.Fatalf("unexpected error(2): %s", err)
} else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverA"), Time: time.Unix(30, 0).UnixNano(), Value: 1}) {
t.Fatalf("unexpected point(2): %s", spew.Sdump(p))
}
// Next the host=serverB point.
if p, err := fitr.Next(); err != nil {
t.Fatalf("unexpected error(3): %s", err)
} else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverB"), Time: time.Unix(20, 0).UnixNano(), Value: 3}) {
t.Fatalf("unexpected point(3): %s", spew.Sdump(p))
}
// And finally the host=serverC point.
if p, err := fitr.Next(); err != nil {
t.Fatalf("unexpected error(4): %s", err)
} else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverC"), Time: time.Unix(60, 0).UnixNano(), Value: 3}) {
t.Fatalf("unexpected point(4): %s", spew.Sdump(p))
}
// Then an EOF should occur.
if p, err := fitr.Next(); err != nil {
t.Fatalf("expected eof, got error: %s", err)
} else if p != nil {
t.Fatalf("expected eof, got: %s", spew.Sdump(p))
}
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) { test(index) })
}
}
// Ensure the store can backup a shard and another store can restore it.
func TestStore_BackupRestoreShard(t *testing.T) {
test := func(index string) {
s0, s1 := MustOpenStore(index), MustOpenStore(index)
defer s0.Close()
defer s1.Close()
// Create shard with data.
s0.MustCreateShardWithData("db0", "rp0", 100,
`cpu value=1 0`,
`cpu value=2 10`,
`cpu value=3 20`,
)
if err := s0.Reopen(); err != nil {
t.Fatal(err)
}
// Backup shard to a buffer.
var buf bytes.Buffer
if err := s0.BackupShard(100, time.Time{}, &buf); err != nil {
t.Fatal(err)
}
// Create the shard on the other store and restore from buffer.
if err := s1.CreateShard("db0", "rp0", 100, true); err != nil {
t.Fatal(err)
}
if err := s1.RestoreShard(100, &buf); err != nil {
t.Fatal(err)
}
// Read data from the restored shard and verify it matches what was written.
m := &influxql.Measurement{Name: "cpu"}
itr, err := s1.Shard(100).CreateIterator(context.Background(), m, query.IteratorOptions{
Expr: influxql.MustParseExpr(`value`),
Ascending: true,
StartTime: influxql.MinTime,
EndTime: influxql.MaxTime,
})
if err != nil {
t.Fatal(err)
}
defer itr.Close()
fitr := itr.(query.FloatIterator)
// Read the values back from the iterator and check each point.
p, e := fitr.Next()
if e != nil {
t.Fatal(e)
}
if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Time: time.Unix(0, 0).UnixNano(), Value: 1}) {
t.Fatalf("unexpected point(0): %s", spew.Sdump(p))
}
p, e = fitr.Next()
if e != nil {
t.Fatal(e)
}
if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Time: time.Unix(10, 0).UnixNano(), Value: 2}) {
t.Fatalf("unexpected point(1): %s", spew.Sdump(p))
}
p, e = fitr.Next()
if e != nil {
t.Fatal(e)
}
if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Time: time.Unix(20, 0).UnixNano(), Value: 3}) {
t.Fatalf("unexpected point(2): %s", spew.Sdump(p))
}
}
for _, index := range tsdb.RegisteredIndexes() {
if index == tsdb.TSI1IndexName {
t.Skip("Skipping failing test for tsi1")
}
t.Run(index, func(t *testing.T) {
test(index)
})
}
}
func TestStore_Shard_SeriesN(t *testing.T) {
t.Parallel()
test := func(index string) error {
s := MustOpenStore(index)
defer s.Close()
// Create shard with data.
s.MustCreateShardWithData("db0", "rp0", 1,
`cpu value=1 0`,
`cpu,host=serverA value=2 10`,
)
// Create 2nd shard w/ same measurements.
s.MustCreateShardWithData("db0", "rp0", 2,
`cpu value=1 0`,
`cpu value=2 10`,
)
if got, exp := s.Shard(1).SeriesN(), int64(2); got != exp {
return fmt.Errorf("[shard %d] got series count of %d, but expected %d", 1, got, exp)
} else if got, exp := s.Shard(2).SeriesN(), int64(1); got != exp {
return fmt.Errorf("[shard %d] got series count of %d, but expected %d", 2, got, exp)
}
return nil
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
if err := test(index); err != nil {
t.Error(err)
}
})
}
}
func TestStore_MeasurementNames_Deduplicate(t *testing.T) {
t.Parallel()
test := func(index string) {
s := MustOpenStore(index)
defer s.Close()
// Create shard with data.
s.MustCreateShardWithData("db0", "rp0", 1,
`cpu value=1 0`,
`cpu value=2 10`,
`cpu value=3 20`,
)
// Create 2nd shard w/ same measurements.
s.MustCreateShardWithData("db0", "rp0", 2,
`cpu value=1 0`,
`cpu value=2 10`,
`cpu value=3 20`,
)
meas, err := s.MeasurementNames(query.OpenAuthorizer, "db0", nil)
if err != nil {
t.Fatalf("unexpected error with MeasurementNames: %v", err)
}
if exp, got := 1, len(meas); exp != got {
t.Fatalf("measurement len mismatch: exp %v, got %v", exp, got)
}
if exp, got := "cpu", string(meas[0]); exp != got {
t.Fatalf("measurement name mismatch: exp %v, got %v", exp, got)
}
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) { test(index) })
}
}
func testStoreCardinalityTombstoning(t *testing.T, store *Store) {
// Generate point data to write to the shards.
series := genTestSeries(10, 2, 4) // 160 series
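// 10 measurements * 4^2 tag-value combinations (4 values on each of 2 tag keys) = 160 series.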
points := make([]models.Point, 0, len(series))
for _, s := range series {
points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now()))
}
// Create requested number of shards in the store & write points across
// shards such that we never write the same series to multiple shards.
for shardID := 0; shardID < 4; shardID++ {
if err := store.CreateShard("db", "rp", uint64(shardID), true); err != nil {
t.Errorf("create shard: %s", err)
}
if err := store.BatchWrite(shardID, points[shardID*40:(shardID+1)*40]); err != nil {
t.Errorf("batch write: %s", err)
}
}
// Delete all the series for each measurement.
mnames, err := store.MeasurementNames(nil, "db", nil)
if err != nil {
t.Fatal(err)
}
for _, name := range mnames {
if err := store.DeleteSeries("db", []influxql.Source{&influxql.Measurement{Name: string(name)}}, nil); err != nil {
t.Fatal(err)
}
}
// Estimate the series cardinality...
cardinality, err := store.Store.SeriesCardinality("db")
if err != nil {
t.Fatal(err)
}
// Estimated cardinality should be well within 10 of the actual cardinality.
if got, exp := int(cardinality), 10; got > exp {
t.Errorf("series cardinality was %v, but expected at most %v (all series were deleted)", got, exp)
}
// Since all the series have been deleted, all the measurements should have
// been removed from the index too.
if cardinality, err = store.Store.MeasurementsCardinality("db"); err != nil {
t.Fatal(err)
}
// Estimated cardinality should be well within 2 of the actual cardinality.
// TODO(edd): this is totally arbitrary. How can I make it better?
if got, exp := int(cardinality), 2; got > exp {
t.Errorf("measurement cardinality was %v, but expected at most %v (all series were deleted)", got, exp)
}
}
func TestStore_Cardinality_Tombstoning(t *testing.T) {
t.Parallel()
if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" {
t.Skip("Skipping test in short, race and appveyor mode.")
}
test := func(index string) {
store := NewStore(index)
if err := store.Open(); err != nil {
panic(err)
}
defer store.Close()
testStoreCardinalityTombstoning(t, store)
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) { test(index) })
}
}
func testStoreCardinalityUnique(t *testing.T, store *Store) {
// Generate point data to write to the shards.
series := genTestSeries(64, 5, 5) // 200,000 series
expCardinality := len(series)
points := make([]models.Point, 0, len(series))
for _, s := range series {
points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now()))
}
// Create requested number of shards in the store & write points across
// shards such that we never write the same series to multiple shards.
for shardID := 0; shardID < 10; shardID++ {
if err := store.CreateShard("db", "rp", uint64(shardID), true); err != nil {
t.Fatalf("create shard: %s", err)
}
if err := store.BatchWrite(shardID, points[shardID*20000:(shardID+1)*20000]); err != nil {
t.Fatalf("batch write: %s", err)
}
}
// Estimate the series cardinality...
cardinality, err := store.Store.SeriesCardinality("db")
if err != nil {
t.Fatal(err)
}
// Estimated cardinality should be well within 1.5% of the actual cardinality.
if got, exp := math.Abs(float64(cardinality)-float64(expCardinality))/float64(expCardinality), 0.015; got > exp {
t.Errorf("got epsilon of %v for series cardinality %v (expected %v), which is larger than expected %v", got, cardinality, expCardinality, exp)
}
// Estimate the measurement cardinality...
if cardinality, err = store.Store.MeasurementsCardinality("db"); err != nil {
t.Fatal(err)
}
// Estimated cardinality should be well within 2 of the actual cardinality. (arbitrary...)
expCardinality = 64
if got, exp := math.Abs(float64(cardinality)-float64(expCardinality)), 2.0; got > exp {
t.Errorf("got measurement cardinality %v, expected up to %v; difference is larger than expected %v", cardinality, expCardinality, exp)
}
}
func TestStore_Cardinality_Unique(t *testing.T) {
t.Parallel()
if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" {
t.Skip("Skipping test in short, race and appveyor mode.")
}
test := func(index string) {
store := NewStore(index)
store.EngineOptions.Config.MaxSeriesPerDatabase = 0
if err := store.Open(); err != nil {
panic(err)
}
defer store.Close()
testStoreCardinalityUnique(t, store)
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) { test(index) })
}
}
// This exercises cardinality estimation when series data is duplicated across
// multiple shards.
func testStoreCardinalityDuplicates(t *testing.T, store *Store) {
// Generate point data to write to the shards.
series := genTestSeries(64, 5, 5) // 200,000 series.
expCardinality := len(series)
points := make([]models.Point, 0, len(series))
for _, s := range series {
points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now()))
}
// Create requested number of shards in the store & write points.
for shardID := 0; shardID < 10; shardID++ {
if err := store.CreateShard("db", "rp", uint64(shardID), true); err != nil {
t.Fatalf("create shard: %s", err)
}
var from, to int
if shardID == 0 {
// If it's the first shard then write all of the points.
from, to = 0, len(points)
} else {
// For other shards we write a random sub-section of all the points.
// which will duplicate the series and shouldn't increase the
// cardinality.
from, to = rand.Intn(len(points)), rand.Intn(len(points))
if from > to {
from, to = to, from
}
}
if err := store.BatchWrite(shardID, points[from:to]); err != nil {
t.Fatalf("batch write: %s", err)
}
}
// Estimate the series cardinality...
cardinality, err := store.Store.SeriesCardinality("db")
if err != nil {
t.Fatal(err)
}
// Estimated cardinality should be well within 1.5% of the actual cardinality.
if got, exp := math.Abs(float64(cardinality)-float64(expCardinality))/float64(expCardinality), 0.015; got > exp {
t.Errorf("got epsilon of %v for series cardinality %d (expected %d), which is larger than expected %v", got, cardinality, expCardinality, exp)
}
// Estimate the measurement cardinality...
if cardinality, err = store.Store.MeasurementsCardinality("db"); err != nil {
t.Fatal(err)
}
// Estimated cardinality should be well within 2 of the actual cardinality. (Arbitrary...)
expCardinality = 64
if got, exp := math.Abs(float64(cardinality)-float64(expCardinality)), 2.0; got > exp {
t.Errorf("got measurement cardinality %v, expected upto %v; difference is larger than expected %v", cardinality, expCardinality, exp)
}
}
func TestStore_Cardinality_Duplicates(t *testing.T) {
t.Parallel()
if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" {
t.Skip("Skipping test in short, race and appveyor mode.")
}
test := func(index string) {
store := NewStore(index)
store.EngineOptions.Config.MaxSeriesPerDatabase = 0
if err := store.Open(); err != nil {
panic(err)
}
defer store.Close()
testStoreCardinalityDuplicates(t, store)
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) { test(index) })
}
}
// Creates a large number of series in multiple shards, which will force
// compactions to occur.
func testStoreCardinalityCompactions(store *Store) error {
// Generate point data to write to the shards.
series := genTestSeries(300, 5, 5) // 937,500 series
expCardinality := len(series)
points := make([]models.Point, 0, len(series))
for _, s := range series {
points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now()))
}
// Create requested number of shards in the store & write points across
// shards such that we never write the same series to multiple shards.
for shardID := 0; shardID < 2; shardID++ {
if err := store.CreateShard("db", "rp", uint64(shardID), true); err != nil {
return fmt.Errorf("create shard: %s", err)
}
if err := store.BatchWrite(shardID, points[shardID*468750:(shardID+1)*468750]); err != nil {
return fmt.Errorf("batch write: %s", err)
}
}
// Estimate the series cardinality...
cardinality, err := store.Store.SeriesCardinality("db")
if err != nil {
return err
}
// Estimated cardinality should be well within 1.5% of the actual cardinality.
if got, exp := math.Abs(float64(cardinality)-float64(expCardinality))/float64(expCardinality), 0.015; got > exp {
return fmt.Errorf("got epsilon of %v for series cardinality %v (expected %v), which is larger than expected %v", got, cardinality, expCardinality, exp)
}
// Estimate the measurement cardinality...
if cardinality, err = store.Store.MeasurementsCardinality("db"); err != nil {
return err
}
// Estimated cardinality should be well within 2 of the actual cardinality. (Arbitrary...)
expCardinality = 300
if got, exp := math.Abs(float64(cardinality)-float64(expCardinality)), 2.0; got > exp {
return fmt.Errorf("got measurement cardinality %v, expected upto %v; difference is larger than expected %v", cardinality, expCardinality, exp)
}
return nil
}
func TestStore_Cardinality_Compactions(t *testing.T) {
if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" {
t.Skip("Skipping test in short, race and appveyor mode.")
}
test := func(index string) error {
store := NewStore(index)
store.EngineOptions.Config.MaxSeriesPerDatabase = 0
if err := store.Open(); err != nil {
panic(err)
}
defer store.Close()
return testStoreCardinalityCompactions(store)
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
if err := test(index); err != nil {
t.Fatal(err)
}
})
}
}
func TestStore_Sketches(t *testing.T) {
t.Parallel()
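// checkCardinalities compares estimated series and measurement cardinalities
// (and their tombstone counts) against expected values, tolerating roughly 10% error.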
checkCardinalities := func(store *tsdb.Store, series, tseries, measurements, tmeasurements int) error {
// Get sketches and check cardinality...
sketch, tsketch, err := store.SeriesSketches("db")
if err != nil {
return err
}
// delta calculates a rough 10% delta. If i is small then a minimum value
// of 2 is used.
delta := func(i int) int {
v := i / 10
if v == 0 {
v = 2
}
return v
}
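// e.g. delta(160) == 16, while delta(5) falls back to the minimum of 2.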
// series cardinality should be well within 10%.
if got, exp := int(sketch.Count()), series; got-exp < -delta(series) || got-exp > delta(series) {
return fmt.Errorf("got series cardinality %d, expected ~%d", got, exp)
}
// check series tombstones
if got, exp := int(tsketch.Count()), tseries; got-exp < -delta(tseries) || got-exp > delta(tseries) {
return fmt.Errorf("got series tombstone cardinality %d, expected ~%d", got, exp)
}
// Check measurement cardinality.
if sketch, tsketch, err = store.MeasurementsSketches("db"); err != nil {
return err
}
if got, exp := int(sketch.Count()), measurements; got-exp < -delta(measurements) || got-exp > delta(measurements) {
return fmt.Errorf("got measurement cardinality %d, expected ~%d", got, exp)
}
if got, exp := int(tsketch.Count()), tmeasurements; got-exp < -delta(tmeasurements) || got-exp > delta(tmeasurements) {
return fmt.Errorf("got measurement tombstone cardinality %d, expected ~%d", got, exp)
}
return nil
}
test := func(index string) error {
store := MustOpenStore(index)
defer store.Close()
// Generate point data to write to the shards.
series := genTestSeries(10, 2, 4) // 160 series
points := make([]models.Point, 0, len(series))
for _, s := range series {
points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now()))
}
// Create requested number of shards in the store & write points across
// shards such that we never write the same series to multiple shards.
for shardID := 0; shardID < 4; shardID++ {
if err := store.CreateShard("db", "rp", uint64(shardID), true); err != nil {
return fmt.Errorf("create shard: %s", err)
}
if err := store.BatchWrite(shardID, points[shardID*40:(shardID+1)*40]); err != nil {
return fmt.Errorf("batch write: %s", err)
}
}
// Check cardinalities
if err := checkCardinalities(store.Store, 160, 0, 10, 0); err != nil {
return fmt.Errorf("[initial] %v", err)
}
// Reopen the store.
if err := store.Reopen(); err != nil {
return err
}
// Check cardinalities
if err := checkCardinalities(store.Store, 160, 0, 10, 0); err != nil {
return fmt.Errorf("[initial|re-open] %v", err)
}
// Delete half of the measurements' data.
mnames, err := store.MeasurementNames(nil, "db", nil)
if err != nil {
return err
}
for _, name := range mnames[:len(mnames)/2] {
if err := store.DeleteSeries("db", []influxql.Source{&influxql.Measurement{Name: string(name)}}, nil); err != nil {
return err
}
}
// Check cardinalities. In this case, the indexes behave differently.
expS, expTS, expM, expTM := 160, 0, 10, 5
if index == inmem.IndexName {
expS, expTS, expM, expTM = 160, 80, 10, 5
}
// Check cardinalities - tombstones should now be reflected in the sketches.
if err := checkCardinalities(store.Store, expS, expTS, expM, expTM); err != nil {
return fmt.Errorf("[initial|re-open|delete] %v", err)
}
// Reopen the store.
if err := store.Reopen(); err != nil {
return err
}
// Check cardinalities. In this case, the indexes behave differently.
expS, expTS, expM, expTM = 160, 0, 5, 5
if index == inmem.IndexName {
expS, expTS, expM, expTM = 80, 0, 5, 0
}
if err := checkCardinalities(store.Store, expS, expTS, expM, expTM); err != nil {
return fmt.Errorf("[initial|re-open|delete|re-open] %v", err)
}
return nil
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
if err := test(index); err != nil {
t.Fatal(err)
}
})
}
}
func TestStore_TagValues(t *testing.T) {
t.Parallel()
// No WHERE - just get for keys host and shard
RHSAll := &influxql.ParenExpr{
Expr: &influxql.BinaryExpr{
Op: influxql.OR,
LHS: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "_tagKey"},
RHS: &influxql.StringLiteral{Val: "host"},
},
RHS: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "_tagKey"},
RHS: &influxql.StringLiteral{Val: "shard"},
},
},
}
// Get for host and shard, but also WHERE on foo = a
RHSWhere := &influxql.ParenExpr{
Expr: &influxql.BinaryExpr{
Op: influxql.AND,
LHS: &influxql.ParenExpr{
Expr: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "foo"},
RHS: &influxql.StringLiteral{Val: "a"},
},
},
RHS: RHSAll,
},
}
// SHOW TAG VALUES FROM /cpu\d/ WITH KEY IN ("host", "shard")
//
// Switching out RHS for RHSWhere would make the query:
// SHOW TAG VALUES FROM /cpu\d/ WITH KEY IN ("host", "shard") WHERE foo = 'a'
base := influxql.BinaryExpr{
Op: influxql.AND,
LHS: &influxql.ParenExpr{
Expr: &influxql.BinaryExpr{
Op: influxql.EQREGEX,
LHS: &influxql.VarRef{Val: "_name"},
RHS: &influxql.RegexLiteral{Val: regexp.MustCompile(`cpu\d`)},
},
},
RHS: RHSAll,
}
baseWhere := influxql.CloneExpr(&base).(*influxql.BinaryExpr)
baseWhere.RHS = RHSWhere
examples := []struct {
Name string
Expr influxql.Expr
Exp []tsdb.TagValues
}{
{
Name: "No WHERE clause",
Expr: &base,
Exp: []tsdb.TagValues{
createTagValues("cpu0", map[string][]string{"shard": {"s0"}}),
createTagValues("cpu1", map[string][]string{"shard": {"s1"}}),
createTagValues("cpu10", map[string][]string{"host": {"nofoo", "tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}),
createTagValues("cpu11", map[string][]string{"host": {"nofoo", "tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}),
createTagValues("cpu12", map[string][]string{"host": {"nofoo", "tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}),
createTagValues("cpu2", map[string][]string{"shard": {"s2"}}),
},
},
{
Name: "With WHERE clause",
Expr: baseWhere,
Exp: []tsdb.TagValues{
createTagValues("cpu0", map[string][]string{"shard": {"s0"}}),
createTagValues("cpu1", map[string][]string{"shard": {"s1"}}),
createTagValues("cpu10", map[string][]string{"host": {"tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}),
createTagValues("cpu11", map[string][]string{"host": {"tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}),
createTagValues("cpu12", map[string][]string{"host": {"tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}),
createTagValues("cpu2", map[string][]string{"shard": {"s2"}}),
},
},
}
var s *Store
setup := func(index string) []uint64 { // returns shard ids
s = MustOpenStore(index)
fmtStr := `cpu1%[1]d,foo=a,ignoreme=nope,host=tv%[2]d,shard=s%[3]d value=1 %[4]d
cpu1%[1]d,host=nofoo value=1 %[4]d
mem,host=nothanks value=1 %[4]d
cpu%[3]d,shard=s%[3]d,foo=a value=2 %[4]d
`
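// Format verbs: %[1]d measurement id, %[2]d tag value id, %[3]d shard id, %[4]d timestamp.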
genPoints := func(sid int) []string {
var ts int
points := make([]string, 0, 3*4)
for m := 0; m < 3; m++ {
for tagvid := 0; tagvid < 4; tagvid++ {
points = append(points, fmt.Sprintf(fmtStr, m, tagvid, sid, ts))
ts++
}
}
return points
}
// Create data across 3 shards.
var ids []uint64
for i := 0; i < 3; i++ {
ids = append(ids, uint64(i))
s.MustCreateShardWithData("db0", "rp0", i, genPoints(i)...)
}
return ids
}
for _, example := range examples {
for _, index := range tsdb.RegisteredIndexes() {
shardIDs := setup(index)
t.Run(example.Name+"_"+index, func(t *testing.T) {
got, err := s.TagValues(nil, shardIDs, example.Expr)
if err != nil {
t.Fatal(err)
}
exp := example.Exp
if !reflect.DeepEqual(got, exp) {
t.Fatalf("got:\n%#v\n\nexp:\n%#v", got, exp)
}
})
s.Close()
}
}
}
func TestStore_Measurements_Auth(t *testing.T) {
t.Parallel()
test := func(index string) error {
s := MustOpenStore(index)
defer s.Close()
// Create shard #0 with data.
s.MustCreateShardWithData("db0", "rp0", 0,
`cpu,host=serverA value=1 0`,
`cpu,host=serverA value=2 10`,
`cpu,region=west value=3 20`,
`cpu,secret=foo value=5 30`, // cpu still readable because it has other series that can be read.
`mem,secret=foo value=1 30`,
`disk value=4 30`,
)
authorizer := &internal.AuthorizerMock{
AuthorizeSeriesReadFn: func(database string, measurement []byte, tags models.Tags) bool {
if database == "" || tags.GetString("secret") != "" {
t.Logf("Rejecting series db=%s, m=%s, tags=%v", database, measurement, tags)
return false
}
return true
},
}
names, err := s.MeasurementNames(authorizer, "db0", nil)
if err != nil {
return err
}
// names should not contain any measurements where none of the associated
// series are authorised for reads.
expNames := 2
var gotNames int
for _, name := range names {
if string(name) == "mem" {
return fmt.Errorf("got measurement %q but it should be filtered.", name)
}
gotNames++
}
if gotNames != expNames {
return fmt.Errorf("got %d measurements, but expected %d", gotNames, expNames)
}
// Now delete all of the cpu series.
cond, err := influxql.ParseExpr("host = 'serverA' OR region = 'west'")
if err != nil {
return err
}
if err := s.DeleteSeries("db0", nil, cond); err != nil {
return err
}
if names, err = s.MeasurementNames(authorizer, "db0", nil); err != nil {
return err
}
// names should not contain any measurements where none of the associated
// series are authorised for reads.
expNames = 1
gotNames = 0
for _, name := range names {
if string(name) == "mem" || string(name) == "cpu" {
return fmt.Errorf("after delete got measurement %q but it should be filtered.", name)
}
gotNames++
}
if gotNames != expNames {
return fmt.Errorf("after delete got %d measurements, but expected %d", gotNames, expNames)
}
return nil
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
if err := test(index); err != nil {
t.Fatal(err)
}
})
}
}
func TestStore_TagKeys_Auth(t *testing.T) {
t.Parallel()
test := func(index string) error {
s := MustOpenStore(index)
defer s.Close()
// Create shard #0 with data.
s.MustCreateShardWithData("db0", "rp0", 0,
`cpu,host=serverA value=1 0`,
`cpu,host=serverA,debug=true value=2 10`,
`cpu,region=west value=3 20`,
`cpu,secret=foo,machine=a value=1 20`,
)
authorizer := &internal.AuthorizerMock{
AuthorizeSeriesReadFn: func(database string, measurement []byte, tags models.Tags) bool {
if database == "" || !bytes.Equal(measurement, []byte("cpu")) || tags.GetString("secret") != "" {
t.Logf("Rejecting series db=%s, m=%s, tags=%v", database, measurement, tags)
return false
}
return true
},
}
keys, err := s.TagKeys(authorizer, []uint64{0}, nil)
if err != nil {
return err
}
// keys should not contain any tag keys associated with a series containing
// a secret tag.
expKeys := 3
var gotKeys int
for _, tk := range keys {
if got, exp := tk.Measurement, "cpu"; got != exp {
return fmt.Errorf("got measurement %q, expected %q", got, exp)
}
for _, key := range tk.Keys {
if key == "secret" || key == "machine" {
return fmt.Errorf("got tag key %q but it should be filtered.", key)
}
gotKeys++
}
}
if gotKeys != expKeys {
return fmt.Errorf("got %d keys, but expected %d", gotKeys, expKeys)
}
// Delete the series with region = west
cond, err := influxql.ParseExpr("region = 'west'")
if err != nil {
return err
}
if err := s.DeleteSeries("db0", nil, cond); err != nil {
return err
}
if keys, err = s.TagKeys(authorizer, []uint64{0}, nil); err != nil {
return err
}
// keys should not contain any tag keys associated with a series containing
// a secret tag or the deleted series
expKeys = 2
gotKeys = 0
for _, tk := range keys {
if got, exp := tk.Measurement, "cpu"; got != exp {
return fmt.Errorf("got measurement %q, expected %q", got, exp)
}
for _, key := range tk.Keys {
if key == "secret" || key == "machine" || key == "region" {
return fmt.Errorf("got tag key %q but it should be filtered.", key)
}
gotKeys++
}
}
if gotKeys != expKeys {
return fmt.Errorf("got %d keys, but expected %d", gotKeys, expKeys)
}
return nil
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
if err := test(index); err != nil {
t.Fatal(err)
}
})
}
}
func TestStore_TagValues_Auth(t *testing.T) {
t.Parallel()
test := func(index string) error {
s := MustOpenStore(index)
defer s.Close()
// Create shard #0 with data.
s.MustCreateShardWithData("db0", "rp0", 0,
`cpu,host=serverA value=1 0`,
`cpu,host=serverA value=2 10`,
`cpu,host=serverB value=3 20`,
`cpu,secret=foo,host=serverD value=1 20`,
)
authorizer := &internal.AuthorizerMock{
AuthorizeSeriesReadFn: func(database string, measurement []byte, tags models.Tags) bool {
if database == "" || !bytes.Equal(measurement, []byte("cpu")) || tags.GetString("secret") != "" {
t.Logf("Rejecting series db=%s, m=%s, tags=%v", database, measurement, tags)
return false
}
return true
},
}
values, err := s.TagValues(authorizer, []uint64{0}, &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "_tagKey"},
RHS: &influxql.StringLiteral{Val: "host"},
})
if err != nil {
return err
}
// values should not contain any tag values associated with a series containing
// a secret tag.
expValues := 2
var gotValues int
for _, tv := range values {
if got, exp := tv.Measurement, "cpu"; got != exp {
return fmt.Errorf("got measurement %q, expected %q", got, exp)
}
for _, v := range tv.Values {
if got, exp := v.Value, "serverD"; got == exp {
return fmt.Errorf("got tag value %q but it should be filtered.", got)
}
gotValues++
}
}
if gotValues != expValues {
return fmt.Errorf("got %d tags, but expected %d", gotValues, expValues)
}
// Delete the series with values serverA
cond, err := influxql.ParseExpr("host = 'serverA'")
if err != nil {
return err
}
if err := s.DeleteSeries("db0", nil, cond); err != nil {
return err
}
values, err = s.TagValues(authorizer, []uint64{0}, &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "_tagKey"},
RHS: &influxql.StringLiteral{Val: "host"},
})
if err != nil {
return err
}
// values should not contain any tag values associated with a series containing
// a secret tag.
expValues = 1
gotValues = 0
for _, tv := range values {
if got, exp := tv.Measurement, "cpu"; got != exp {
return fmt.Errorf("got measurement %q, expected %q", got, exp)
}
for _, v := range tv.Values {
if got, exp := v.Value, "serverD"; got == exp {
return fmt.Errorf("got tag value %q but it should be filtered.", got)
} else if got, exp := v.Value, "serverA"; got == exp {
return fmt.Errorf("got tag value %q but it should be filtered.", got)
}
gotValues++
}
}
if gotValues != expValues {
return fmt.Errorf("got %d values, but expected %d", gotValues, expValues)
}
return nil
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
if err := test(index); err != nil {
t.Fatal(err)
}
})
}
}
// Helper to create some tag values
func createTagValues(mname string, kvs map[string][]string) tsdb.TagValues {
var sz int
for _, v := range kvs {
sz += len(v)
}
out := tsdb.TagValues{
Measurement: mname,
Values: make([]tsdb.KeyValue, 0, sz),
}
for tk, tvs := range kvs {
for _, tv := range tvs {
out.Values = append(out.Values, tsdb.KeyValue{Key: tk, Value: tv})
}
// We have to sort the KeyValues since that's how they're provided from
// the tsdb.Store.
sort.Sort(tsdb.KeyValues(out.Values))
}
return out
}
func BenchmarkStore_SeriesCardinality_100_Shards(b *testing.B) {
for _, index := range tsdb.RegisteredIndexes() {
store := NewStore(index)
if err := store.Open(); err != nil {
panic(err)
}
// Write a point to n shards.
for shardID := 0; shardID < 100; shardID++ {
if err := store.CreateShard("db", "rp", uint64(shardID), true); err != nil {
b.Fatalf("create shard: %s", err)
}
err := store.WriteToShard(uint64(shardID), []models.Point{models.MustNewPoint("cpu", nil, map[string]interface{}{"value": 1.0}, time.Now())})
if err != nil {
b.Fatalf("write: %s", err)
}
}
b.Run(store.EngineOptions.IndexVersion, func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = store.SeriesCardinality("db")
}
})
store.Close()
}
}
func BenchmarkStoreOpen_200KSeries_100Shards(b *testing.B) { benchmarkStoreOpen(b, 64, 5, 5, 1, 100) }
func benchmarkStoreOpen(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt, shardCnt int) {
var store *Store
setup := func(index string) error {
store = MustOpenStore(index)
// Generate test series (measurements + unique tag sets).
series := genTestSeries(mCnt, tkCnt, tvCnt)
// Generate point data to write to the shards.
points := []models.Point{}
for _, s := range series {
for val := 0.0; val < float64(pntCnt); val++ {
p := models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": val}, time.Now())
points = append(points, p)
}
}
// Create requested number of shards in the store & write points.
for shardID := 0; shardID < shardCnt; shardID++ {
if err := store.CreateShard("mydb", "myrp", uint64(shardID), true); err != nil {
return fmt.Errorf("create shard: %s", err)
}
if err := store.BatchWrite(shardID, points); err != nil {
return fmt.Errorf("batch write: %s", err)
}
}
return nil
}
for _, index := range tsdb.RegisteredIndexes() {
if err := setup(index); err != nil {
b.Fatal(err)
}
b.Run(store.EngineOptions.IndexVersion, func(b *testing.B) {
for n := 0; n < b.N; n++ {
store := tsdb.NewStore(store.Path())
if err := store.Open(); err != nil {
b.Fatalf("open store error: %s", err)
}
b.StopTimer()
store.Close()
b.StartTimer()
}
})
os.RemoveAll(store.Path())
}
}
// Package-level sink for benchmark results, so the compiler cannot optimize the calls away.
var tvResult []tsdb.TagValues
func BenchmarkStore_TagValues(b *testing.B) {
benchmarks := []struct {
name string
shards int
measurements int
tagValues int
}{
{name: "s=1_m=1_v=100", shards: 1, measurements: 1, tagValues: 100},
{name: "s=1_m=1_v=1000", shards: 1, measurements: 1, tagValues: 1000},
{name: "s=1_m=10_v=100", shards: 1, measurements: 10, tagValues: 100},
{name: "s=1_m=10_v=1000", shards: 1, measurements: 10, tagValues: 1000},
{name: "s=1_m=100_v=100", shards: 1, measurements: 100, tagValues: 100},
{name: "s=1_m=100_v=1000", shards: 1, measurements: 100, tagValues: 1000},
{name: "s=10_m=1_v=100", shards: 10, measurements: 1, tagValues: 100},
{name: "s=10_m=1_v=1000", shards: 10, measurements: 1, tagValues: 1000},
{name: "s=10_m=10_v=100", shards: 10, measurements: 10, tagValues: 100},
{name: "s=10_m=10_v=1000", shards: 10, measurements: 10, tagValues: 1000},
{name: "s=10_m=100_v=100", shards: 10, measurements: 100, tagValues: 100},
{name: "s=10_m=100_v=1000", shards: 10, measurements: 100, tagValues: 1000},
}
var s *Store
setup := func(shards, measurements, tagValues int, index string, useRandom bool) []uint64 { // returns shard ids
s = NewStore(index)
if err := s.Open(); err != nil {
panic(err)
}
fmtStr := `cpu%[1]d,host=tv%[2]d,shard=s%[3]d,z1=s%[1]d%[2]d,z2=%[4]s value=1 %[5]d`
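// Format verbs: %[1]d measurement id, %[2]d tag value id, %[3]d shard id, %[4]s random "half" flag, %[5]d timestamp.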
// genPoints generates some point data. If ran is true then random tag
// key values will be generated, meaning more work sorting and merging.
// If ran is false, then the same set of points will be produced for the
// same set of parameters, meaning more de-duplication of points will be
// needed.
genPoints := func(sid int, ran bool) []string {
var v, ts int
var half string
points := make([]string, 0, measurements*tagValues)
for m := 0; m < measurements; m++ {
for tagvid := 0; tagvid < tagValues; tagvid++ {
v = tagvid
if ran {
v = rand.Intn(100000)
}
half = fmt.Sprint(rand.Intn(2) == 0)
points = append(points, fmt.Sprintf(fmtStr, m, v, sid, half, ts))
ts++
}
}
return points
}
// Create data across chosen number of shards.
var shardIDs []uint64
for i := 0; i < shards; i++ {
shardIDs = append(shardIDs, uint64(i))
s.MustCreateShardWithData("db0", "rp0", i, genPoints(i, useRandom)...)
}
return shardIDs
}
teardown := func() {
if err := s.Close(); err != nil {
b.Fatal(err)
}
}
// SHOW TAG VALUES WITH KEY IN ("host", "shard")
cond1 := &influxql.ParenExpr{
Expr: &influxql.BinaryExpr{
Op: influxql.OR,
LHS: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "_tagKey"},
RHS: &influxql.StringLiteral{Val: "host"},
},
RHS: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "_tagKey"},
RHS: &influxql.StringLiteral{Val: "shard"},
},
},
}
cond2 := &influxql.ParenExpr{
Expr: &influxql.BinaryExpr{
Op: influxql.AND,
LHS: &influxql.ParenExpr{
Expr: &influxql.BinaryExpr{
Op: influxql.EQ,
LHS: &influxql.VarRef{Val: "z2"},
RHS: &influxql.StringLiteral{Val: "true"},
},
},
RHS: cond1,
},
}
var err error
for _, index := range tsdb.RegisteredIndexes() {
for useRand := 0; useRand < 2; useRand++ {
for c, condition := range []influxql.Expr{cond1, cond2} {
for _, bm := range benchmarks {
shardIDs := setup(bm.shards, bm.measurements, bm.tagValues, index, useRand == 1)
cnd := "Unfiltered"
if c == 1 {
cnd = "Filtered"
}
b.Run("random_values="+fmt.Sprint(useRand == 1)+"_index="+index+"_"+cnd+"_"+bm.name, func(b *testing.B) {
for i := 0; i < b.N; i++ {
if tvResult, err = s.TagValues(nil, shardIDs, condition); err != nil {
b.Fatal(err)
}
}
})
teardown()
}
}
}
}
}
// Store is a test wrapper for tsdb.Store.
type Store struct {
*tsdb.Store
index string
}
// NewStore returns a new instance of Store with a temporary path.
func NewStore(index string) *Store {
path, err := ioutil.TempDir("", "influxdb-tsdb-")
if err != nil {
panic(err)
}
s := &Store{Store: tsdb.NewStore(path), index: index}
s.EngineOptions.IndexVersion = index
s.EngineOptions.Config.WALDir = filepath.Join(path, "wal")
s.EngineOptions.Config.TraceLoggingEnabled = true
if testing.Verbose() {
s.WithLogger(logger.New(os.Stdout))
}
return s
}
// MustOpenStore returns a new, open Store using the specified index,
// at a temporary path.
func MustOpenStore(index string) *Store {
s := NewStore(index)
if err := s.Open(); err != nil {
panic(err)
}
return s
}
// Reopen closes and reopens the store as a new store.
func (s *Store) Reopen() error {
if err := s.Store.Close(); err != nil {
return err
}
s.Store = tsdb.NewStore(s.Path())
s.EngineOptions.IndexVersion = s.index
s.EngineOptions.Config.WALDir = filepath.Join(s.Path(), "wal")
s.EngineOptions.Config.TraceLoggingEnabled = true
if testing.Verbose() {
s.WithLogger(logger.New(os.Stdout))
}
return s.Store.Open()
}
// Close closes the store and removes the underlying data.
func (s *Store) Close() error {
defer os.RemoveAll(s.Path())
return s.Store.Close()
}
// MustCreateShardWithData creates a shard and writes line protocol data to it.
func (s *Store) MustCreateShardWithData(db, rp string, shardID int, data ...string) {
if err := s.CreateShard(db, rp, uint64(shardID), true); err != nil {
panic(err)
}
s.MustWriteToShardString(shardID, data...)
}
// MustWriteToShardString parses the line protocol (with second precision) and
// inserts the resulting points into a shard. Panics on error.
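// For example: s.MustWriteToShardString(1, "cpu,host=serverA value=1 0").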
func (s *Store) MustWriteToShardString(shardID int, data ...string) {
var points []models.Point
for i := range data {
a, err := models.ParsePointsWithPrecision([]byte(strings.TrimSpace(data[i])), time.Time{}, "s")
if err != nil {
panic(err)
}
points = append(points, a...)
}
if err := s.WriteToShard(uint64(shardID), points); err != nil {
panic(err)
}
}
// BatchWrite writes points to a shard in chunks.
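// For example, 25,000 points are written as chunks of 10,000, 10,000 and 5,000.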
func (s *Store) BatchWrite(shardID int, points []models.Point) error {
nPts := len(points)
chunkSz := 10000
start := 0
end := chunkSz
for {
if end > nPts {
end = nPts
}
if end-start == 0 {
break
}
if err := s.WriteToShard(uint64(shardID), points[start:end]); err != nil {
return err
}
start = end
end += chunkSz
}
return nil
}
// ParseTags returns an instance of Tags for a comma-delimited list of key/values.
func ParseTags(s string) query.Tags {
m := make(map[string]string)
for _, kv := range strings.Split(s, ",") {
a := strings.Split(kv, "=")
m[a[0]] = a[1]
}
return query.NewTags(m)
}
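// dirExists reports whether path exists; any stat error other than "not exist" is treated as existing.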
func dirExists(path string) bool {
var err error
if _, err = os.Stat(path); err == nil {
return true
}
return !os.IsNotExist(err)
}
| [
"\"GORACE\"",
"\"APPVEYOR\"",
"\"GORACE\"",
"\"APPVEYOR\"",
"\"GORACE\"",
"\"APPVEYOR\"",
"\"GORACE\"",
"\"APPVEYOR\""
]
| []
| [
"APPVEYOR",
"GORACE"
]
| [] | ["APPVEYOR", "GORACE"] | go | 2 | 0 | |
tests/cli/test_cli.py | import os
import shutil
import sys
from pathlib import Path
import pytest
from pdm.cli import actions
from pdm.models.requirements import parse_requirement
from pdm.utils import cd, temp_environ
from tests import FIXTURES
def test_help_option(invoke):
result = invoke(["--help"])
assert "PDM - Python Development Master" in result.output
def test_lock_command(project, invoke, mocker):
m = mocker.patch.object(actions, "do_lock")
invoke(["lock"], obj=project)
m.assert_called_with(project)
def test_install_command(project, invoke, mocker):
do_lock = mocker.patch.object(actions, "do_lock")
do_sync = mocker.patch.object(actions, "do_sync")
invoke(["install"], obj=project)
do_lock.assert_called_once()
do_sync.assert_called_once()
def test_sync_command(project, invoke, mocker):
do_sync = mocker.patch.object(actions, "do_sync")
invoke(["sync"], obj=project)
do_sync.assert_called_once()
def test_update_command(project, invoke, mocker):
do_update = mocker.patch.object(actions, "do_update")
invoke(["update"], obj=project)
do_update.assert_called_once()
def test_remove_command(project, invoke, mocker):
do_remove = mocker.patch.object(actions, "do_remove")
invoke(["remove", "demo"], obj=project)
do_remove.assert_called_once()
def test_add_command(project, invoke, mocker):
do_add = mocker.patch.object(actions, "do_add")
invoke(["add", "requests"], obj=project)
do_add.assert_called_once()
def test_build_command(project, invoke, mocker):
do_build = mocker.patch.object(actions, "do_build")
invoke(["build"], obj=project)
do_build.assert_called_once()
def test_build_global_project_forbidden(invoke):
result = invoke(["build", "-g"])
assert result.exit_code != 0
def test_list_command(project, invoke, mocker):
do_list = mocker.patch.object(actions, "do_list")
invoke(["list"], obj=project)
do_list.assert_called_once()
def test_info_command(project, invoke):
result = invoke(["info"], obj=project)
assert "Project Root:" in result.output
assert project.root.as_posix() in result.output
result = invoke(["info", "--python"], obj=project)
assert result.output.strip() == project.python.executable
result = invoke(["info", "--where"], obj=project)
assert result.output.strip() == project.root.as_posix()
result = invoke(["info", "--env"], obj=project)
assert result.exit_code == 0
def test_info_global_project(invoke, tmp_path):
with cd(tmp_path):
result = invoke(["info", "-g", "--where"])
assert "global-project" in result.output.strip()
def test_global_project_other_location(invoke, project):
result = invoke(["info", "-g", "-p", project.root.as_posix(), "--where"])
assert result.stdout.strip() == project.root.as_posix()
def test_uncaught_error(invoke, mocker):
mocker.patch.object(actions, "do_list", side_effect=RuntimeError("test error"))
result = invoke(["list"])
assert "[RuntimeError]: test error" in result.stderr
result = invoke(["list", "-v"])
assert isinstance(result.exception, RuntimeError)
def test_use_command(project, invoke):
python_path = Path(shutil.which("python")).as_posix()
result = invoke(["use", "-f", "python"], obj=project)
assert result.exit_code == 0
config_content = project.root.joinpath(".pdm.toml").read_text()
assert python_path in config_content
result = invoke(["use", "-f", python_path], obj=project)
assert result.exit_code == 0
project.meta["requires-python"] = ">=3.6"
project.write_pyproject()
result = invoke(["use", "2.7"], obj=project)
assert result.exit_code == 1
def test_use_python_by_version(project, invoke):
python_version = ".".join(map(str, sys.version_info[:2]))
result = invoke(["use", "-f", python_version], obj=project)
assert result.exit_code == 0
def test_install_with_lockfile(project, invoke, working_set, repository):
result = invoke(["lock", "-v"], obj=project)
assert result.exit_code == 0
result = invoke(["install"], obj=project)
assert "Lock file" not in result.output
project.add_dependencies({"pytz": parse_requirement("pytz")}, "default")
result = invoke(["install"], obj=project)
assert "Lock file hash doesn't match" in result.output
assert "pytz" in project.locked_repository.all_candidates
assert project.is_lockfile_hash_match()
def test_install_with_dry_run(project, invoke, repository):
project.add_dependencies({"pytz": parse_requirement("pytz")}, "default")
result = invoke(["install", "--dry-run"], obj=project)
project._lockfile = None
assert "pytz" not in project.locked_repository.all_candidates
assert "pytz 2019.3" in result.output
def test_init_command(project_no_init, invoke, mocker):
mocker.patch(
"pdm.cli.commands.init.get_user_email_from_git",
return_value=("Testing", "[email protected]"),
)
do_init = mocker.patch.object(actions, "do_init")
result = invoke(["init"], input="\n\n\n\n\n\n", obj=project_no_init)
assert result.exit_code == 0
python_version = f"{project_no_init.python.major}.{project_no_init.python.minor}"
do_init.assert_called_with(
project_no_init,
"",
"",
"MIT",
"Testing",
"[email protected]",
f">={python_version}",
)
def test_init_command_library(project_no_init, invoke, mocker):
mocker.patch(
"pdm.cli.commands.init.get_user_email_from_git",
return_value=("Testing", "[email protected]"),
)
do_init = mocker.patch.object(actions, "do_init")
result = invoke(
["init"], input="\ny\ntest-project\n\n\n\n\n\n", obj=project_no_init
)
assert result.exit_code == 0
python_version = f"{project_no_init.python.major}.{project_no_init.python.minor}"
do_init.assert_called_with(
project_no_init,
"test-project",
"0.1.0",
"MIT",
"Testing",
"[email protected]",
f">={python_version}",
)
def test_init_non_interactive(project_no_init, invoke, mocker):
mocker.patch(
"pdm.cli.commands.init.get_user_email_from_git",
return_value=("Testing", "[email protected]"),
)
do_init = mocker.patch.object(actions, "do_init")
result = invoke(["init", "-n"], obj=project_no_init)
assert result.exit_code == 0
python_version = f"{project_no_init.python.major}.{project_no_init.python.minor}"
do_init.assert_called_with(
project_no_init,
"",
"",
"MIT",
"Testing",
"[email protected]",
f">={python_version}",
)
def test_config_command(project, invoke):
result = invoke(["config"], obj=project)
assert result.exit_code == 0
assert "python.use_pyenv = True" in result.output
result = invoke(["config", "-v"], obj=project)
assert result.exit_code == 0
assert "Use the pyenv interpreter" in result.output
def test_config_get_command(project, invoke):
result = invoke(["config", "python.use_pyenv"], obj=project)
assert result.exit_code == 0
assert result.output.strip() == "True"
result = invoke(["config", "foo.bar"], obj=project)
assert result.exit_code != 0
def test_config_set_command(project, invoke):
result = invoke(["config", "python.use_pyenv", "false"], obj=project)
assert result.exit_code == 0
result = invoke(["config", "python.use_pyenv"], obj=project)
assert result.output.strip() == "False"
result = invoke(["config", "foo.bar"], obj=project)
assert result.exit_code != 0
result = invoke(["config", "-l", "cache_dir", "/path/to/bar"], obj=project)
assert result.exit_code != 0
def test_config_del_command(project, invoke):
result = invoke(["config", "-l", "python.use_pyenv", "false"], obj=project)
assert result.exit_code == 0
result = invoke(["config", "python.use_pyenv"], obj=project)
assert result.output.strip() == "False"
result = invoke(["config", "-ld", "python.use_pyenv"], obj=project)
assert result.exit_code == 0
result = invoke(["config", "python.use_pyenv"], obj=project)
assert result.output.strip() == "True"
def test_config_env_var_shadowing(project, invoke):
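# The PDM_PYPI_URL env var shadows the persisted pypi.url config until it is removed.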
with temp_environ():
os.environ["PDM_PYPI_URL"] = "https://example.org/simple"
result = invoke(["config", "pypi.url"], obj=project)
assert result.output.strip() == "https://example.org/simple"
result = invoke(
["config", "pypi.url", "https://test.pypi.org/pypi"], obj=project
)
assert "config is shadowed by env var 'PDM_PYPI_URL'" in result.output
result = invoke(["config", "pypi.url"], obj=project)
assert result.output.strip() == "https://example.org/simple"
del os.environ["PDM_PYPI_URL"]
result = invoke(["config", "pypi.url"], obj=project)
assert result.output.strip() == "https://test.pypi.org/pypi"
def test_config_project_global_precedence(project, invoke):
invoke(["config", "python.path", "/path/to/foo"], obj=project)
invoke(["config", "-l", "python.path", "/path/to/bar"], obj=project)
result = invoke(["config", "python.path"], obj=project)
assert result.output.strip() == "/path/to/bar"
@pytest.mark.parametrize(
"filename",
[
"requirements.txt",
"Pipfile",
"pyproject-poetry.toml",
"projects/flit-demo/pyproject.toml",
],
)
def test_import_other_format_file(project, invoke, filename):
requirements_file = FIXTURES / filename
result = invoke(["import", str(requirements_file)], obj=project)
assert result.exit_code == 0
def test_import_requirement_no_overwrite(project, invoke, tmp_path):
project.add_dependencies({"requests": parse_requirement("requests")})
tmp_path.joinpath("reqs.txt").write_text("flask\nflask-login\n")
result = invoke(
["import", "-dGweb", str(tmp_path.joinpath("reqs.txt"))], obj=project
)
assert result.exit_code == 0, result.stderr
assert list(project.get_dependencies()) == ["requests"]
assert list(project.get_dependencies("web")) == ["flask", "flask-login"]
@pytest.mark.pypi
def test_search_package(project, invoke):
result = invoke(["search", "requests"], obj=project)
assert result.exit_code == 0
assert len(result.output.splitlines()) > 0
@pytest.mark.pypi
def test_show_package_on_pypi(invoke):
result = invoke(["show", "ipython"])
assert result.exit_code == 0
assert "ipython" in result.output.splitlines()[0]
result = invoke(["show", "requests"])
assert result.exit_code == 0
assert "requests" in result.output.splitlines()[0]
result = invoke(["show", "--name", "requests"])
assert result.exit_code == 0
assert "requests" in result.output.splitlines()[0]
def test_show_self_package(project, invoke):
result = invoke(["show"], obj=project)
assert result.exit_code == 0, result.stderr
result = invoke(["show", "--name", "--version"], obj=project)
assert result.exit_code == 0
assert "test_project\n0.0.0\n" == result.output
def test_export_to_requirements_txt(invoke, fixture_project):
project = fixture_project("demo-package")
requirements_txt = project.root / "requirements.txt"
requirements_no_hashes = project.root / "requirements_simple.txt"
requirements_pyproject = project.root / "requirements.ini"
result = invoke(["export"], obj=project)
print("==========OUTPUT=============", result.output.strip(), result.stderr.strip())
assert result.exit_code == 0
assert result.output.strip() == requirements_txt.read_text().strip()
result = invoke(["export", "--without-hashes"], obj=project)
assert result.exit_code == 0
assert result.output.strip() == requirements_no_hashes.read_text().strip()
result = invoke(["export", "--pyproject"], obj=project)
assert result.exit_code == 0
assert result.output.strip() == requirements_pyproject.read_text().strip()
result = invoke(
["export", "-o", str(project.root / "requirements_output.txt")], obj=project
)
assert result.exit_code == 0
assert (
project.root / "requirements_output.txt"
).read_text() == requirements_txt.read_text()
def test_completion_command(invoke):
result = invoke(["completion", "bash"])
assert result.exit_code == 0
assert "(completion)" in result.output
def test_lock_legacy_project(invoke, fixture_project, repository):
project = fixture_project("demo-legacy")
result = invoke(["lock"], obj=project)
assert result.exit_code == 0
assert "urllib3" in project.locked_repository.all_candidates
def test_show_update_hint(invoke, project):
prev_version = project.core.version
try:
project.core.version = "0.0.0"
r = invoke(["config"], obj=project)
finally:
project.core.version = prev_version
assert "to upgrade." in r.stderr
assert "Run $ pdm config check_update false to disable the check." in r.stderr
| []
| []
| [
"PDM_PYPI_URL"
]
| [] | ["PDM_PYPI_URL"] | python | 1 | 0 | |
biosys/wsgi.py | """
WSGI config for biosys project.
It exposes the WSGI callable as a module-level variable named ``application``.
"""
from __future__ import absolute_import, unicode_literals, print_function, division
import os
import logging
import confy
from django.core.wsgi import get_wsgi_application
from dj_static import Cling, MediaCling
logger = logging.getLogger(__name__)
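# Prefer an explicit env file named by ENV_FILE; otherwise fall back to a .env
# file in the working directory.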
if confy.env('ENV_FILE') is not None:
confy.read_environment_file(confy.env('ENV_FILE'))
else:
try:
confy.read_environment_file(".env")
except Exception:
logger.info('.env file not found')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "biosys.settings")
application = Cling(MediaCling(get_wsgi_application()))
| []
| []
| []
| [] | [] | python | 0 | 0 | |
single_frame.py | #!/usr/bin/python
import numpy as np
import os
import sys
import h5py
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from PIL import Image, ImageChops, ImageDraw
def analyse(classifier, regressor, image, batch_size, patch_size, interval):
# open the image and convert to array
image = image.convert('L')
image_array = np.array(image) / 256.0 # normalize to [0, 1)
# calculate variables for the for loops
image_w = image.size[0]
image_h = image.size[1]
# if frame is smaller than classifier's patch
# return "not found"
if image_w < patch_size or image_h < patch_size:
print ('Frame is smaller than classifier\'s patch! Frame: %dx%d Patch: %dx%d' % (image_w, image_h, patch_size, patch_size))
return (False, np.zeros(image_array.shape, dtype='float'), 0.0, 0, 0, 0.0, 0.0, 0.0)
# compute image padding
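# round width/height up to the next multiple of interval so the sliding window
# tiles the padded frame exactly (relies on Python 2 integer division)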
if interval * (image_w / interval) == image_w:
padded_w = image_w
else:
padded_w = interval * (image_w / interval + 1)
if interval * (image_h / interval) == image_h:
padded_h = image_h
else:
padded_h = interval * (image_h / interval + 1)
# only pad image if needed
if image_w != padded_w or image_h != padded_h:
image_padded = Image.new('L', (padded_w, padded_h))
image_padded.paste(image, ((padded_w - image_w) / 2, (padded_h - image_h) / 2))
else:
image_padded = image
image_padded_array = np.array(image_padded) / 256.0 # normalize to [0, 1)
# compute probability map
probMap = probabilityMap(classifier, image_padded_array, batch_size, patch_size, interval)
# find max probability patch
max_prob = 0.0
max_prob_mean = 0.0
max_prob_x = 0
max_prob_y = 0
found = False
for row in range(0, padded_h - patch_size + interval, interval):
for col in range(0, padded_w - patch_size + interval, interval):
tmp_max = np.max(probMap[row:row + patch_size, col:col + patch_size])
if tmp_max > max_prob and tmp_max > 0.5:
found = True
max_prob = tmp_max
max_prob_mean = np.mean(probMap[row:row + patch_size, col:col + patch_size])
max_prob_x = col - (padded_w - image_w) / 2 # realign with original image
max_prob_y = row - (padded_h - image_h) / 2 # realign with original image
elif tmp_max == max_prob:
tmp_mean = np.mean(probMap[row:row + patch_size, col:col + patch_size])
if tmp_mean > max_prob_mean:
found = True
max_prob = tmp_max
max_prob_mean = tmp_mean
max_prob_x = col - (padded_w - image_w) / 2 # realign with original image
max_prob_y = row - (padded_h - image_h) / 2 # realign with original image
max_prob_x = max(max_prob_x, 0)
max_prob_y = max(max_prob_y, 0)
x2 = min(image_w, max_prob_x + patch_size)
if x2 - patch_size != max_prob_x:
max_prob_x = x2 - patch_size
y2 = min(image_h, max_prob_y + patch_size)
if y2 - patch_size != max_prob_y:
max_prob_y = y2 - patch_size
# crop probability map to original image size
probMap = probMap[(padded_h - image_h) / 2:(padded_h - image_h) / 2 + image_h, (padded_w - image_w) / 2:(padded_w - image_w) / 2 + image_w]
# variables for regression
center_x = 0.0
center_y = 0.0
radius = 0.0
if found:
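# run the regressor on the winning patch; its outputs appear to be normalized,
# so they are scaled back by 100.0 (presumably the 100 px patch size)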
regressor.blobs['data'].data[0, 0, ...] = image_array[max_prob_y:max_prob_y + patch_size, max_prob_x:max_prob_x + patch_size]
regressor.forward()
center_x, center_y, radius = regressor.blobs['score'].data[0] * 100.0
return (found, probMap, max_prob, max_prob_x, max_prob_y, center_x, center_y, radius)
def analyseAroundPoint(classifier, regressor, image, batch_size, patch_size, interval, old_x, old_y):
# open the image and convert to array
image = image.convert('L')
image_array = np.array(image) / 256.0 # normalize to [0, 1)
# calculate variables for the for loops
image_w = image.size[0]
image_h = image.size[1]
# if frame is smaller than classifier's patch
# return "not found"
if image_w < patch_size or image_h < patch_size:
print ('Frame is smaller than classifier\'s patch! Frame: %dx%d Patch: %dx%d' % (image_w, image_h, patch_size, patch_size))
return (False, np.zeros(image_array.shape, dtype='float'), 0.0, 0, 0, 0.0, 0.0, 0.0)
# zero out the matrix
classifier.blobs['data'].data[:] = 0
# initialise probability map to zero
probMap = np.zeros(image_array.shape, dtype='float')
probMapCounters = np.zeros(image_array.shape, dtype='uint8')
# analyse a 3x3 grid around old_x, old_y
# the middle cell goes from old_x, old_y to old_x + patch_size, old_y + patch_size
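# patches are clamped at the frame borders: if a patch would extend past an
# edge, its origin is slid back so the full patch stays inside the image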
for row in range(3):
for col in range(3):
# compute x1, x2
x1 = max(0, old_x + interval * (col - 1))
x2 = min(image_w, x1 + patch_size)
if x2 - patch_size != x1:
x1 = x2 - patch_size
# compute y1, y2
y1 = max(0, old_y + interval * (row - 1))
y2 = min(image_h, y1 + patch_size)
if y2 - patch_size != y1:
y1 = y2 - patch_size
# load patches
index = row * 3 + col
classifier.blobs['data'].data[index, 0, ...] = image_array[y1:y2, x1:x2]
# inference
classifier.forward()
# compute probability map
for idx, prob in enumerate(classifier.blobs['prob'].data[:9, 1]):
tmp_row = idx / 3
tmp_col = idx % 3
# compute x1, x2
x1 = max(0, old_x + interval * (tmp_col - 1))
x2 = min(image_w, x1 + patch_size)
if x2 - patch_size != x1:
x1 = x2 - patch_size
# compute y1, y2
y1 = max(0, old_y + interval * (tmp_row - 1))
y2 = min(image_h, y1 + patch_size)
if y2 - patch_size != y1:
y1 = y2 - patch_size
probMap[y1:y2, x1:x2] += prob
probMapCounters[y1:y2, x1:x2] += 1
# compute mean
# since a lot of counters will be zero, ignore the division by zero error and put 1 as result
# credit: http://stackoverflow.com/a/35696047/2811496
with np.errstate(divide='ignore', invalid='ignore'):
probMap = np.true_divide(probMap, probMapCounters)
probMap[~np.isfinite(probMap)] = np.min(classifier.blobs['prob'].data[:9, 1]) # replaces -inf, inf and NaN
# max probability of sphere
found = False
max_prob = 0.0
max_prob_mean = 0.0
max_prob_x = 0
max_prob_y = 0
# find max probability patch
for idx in range(9):
tmp_row = idx / 3
tmp_col = idx % 3
# compute x1, x2
x1 = max(0, old_x + interval * (tmp_col - 1))
x2 = min(image_w, x1 + patch_size)
if x2 - patch_size != x1:
x1 = x2 - patch_size
# compute y1, y2
y1 = max(0, old_y + interval * (tmp_row - 1))
y2 = min(image_h, y1 + patch_size)
if y2 - patch_size != y1:
y1 = y2 - patch_size
tmp_max = np.max(probMap[y1:y2, x1:x2])
if tmp_max > max_prob and tmp_max > 0.5:
found = True
max_prob = tmp_max
max_prob_mean = np.mean(probMap[y1:y2, x1:x2])
max_prob_x = x1
max_prob_y = y1
elif tmp_max == max_prob:
tmp_mean = np.mean(probMap[y1:y2, x1:x2])
if tmp_mean > max_prob_mean:
found = True
max_prob = tmp_max
max_prob_mean = tmp_mean
max_prob_x = x1
max_prob_y = y1
# variables for regression
center_x = 0.0
center_y = 0.0
radius = 0.0
if found:
regressor.blobs['data'].data[0, 0, ...] = image_array[max_prob_y:max_prob_y + patch_size, max_prob_x:max_prob_x + patch_size]
regressor.forward()
center_x, center_y, radius = regressor.blobs['score'].data[0] * 100.0
return (found, probMap, max_prob, max_prob_x, max_prob_y, center_x, center_y, radius)
def probabilityMap(classifier, image_array, batch_size, patch_size, interval):
# zero out the matrix
classifier.blobs['data'].data[:] = 0
padded_h = image_array.shape[0]
padded_w = image_array.shape[1]
# loop variables
prob_row = (padded_h - patch_size) / interval + 1
prob_col = (padded_w - patch_size) / interval + 1
probs = np.zeros((prob_row * prob_col), dtype='float')
prob_idx = 0
index = 0
for row in range(0, padded_h - patch_size + interval, interval):
for col in range(0, padded_w - patch_size + interval, interval):
classifier.blobs['data'].data[index, 0, ...] = image_array[row:row + patch_size, col:col + patch_size]
index += 1
# once batch is filled, calculate probabilities with the classifier
if index == batch_size:
# inference
classifier.forward()
probs[prob_idx:prob_idx + batch_size] = classifier.blobs['prob'].data[:, 1]
# update indexes
index = 0
prob_idx += batch_size
# inference for last batch
if index > 0:
classifier.forward()
probs[prob_idx:] = classifier.blobs['prob'].data[:index, 1]
probs = probs.reshape((prob_row, prob_col))
# initialise probability map to zero
probabilityMap = np.zeros(image_array.shape, dtype='float')
probabilityMapCounters = np.zeros(image_array.shape, dtype='uint8')
for row in range(0, prob_row):
for col in range(0, prob_col):
probabilityMap[row * interval:row * interval + patch_size, col * interval:col * interval + patch_size] += probs[row, col]
probabilityMapCounters[row * interval:row * interval + patch_size, col * interval:col * interval + patch_size] += 1
# compute average per pixel
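# unlike analyseAroundPoint, every pixel of the padded image is covered by at
# least one patch here, so the counters are never zero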
probabilityMap /= probabilityMapCounters
return probabilityMap
# if the script is called from command line and not imported
if __name__ == '__main__':
# check command line arguments
if len(sys.argv) != 6:
print ("Usage: %s classifierDescriptor regressorDescriptor classifierModel regressorModel inputFilename" % sys.argv[0])
sys.exit(1)
caffe_root = os.getenv('CAFFE_ROOT', './')
sys.path.insert(0, caffe_root + '/python')
import caffe
# setup solver
caffe.set_device(0)
caffe.set_mode_gpu()
# get console arguments
classifierDescriptor, regressorDescriptor, classifierModel, regressorModel, imageFilename = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]
# load models
classifier = caffe.Net(classifierDescriptor, classifierModel, caffe.TEST)
regressor = caffe.Net(regressorDescriptor, regressorModel, caffe.TEST)
# open the image and convert to array
image = Image.open(imageFilename).convert('L')
image_color = image.convert('RGB')
interval = 25
patch_size = 100
batch_size = 100
# compute max probability patch
found, probMap, max_prob, max_prob_x, max_prob_y, center_x, center_y, radius = analyse(classifier, regressor, image, batch_size, patch_size, interval)
# compose original image with computed probability map
image_color = ImageChops.multiply(image_color, Image.fromarray(np.uint8(cm.jet(probMap, bytes=True))).convert('RGB'))
# draw rectangle around solution
if found:
draw = ImageDraw.Draw(image_color)
draw.rectangle([(max_prob_x, max_prob_y), (max_prob_x + patch_size, max_prob_y + patch_size)], outline='red')
draw.rectangle([(max_prob_x + 1, max_prob_y + 1), (max_prob_x + patch_size - 1, max_prob_y + patch_size - 1)], outline='red')
draw.rectangle([(max_prob_x + 2, max_prob_y + 2), (max_prob_x + patch_size - 2, max_prob_y + patch_size - 2)], outline='red')
draw.ellipse([max_prob_x + center_x - radius, max_prob_y + center_y - radius, max_prob_x + center_x + radius, max_prob_y + center_y + radius], outline='red')
draw.ellipse([max_prob_x + center_x - radius - 1, max_prob_y + center_y - radius - 1, max_prob_x + center_x + radius + 1, max_prob_y + center_y + radius + 1], outline='red')
draw.ellipse([max_prob_x + center_x - radius - 2, max_prob_y + center_y - radius - 2, max_prob_x + center_x + radius + 2, max_prob_y + center_y + radius + 2], outline='red')
else:
print ("No ball found!")
plt.imshow(image_color)
plt.show()
| []
| []
| [
"CAFFE_ROOT"
]
| [] | ["CAFFE_ROOT"] | python | 1 | 0 | |
app/templates/model/user.go | package model
// User struct
type User struct {
Name string `json:"name" form:"name"`
Password string `json:"password" form:"password"`
}
| []
| []
| []
| [] | [] | go | null | null | null |
manage.py | #!/usr/bin/env python
"""
* Copyright 2016 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import os
import sys
from udon.boot import fix_path
fix_path()
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "udon.conf.local")
from djangae.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
api/src/main/java/se/codeby/service/postnummer/PostnummerConfiguration.java | package se.codeby.service.postnummer;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.dropwizard.Configuration;
import io.dropwizard.db.DataSourceFactory;
import lombok.extern.slf4j.Slf4j;
import se.codeby.service.postnummer.clients.ClientConfiguration;
import javax.validation.Valid;
import javax.validation.constraints.NotNull;
import java.net.URI;
import java.net.URISyntaxException;
@SuppressWarnings("WeakerAccess")
@Slf4j
public class PostnummerConfiguration extends Configuration {
@Valid
@NotNull
@JsonProperty("database")
public DataSourceFactory dataSourceFactory = new DataSourceFactory();
@NotNull
@JsonProperty("postNordClient")
public ClientConfiguration postNordClient = ClientConfiguration.builder().build();
@NotNull
@JsonProperty("openStreetMapClient")
public ClientConfiguration openStreetMapClient = ClientConfiguration.builder().build();
public DataSourceFactory getDataSourceFactory() {
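// Heroku injects the database as DATABASE_URL (postgres://user:password@host:port/db);
// translate that URI into JDBC form with SSL enabled.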
String databaseUrl = System.getenv("DATABASE_URL");
if (databaseUrl == null) {
log.info("Running within local");
return dataSourceFactory;
} else {
log.info("Running within Heroku");
log.info("Creating DB for " + databaseUrl);
try {
URI dbUri = new URI(databaseUrl);
final String user = dbUri.getUserInfo().split(":")[0];
final String password = dbUri.getUserInfo().split(":")[1];
final String url = "jdbc:postgresql://" + dbUri.getHost() + ':' + dbUri.getPort() + dbUri.getPath() + "?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory";
dataSourceFactory.setUser(user);
dataSourceFactory.setPassword(password);
dataSourceFactory.setUrl(url);
return dataSourceFactory;
} catch (URISyntaxException e) {
log.info(e.getMessage());
throw new RuntimeException(e);
}
}
}
}
| [
"\"DATABASE_URL\""
]
| []
| [
"DATABASE_URL"
]
| [] | ["DATABASE_URL"] | java | 1 | 0 | |
cmd/helper.go | package cmd
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
)
func check(e error) {
if e != nil {
panic(e)
}
}
func validGithubConfig(opts *GithubConfig) bool {
return opts.Token != "" && opts.SourceRepo != "" && opts.AuthorName != "" && opts.AuthorEmail != ""
}
//func UserHomeDir() string {
// if runtime.GOOS == "windows" {
// home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
// if home == "" {
// home = os.Getenv("USERPROFILE")
// }
// return home
// }
// return os.Getenv("HOME")
//}
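// UpdateIfNew prompts for a value, showing the current one as the default, and
// reports whether the user entered something different.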
func UpdateIfNew(scanner *bufio.Scanner, src *string, text string) bool {
changed := false
if *src == "" {
fmt.Print(text + ": ")
} else {
fmt.Print(text + "(" + *src + "): ")
}
scanner.Scan()
val := scanner.Text()
if val != "" {
if val != *src {
changed = true
}
*src = val
}
return changed
}
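// cacheSet serializes data as indented JSON into filename under root.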
func cacheSet(filename string, data interface{}, root string) {
f, err := os.Create(filepath.Join(root, filename))
check(err)
defer f.Close()
b, err := json.MarshalIndent(data, "", " ")
check(err)
_, err = io.Copy(f, bytes.NewReader(b))
check(err)
}
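// cacheGet unmarshals the cached JSON file into ret; it returns false when the
// file does not exist.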
func cacheGet(filename string, ret interface{}, root string) bool {
path := filepath.Join(root, filename)
_, err := os.Stat(path)
if os.IsNotExist(err) {
return false
}
content, err := ioutil.ReadFile(path)
check(err)
json.Unmarshal(content, ret)
return true
}
func writeTask(filename string, text string, root string) {
f, err := os.Create(filepath.Join(root, filename))
check(err)
defer f.Close()
_, err = f.WriteString(text)
check(err)
}
func updateListByTask(task string, status string, rootPath string) {
var problems []*Problem
cacheGet("problemList.json", &problems, rootPath)
for _, v := range problems {
if task == v.Task {
v.Solved = status
break
}
}
cacheSet("problemList.json", problems, rootPath)
}
func getTemplate(ext string) string {
content, err := ioutil.ReadFile("template" + ext)
if err != nil {
return ""
}
return string(content)
}
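// getTaskFromCache renders the cached task HTML with the external lynx browser
// (lynx must be on PATH) and maps TeX escape sequences to their Unicode forms.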
func getTaskFromCache(task string, root string) (string, string) {
path := filepath.Join(root, task+".task.html")
output, err := exec.Command("bash", "-c", "lynx -dump "+path).Output()
check(err)
data := string(output)
for k, v := range unicodeMap {
data = strings.Replace(data, k, v, -1)
}
x := strings.Fields(strings.Split(strings.Split(data, "\n")[0], "-")[1])
return strings.Join(x, "-"), data
}
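// writeCodeFile creates filename with the task text embedded as a leading block
// comment followed by the template; an existing file is left untouched.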
func writeCodeFile(filename string, text string, template string) bool {
if _, err := os.Stat(filename); err == nil {
return true
}
f, err := os.Create(filename)
if err != nil {
return false
}
_, err = f.WriteString("/*\n" + text + "*/\n" + template)
if err != nil {
err := f.Close()
check(err)
return false
}
err = f.Close()
if err != nil {
return false
}
return true
}
var extLangMap = map[string]string{
".java": "Java",
".js": "Node.js",
".py": "Python3",
".cpp": "C++",
}
var extOptionMap = map[string]string{
".java": "",
".js": "",
".py": "CPython3",
".cpp": "C++17",
}
var langExtMap = map[string]string{
"java": ".java",
"javascript": ".js",
"python": ".py",
"cpp": ".cpp",
}
var unicodeMap = map[string]string{
"\\alpha": "\u03B1",
"\\beta": "\u03B2",
"\\gamma": "\u03B3",
"\\delta": "\u03B4",
"\\epsilon": "\u03F5",
"\\zeta": "\u03B6",
"\\eta": "\u03B7",
"\\theta": "\u03B8",
"\\iota": "\u03B9",
"\\kappa": "\u03BA",
"\\lambda": "\u03BB",
"\\mu": "\u03BC",
"\\nu": "\u03BD",
"\\xi": "\u03BE",
"\\omicron": "\u03BF",
"\\pi": "\u03C0",
"\\rho": "\u03C1",
"\\sigma": "\u03C3",
"\\tau": "\u03C4",
"\\upsilon": "\u03C5",
"\\phi": "\u03D5",
"\\chi": "\u03C7",
"\\psi": "\u03C8",
"\\omega": "\u03C9",
"\\varepsilon": "\u03B5",
"\\vartheta": "\u03D1",
"\\varpi": "\u03D6",
"\\varrho": "\u03F1",
"\\varsigma": "\u03C2",
"\\varphi": "\u03C6",
"\\S": "\u00A7",
"\\aleph": "\u2135",
"\\hbar": "\u210F",
"\\imath": "\u0131",
"\\jmath": "\u0237",
"\\ell": "\u2113",
"\\wp": "\u2118",
"\\Re": "\u211C",
"\\Im": "\u2111",
"\\partial": "\u2202",
"\\infty": "\u221E",
"\\prime": "\u2032",
"\\emptyset": "\u2205",
"\\nabla": "\u2207",
"\\top": "\u22A4",
"\\bot": "\u22A5",
"\\angle": "\u2220",
"\\triangle": "\u25B3",
"\\backslash": "\u2216",
"\\forall": "\u2200",
"\\exists": "\u2203",
"\\neg": "\u00AC",
"\\lnot": "\u00AC",
"\\flat": "\u266D",
"\\natural": "\u266E",
"\\sharp": "\u266F",
"\\clubsuit": "\u2663",
"\\diamondsuit": "\u2662",
"\\heartsuit": "\u2661",
"\\spadesuit": "\u2660",
"\\surd": "\u221A",
"\\coprod": "\u2210",
"\\bigvee": "\u22C1",
"\\bigwedge": "\u22C0",
"\\biguplus": "\u2A04",
"\\bigcap": "\u22C2",
"\\bigcup": "\u22C3",
"\\int": "\u222B",
"\\intop": "\u222B",
"\\iint": "\u222C",
"\\iiint": "\u222D",
"\\prod": "\u220F",
"\\sum": "\u2211",
"\\bigotimes": "\u2A02",
"\\bigoplus": "\u2A01",
"\\bigodot": "\u2A00",
"\\oint": "\u222E",
"\\bigsqcup": "\u2A06",
"\\smallint": "\u222B",
"\\triangleleft": "\u25C3",
"\\triangleright": "\u25B9",
"\\bigtriangleup": "\u25B3",
"\\bigtriangledown": "\u25BD",
"\\wedge": "\u2227",
"\\land": "\u2227",
"\\vee": "\u2228",
"\\lor": "\u2228",
"\\cap": "\u2229",
"\\cup": "\u222A",
"\\ddagger": "\u2021",
"\\dagger": "\u2020",
"\\sqcap": "\u2293",
"\\sqcup": "\u2294",
"\\uplus": "\u228E",
"\\amalg": "\u2A3F",
"\\diamond": "\u22C4",
"\\bullet": "\u2219",
"\\wr": "\u2240",
"\\div": "\u00F7",
"\\mp": "\u2213",
"\\pm": "\u00B1",
"\\circ": "\u2218",
"\\bigcirc": "\u25EF",
"\\setminus": "\u2216",
"\\cdot": "\u22C5",
"\\ast": "\u2217",
"\\times": "\u00D7",
"\\star": "\u22C6",
"\\propto": "\u221D",
"\\sqsubseteq": "\u2291",
"\\sqsupseteq": "\u2292",
"\\parallel": "\u2225",
"\\mid": "\u2223",
"\\dashv": "\u22A3",
"\\vdash": "\u22A2",
"\\leq": "\u2264",
"\\le": "\u2264",
"\\geq": "\u2265",
"\\ge": "\u2265",
"\\lt": "\u003C",
"\\gt": "\u003E",
"\\succ": "\u227B",
"\\prec": "\u227A",
"\\approx": "\u2248",
"\\succeq": "\u2AB0",
"\\preceq": "\u2AAF",
"\\supset": "\u2283",
"\\subset": "\u2282",
"\\supseteq": "\u2287",
"\\subseteq": "\u2286",
"\\in": "\u2208",
"\\ni": "\u220B",
"\\notin": "\u2209",
"\\owns": "\u220B",
"\\gg": "\u226B",
"\\ll": "\u226A",
"\\sim": "\u223C",
"\\simeq": "\u2243",
"\\perp": "\u22A5",
"\\equiv": "\u2261",
"\\asymp": "\u224D",
"\\smile": "\u2323",
"\\frown": "\u2322",
"\\ne": "\u2260",
"\\neq": "\u2260",
"\\cong": "\u2245",
"\\doteq": "\u2250",
"\\bowtie": "\u22C8",
"\\models": "\u22A8",
"\\notChar": "\u29F8",
"\\Leftrightarrow": "\u21D4",
"\\Leftarrow": "\u21D0",
"\\Rightarrow": "\u21D2",
"\\leftrightarrow": "\u2194",
"\\leftarrow": "\u2190",
"\\gets": "\u2190",
"\\rightarrow": "\u2192",
"\\to": "\u2192",
"\\mapsto": "\u21A6",
"\\leftharpoonup": "\u21BC",
"\\leftharpoondown": "\u21BD",
"\\rightharpoonup": "\u21C0",
"\\rightharpoondown": "\u21C1",
"\\nearrow": "\u2197",
"\\searrow": "\u2198",
"\\nwarrow": "\u2196",
"\\swarrow": "\u2199",
"\\rightleftharpoons": "\u21CC",
"\\hookrightarrow": "\u21AA",
"\\hookleftarrow": "\u21A9",
"\\longleftarrow": "\u27F5",
"\\Longleftarrow": "\u27F8",
"\\longrightarrow": "\u27F6",
"\\Longrightarrow": "\u27F9",
"\\Longleftrightarrow": "\u27FA",
"\\longleftrightarrow": "\u27F7",
"\\longmapsto": "\u27FC",
"\\ldots": "\u2026",
"\\cdots": "\u22EF",
"\\vdots": "\u22EE",
"\\ddots": "\u22F1",
"\\dotsc": "\u2026",
"\\dotsb": "\u22EF",
"\\dotsm": "\u22EF",
"\\dotsi": "\u22EF",
"\\dotso": "\u2026",
"\\uparrow": "\u2191",
"\\downarrow": "\u2193",
"\\updownarrow": "\u2195",
"\\Uparrow": "\u21D1",
"\\Downarrow": "\u21D3",
"\\Updownarrow": "\u21D5",
"\\rangle": "\u27E9",
"\\langle": "\u27E8",
"\\rbrace": "}",
"\\lbrace": "{",
"\\}": "}",
"\\{": "{",
"\\rceil": "\u2309",
"\\lceil": "\u2308",
"\\rfloor": "\u230B",
"\\lfloor": "\u230A",
"\\lbrack": "[",
"\\rbrack": "]",
"\\[": "[",
"\\]": "]",
"\\dots": "...",
}
| [
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\"",
"\"HOME\""
]
| []
| [
"USERPROFILE",
"HOME",
"HOMEPATH",
"HOMEDRIVE"
]
| [] | ["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"] | go | 4 | 0 | |
examples/overlay_network.py | import os
from cohesivenet import Logger
from cohesivenet.macros import connect, config, admin, peering, routing
Logger.silence_urllib3()
def take_keys(keys, data_dict):
"""Take keys from dict
Arguments:
keys {List[str]} -- Keys it include in output dict
data_dict {Dict}
Returns:
[Dict]
"""
return {k: v for k, v in data_dict.items() if k in keys}
def setup_clients(host_password_dicts):
"""setup_clients Connect to clients
Arguments:
args: List[Dict] - {
host: str,
password: str
}
Returns:
List[VNS3Client]
"""
assert type(host_password_dicts) is list, "setup_clients expects list as input."
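# merge each host/password pair with fixed client settings; the API user is
# always "api" and TLS verification is disabled (VNS3 controllers typically
# serve self-signed certificates)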
return connect.get_clients(
*[
dict(
take_keys(["host", "password"], connect_args),
verify=False,
username="api",
)
for connect_args in host_password_dicts
]
)
def update_client_passwords(clients, master_password):
"""[summary]
Arguments:
clients {List[VNS3Client]}
master_password {str}
Returns:
List[VNS3Client]
"""
api_roll_resp = admin.roll_api_password(master_password, clients)
ui_toggle_resp = admin.roll_ui_credentials(
{"username": "vnscubed", "password": master_password}, clients, enable_ui=True
)
return api_roll_resp, ui_toggle_resp
def get_env():
"""Fetch variables from environment:
CONTROLLER_HOSTS_CSV: CSV of VNS3 hosts
CONTROLLER_PASSWORDS_CSV: CSV of VNS3 host passwords
CONTROLLER_SUBNETS: CSV of controller subnets, one per host
MASTER_PASSWORD: master password to be used for API
MASTER_SET: set to a truthy value if controllers already use the master password
LICENSE: path to license file
KEYSET_TOKEN: secret token to be used for keyset
Raises:
RuntimeError: Raise runtime error if environment is not properly configured
Returns:
Dict -- Parsed data for configuring a mesh network
"""
license_file = os.getenv("LICENSE")
keyset_token = os.getenv("KEYSET_TOKEN")
master_password = os.getenv("MASTER_PASSWORD")
master_set = os.getenv("MASTER_SET", "False").lower() not in ("0", "false")
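# MASTER_SET indicates the controllers already use the master password, in
# which case the per-controller passwords from CONTROLLER_PASSWORDS_CSV are ignored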
controller_hosts = os.getenv("CONTROLLER_HOSTS_CSV").split(",")
controller_passwords = os.getenv("CONTROLLER_PASSWORDS_CSV").split(",")
controller_subnets = os.getenv("CONTROLLER_SUBNETS").split(",")
assert (
len(controller_hosts) == len(controller_passwords) == len(controller_subnets)
), (
"CONTROLLER_HOSTS_CSV, CONTROLLER_PASSWORDS_CSV "
"and CONTROLLER_SUBNETS must have same number of elements"
)
return {
"master_set": master_set,
"controllers": [
{
"host": host + ":8000",
"password": controller_passwords[i]
if not master_set
else master_password,
"subnet": controller_subnets[i],
}
for i, host in enumerate(controller_hosts)
],
"master_password": master_password,
"topology_name": "VNS3 Overlay Net Example",
"license": license_file,
"keyset_token": keyset_token,
}
def create_clients(**parameters):
clients = setup_clients(parameters["controllers"])
if not parameters.get("master_set"):
print("Setting master")
[_, api_failures], [_, ui_failures] = update_client_passwords(
clients, parameters.get("master_password")
)
if len(ui_failures) or len(api_failures):
print("Failure updating master passwords:")
print("UI: %s" % ".".join([str(e) for e in ui_failures]))
print("API: %s" % ".".join([str(e) for e in api_failures]))
else:
for client in clients:
client.configuration.password = parameters.get("master_password")
return clients
def setup_overlay(client, parameters):
return config.setup_controller(
client,
parameters["topology_name"],
parameters["license"],
license_parameters={"default": True},
keyset_parameters={"token": parameters["keyset_token"]},
reboot_timeout=240,
keyset_timeout=240,
)
def peer_controllers(root_client, peer_client, parameters):
"""Run configure and create peering mesh and route advertisements
Arguments:
root_client {VNS3Client}
peer_client {VNS3Client}
parameters {Dict} - values from get_env
"""
print("Setting peer Id")
peer_client.peering.put_self_peering_id({"id": 2})
print("Creating peering mesh")
peering.peer_mesh([root_client, peer_client])
print("Creating route advertisements")
# derive subnets from the configured controllers (the first controller is the root)
ordered_subnets = [c["subnet"] for c in parameters["controllers"]]
routing.create_route_advertisements([root_client, peer_client], ordered_subnets)
def run():
"""Run create peering mesh."""
parameters = get_env()
clients = create_clients(**parameters)
# only building a simple overlay topology with 2 controllers
assert (
len(clients) == 2
), "More controllers provided by env than expected. Expected 2 in overlay."
root, peer = clients
setup_overlay(root, parameters)
config.fetch_keysets([peer], root.host_uri, parameters["keyset_token"])
peer_controllers(root, peer, parameters)
| []
| []
| [
"CONTROLLER_HOSTS_CSV",
"LICENSE",
"MASTER_PASSWORD",
"KEYSET_TOKEN",
"CONTROLLER_PASSWORDS_CSV",
"CONTROLLER_SUBNETS",
"MASTER_SET"
]
| [] | ["CONTROLLER_HOSTS_CSV", "LICENSE", "MASTER_PASSWORD", "KEYSET_TOKEN", "CONTROLLER_PASSWORDS_CSV", "CONTROLLER_SUBNETS", "MASTER_SET"] | python | 7 | 0 | |
tool/tsh/tsh.go | /*
Copyright 2016-2021 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"os"
"os/signal"
"path"
"path/filepath"
"runtime"
"sort"
"strings"
"syscall"
"time"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/api/constants"
"github.com/gravitational/teleport/api/types"
apisshutils "github.com/gravitational/teleport/api/utils/sshutils"
"github.com/gravitational/teleport/lib/asciitable"
"github.com/gravitational/teleport/lib/auth"
"github.com/gravitational/teleport/lib/benchmark"
"github.com/gravitational/teleport/lib/client"
dbprofile "github.com/gravitational/teleport/lib/client/db"
"github.com/gravitational/teleport/lib/client/identityfile"
"github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/events"
"github.com/gravitational/teleport/lib/kube/kubeconfig"
"github.com/gravitational/teleport/lib/modules"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/session"
"github.com/gravitational/teleport/lib/sshutils"
"github.com/gravitational/teleport/lib/sshutils/scp"
"github.com/gravitational/teleport/lib/tlsca"
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/kingpin"
"github.com/gravitational/trace"
gops "github.com/google/gops/agent"
"github.com/jonboulle/clockwork"
"github.com/sirupsen/logrus"
)
var log = logrus.WithFields(logrus.Fields{
trace.Component: teleport.ComponentTSH,
})
// CLIConf stores command line arguments and flags:
type CLIConf struct {
// UserHost contains "[login]@hostname" argument to SSH command
UserHost string
// Commands to execute on a remote host
RemoteCommand []string
// DesiredRoles indicates one or more roles which should be requested.
DesiredRoles string
// RequestReason indicates the reason for an access request.
RequestReason string
// SuggestedReviewers is a list of suggested request reviewers.
SuggestedReviewers string
// RequestID is an access request ID
RequestID string
// ReviewReason indicates the reason for an access review.
ReviewReason string
// ReviewableRequests indicates that only requests which can be reviewed should
// be listed.
ReviewableRequests bool
// SuggestedRequests indicates that only requests which suggest the current user
// as a reviewer should be listed.
SuggestedRequests bool
// MyRequests indicates that only requests created by the current user
// should be listed.
MyRequests bool
// Approve/Deny indicates the desired review kind.
Approve, Deny bool
// Username is the Teleport user's username (to login into proxies)
Username string
// Proxy keeps the hostname:port of the SSH proxy to use
Proxy string
// TTL defines how long a session must be active (in minutes)
MinsToLive int32
// SSH Port on a remote SSH host
NodePort int32
// Login on a remote SSH host
NodeLogin string
// InsecureSkipVerify bypasses verification of HTTPS certificate when talking to web proxy
InsecureSkipVerify bool
// Remote SSH session to join
SessionID string
// Src:dest parameter for SCP
CopySpec []string
// -r flag for scp
RecursiveCopy bool
// -L flag for ssh. Local port forwarding like 'ssh -L 80:remote.host:80 -L 443:remote.host:443'
LocalForwardPorts []string
// DynamicForwardedPorts is port forwarding using SOCKS5. It is similar to
// "ssh -D 8080 example.com".
DynamicForwardedPorts []string
// ForwardAgent forwards the agent to the target node. Equivalent of -A for OpenSSH.
ForwardAgent bool
// ProxyJump is an optional -J flag pointing to the list of jumphosts,
// it is the equivalent of the --proxy flag in tsh's interpretation
ProxyJump string
// --local flag for ssh
LocalExec bool
// SiteName specifies the remote site to log in to
SiteName string
// KubernetesCluster specifies the kubernetes cluster to login to.
KubernetesCluster string
// DatabaseService specifies the database proxy server to log into.
DatabaseService string
// DatabaseUser specifies database user to embed in the certificate.
DatabaseUser string
// DatabaseName specifies database name to embed in the certificate.
DatabaseName string
// AppName specifies proxied application name.
AppName string
// Interactive, when set to true, launches remote command with the terminal attached
Interactive bool
// Quiet mode, -q command (disables progress printing)
Quiet bool
// Namespace is used to select cluster namespace
Namespace string
// NoCache is used to turn off client cache for nodes discovery
NoCache bool
// BenchDuration is a duration for the benchmark
BenchDuration time.Duration
// BenchRate is a requests-per-second rate to maintain
BenchRate int
// BenchInteractive indicates that we should create interactive session
BenchInteractive bool
// BenchExport exports the latency profile
BenchExport bool
// BenchExportPath saves the latency profile in provided path
BenchExportPath string
// BenchTicks ticks per half distance
BenchTicks int32
// BenchValueScale value at which to scale the values recorded
BenchValueScale float64
// Context is a context to control execution
Context context.Context
// Gops starts gops agent on a specified address
// if not specified, gops won't start
Gops bool
// GopsAddr specifies the gops address to listen on
GopsAddr string
// IdentityFileIn is an argument to -i flag (path to the private key+cert file)
IdentityFileIn string
// Compatibility flags, --compat, specifies OpenSSH compatibility flags.
Compatibility string
// CertificateFormat defines the format of the user SSH certificate.
CertificateFormat string
// IdentityFileOut is an argument to -out flag
IdentityFileOut string
// IdentityFormat (used for --format flag for 'tsh login') defines which
// format to use with --out to store a freshly retrieved certificate
IdentityFormat identityfile.Format
// IdentityOverwrite when true will overwrite any existing identity file at
// IdentityFileOut. When false, user will be prompted before overwriting
// any files.
IdentityOverwrite bool
// BindAddr is an address in the form of host:port to bind to
// during `tsh login` command
BindAddr string
// AuthConnector is the name of the connector to use.
AuthConnector string
// SkipVersionCheck skips version checking for client and server
SkipVersionCheck bool
// Options is a list of OpenSSH options in the format used in the
// configuration file.
Options []string
// Verbose is used to print extra output.
Verbose bool
// Format is used to change the format of output
Format string
// NoRemoteExec will not execute a remote command after connecting to a host,
// will block instead. Useful when port forwarding. Equivalent of -N for OpenSSH.
NoRemoteExec bool
// Debug sends debug logs to stdout.
Debug bool
// Browser can be used to pass the name of a browser to override the system default
// (not currently implemented), or set to 'none' to suppress browser opening entirely.
Browser string
// UseLocalSSHAgent set to false will prevent this client from attempting to
// connect to the local ssh-agent (or similar) socket at $SSH_AUTH_SOCK.
//
// Deprecated in favor of `AddKeysToAgent`.
UseLocalSSHAgent bool
// AddKeysToAgent specifies how certs are handled by the local agent.
AddKeysToAgent string
// EnableEscapeSequences will scan stdin for SSH escape sequences during
// command/shell execution. This also requires stdin to be an interactive
// terminal.
EnableEscapeSequences bool
// PreserveAttrs preserves access/modification times from the original file.
PreserveAttrs bool
// executablePath is the absolute path to the current executable.
executablePath string
// unsetEnvironment unsets Teleport related environment variables.
unsetEnvironment bool
// mockSSOLogin used in tests to override sso login handler in teleport client.
mockSSOLogin client.SSOLoginFunc
// HomePath is where tsh stores profiles
HomePath string
}
func main() {
cmdLineOrig := os.Args[1:]
var cmdLine []string
// let's see: if the executable name is 'ssh' or 'scp' we convert
// that to "tsh ssh" or "tsh scp"
switch path.Base(os.Args[0]) {
case "ssh":
cmdLine = append([]string{"ssh"}, cmdLineOrig...)
case "scp":
cmdLine = append([]string{"scp"}, cmdLineOrig...)
default:
cmdLine = cmdLineOrig
}
if err := Run(cmdLine); err != nil {
utils.FatalError(err)
}
}
const (
authEnvVar = "TELEPORT_AUTH"
clusterEnvVar = "TELEPORT_CLUSTER"
loginEnvVar = "TELEPORT_LOGIN"
bindAddrEnvVar = "TELEPORT_LOGIN_BIND_ADDR"
proxyEnvVar = "TELEPORT_PROXY"
homeEnvVar = "TELEPORT_HOME"
// TELEPORT_SITE uses the older deprecated "site" terminology to refer to a
// cluster. All new code should use TELEPORT_CLUSTER instead.
siteEnvVar = "TELEPORT_SITE"
userEnvVar = "TELEPORT_USER"
addKeysToAgentEnvVar = "TELEPORT_ADD_KEYS_TO_AGENT"
useLocalSSHAgentEnvVar = "TELEPORT_USE_LOCAL_SSH_AGENT"
clusterHelp = "Specify the Teleport cluster to connect"
browserHelp = "Set to 'none' to suppress browser opening on login"
// proxyDefaultResolutionTimeout is how long to wait for an unknown proxy
// port to be resolved.
//
// Originally based on the RFC-8305 "Maximum Connection Attempt Delay"
// recommended default value of 2s. In the RFC this value is for the
// establishment of a TCP connection, rather than the full HTTP round-
// trip that we measure against, so some tweaking may be needed.
proxyDefaultResolutionTimeout = 2 * time.Second
)
// cliOption is used in tests to inject/override configuration within Run
type cliOption func(*CLIConf) error
// Run executes TSH client. same as main() but easier to test
func Run(args []string, opts ...cliOption) error {
var cf CLIConf
utils.InitLogger(utils.LoggingForCLI, logrus.WarnLevel)
moduleCfg := modules.GetModules()
// configure CLI argument parser:
app := utils.InitCLIParser("tsh", "TSH: Teleport Authentication Gateway Client").Interspersed(false)
app.Flag("login", "Remote host login").Short('l').Envar(loginEnvVar).StringVar(&cf.NodeLogin)
localUser, _ := client.Username()
app.Flag("proxy", "SSH proxy address").Envar(proxyEnvVar).StringVar(&cf.Proxy)
app.Flag("nocache", "do not cache cluster discovery locally").Hidden().BoolVar(&cf.NoCache)
app.Flag("user", fmt.Sprintf("SSH proxy user [%s]", localUser)).Envar(userEnvVar).StringVar(&cf.Username)
app.Flag("option", "").Short('o').Hidden().AllowDuplicate().PreAction(func(ctx *kingpin.ParseContext) error {
return trace.BadParameter("invalid flag, perhaps you want to use this flag as tsh ssh -o?")
}).String()
app.Flag("ttl", "Minutes to live for a SSH session").Int32Var(&cf.MinsToLive)
app.Flag("identity", "Identity file").Short('i').StringVar(&cf.IdentityFileIn)
app.Flag("compat", "OpenSSH compatibility flag").Hidden().StringVar(&cf.Compatibility)
app.Flag("cert-format", "SSH certificate format").StringVar(&cf.CertificateFormat)
if !moduleCfg.IsBoringBinary() {
// The user is *never* allowed to do this in FIPS mode.
app.Flag("insecure", "Do not verify server's certificate and host name. Use only in test environments").
Default("false").
BoolVar(&cf.InsecureSkipVerify)
}
app.Flag("auth", "Specify the type of authentication connector to use.").Envar(authEnvVar).StringVar(&cf.AuthConnector)
app.Flag("namespace", "Namespace of the cluster").Default(defaults.Namespace).Hidden().StringVar(&cf.Namespace)
app.Flag("gops", "Start gops endpoint on a given address").Hidden().BoolVar(&cf.Gops)
app.Flag("gops-addr", "Specify gops addr to listen on").Hidden().StringVar(&cf.GopsAddr)
app.Flag("skip-version-check", "Skip version checking between server and client.").BoolVar(&cf.SkipVersionCheck)
app.Flag("debug", "Verbose logging to stdout").Short('d').BoolVar(&cf.Debug)
app.Flag("add-keys-to-agent", fmt.Sprintf("Controls how keys are handled. Valid values are %v.", client.AllAddKeysOptions)).Short('k').Envar(addKeysToAgentEnvVar).Default(client.AddKeysToAgentAuto).StringVar(&cf.AddKeysToAgent)
app.Flag("use-local-ssh-agent", "Deprecated in favor of the add-keys-to-agent flag.").
Hidden().
Envar(useLocalSSHAgentEnvVar).
Default("true").
BoolVar(&cf.UseLocalSSHAgent)
app.Flag("enable-escape-sequences", "Enable support for SSH escape sequences. Type '~?' during an SSH session to list supported sequences. Default is enabled.").
Default("true").
BoolVar(&cf.EnableEscapeSequences)
app.Flag("bind-addr", "Override host:port used when opening a browser for cluster logins").Envar(bindAddrEnvVar).StringVar(&cf.BindAddr)
app.HelpFlag.Short('h')
ver := app.Command("version", "Print the version")
// ssh
ssh := app.Command("ssh", "Run shell or execute a command on a remote SSH node")
ssh.Arg("[user@]host", "Remote hostname and the login to use").Required().StringVar(&cf.UserHost)
ssh.Arg("command", "Command to execute on a remote host").StringsVar(&cf.RemoteCommand)
app.Flag("jumphost", "SSH jumphost").Short('J').StringVar(&cf.ProxyJump)
ssh.Flag("port", "SSH port on a remote host").Short('p').Int32Var(&cf.NodePort)
ssh.Flag("forward-agent", "Forward agent to target node").Short('A').BoolVar(&cf.ForwardAgent)
ssh.Flag("forward", "Forward localhost connections to remote server").Short('L').StringsVar(&cf.LocalForwardPorts)
ssh.Flag("dynamic-forward", "Forward localhost connections to remote server using SOCKS5").Short('D').StringsVar(&cf.DynamicForwardedPorts)
ssh.Flag("local", "Execute command on localhost after connecting to SSH node").Default("false").BoolVar(&cf.LocalExec)
ssh.Flag("tty", "Allocate TTY").Short('t').BoolVar(&cf.Interactive)
ssh.Flag("cluster", clusterHelp).StringVar(&cf.SiteName)
ssh.Flag("option", "OpenSSH options in the format used in the configuration file").Short('o').AllowDuplicate().StringsVar(&cf.Options)
ssh.Flag("no-remote-exec", "Don't execute remote command, useful for port forwarding").Short('N').BoolVar(&cf.NoRemoteExec)
// Applications.
apps := app.Command("apps", "View and control proxied applications.").Alias("app")
lsApps := apps.Command("ls", "List available applications.")
lsApps.Flag("verbose", "Show extra application fields.").Short('v').BoolVar(&cf.Verbose)
lsApps.Flag("cluster", clusterHelp).StringVar(&cf.SiteName)
appLogin := apps.Command("login", "Retrieve short-lived certificate for an app.")
appLogin.Arg("app", "App name to retrieve credentials for. Can be obtained from `tsh apps ls` output.").Required().StringVar(&cf.AppName)
appLogout := apps.Command("logout", "Remove app certificate.")
appLogout.Arg("app", "App to remove credentials for.").StringVar(&cf.AppName)
appConfig := apps.Command("config", "Print app connection information.")
appConfig.Arg("app", "App to print information for. Required when logged into multiple apps.").StringVar(&cf.AppName)
appConfig.Flag("format", fmt.Sprintf("Optional print format, one of: %q to print app address, %q to print CA cert path, %q to print cert path, %q print key path, %q to print example curl command.",
appFormatURI, appFormatCA, appFormatCert, appFormatKey, appFormatCURL)).StringVar(&cf.Format)
// Databases.
db := app.Command("db", "View and control proxied databases.")
dbList := db.Command("ls", "List all available databases.")
dbList.Flag("verbose", "Show extra database fields.").Short('v').BoolVar(&cf.Verbose)
dbList.Flag("cluster", clusterHelp).StringVar(&cf.SiteName)
dbLogin := db.Command("login", "Retrieve credentials for a database.")
dbLogin.Arg("db", "Database to retrieve credentials for. Can be obtained from 'tsh db ls' output.").Required().StringVar(&cf.DatabaseService)
dbLogin.Flag("db-user", "Optional database user to configure as default.").StringVar(&cf.DatabaseUser)
dbLogin.Flag("db-name", "Optional database name to configure as default.").StringVar(&cf.DatabaseName)
dbLogout := db.Command("logout", "Remove database credentials.")
dbLogout.Arg("db", "Database to remove credentials for.").StringVar(&cf.DatabaseService)
dbEnv := db.Command("env", "Print environment variables for the configured database.")
dbEnv.Flag("db", "Print environment for the specified database.").StringVar(&cf.DatabaseService)
dbConfig := db.Command("config", "Print database connection information. Useful when configuring GUI clients.")
dbConfig.Flag("db", "Print information for the specified database.").StringVar(&cf.DatabaseService)
// join
join := app.Command("join", "Join the active SSH session")
join.Flag("cluster", clusterHelp).StringVar(&cf.SiteName)
join.Arg("session-id", "ID of the session to join").Required().StringVar(&cf.SessionID)
// play
play := app.Command("play", "Replay the recorded SSH session")
play.Flag("cluster", clusterHelp).StringVar(&cf.SiteName)
play.Flag("format", "Format output (json, pty)").Short('f').Default(teleport.PTY).StringVar(&cf.Format)
play.Arg("session-id", "ID of the session to play").Required().StringVar(&cf.SessionID)
// scp
scp := app.Command("scp", "Secure file copy")
scp.Flag("cluster", clusterHelp).StringVar(&cf.SiteName)
scp.Arg("from, to", "Source and destination to copy").Required().StringsVar(&cf.CopySpec)
scp.Flag("recursive", "Recursive copy of subdirectories").Short('r').BoolVar(&cf.RecursiveCopy)
scp.Flag("port", "Port to connect to on the remote host").Short('P').Int32Var(&cf.NodePort)
scp.Flag("preserve", "Preserves access and modification times from the original file").Short('p').BoolVar(&cf.PreserveAttrs)
scp.Flag("quiet", "Quiet mode").Short('q').BoolVar(&cf.Quiet)
// ls
ls := app.Command("ls", "List remote SSH nodes")
ls.Flag("cluster", clusterHelp).StringVar(&cf.SiteName)
ls.Arg("labels", "List of labels to filter node list").StringVar(&cf.UserHost)
ls.Flag("verbose", "One-line output (for text format), including node UUIDs").Short('v').BoolVar(&cf.Verbose)
ls.Flag("format", "Format output (text, json, names)").Short('f').Default(teleport.Text).StringVar(&cf.Format)
// clusters
clusters := app.Command("clusters", "List available Teleport clusters")
clusters.Flag("quiet", "Quiet mode").Short('q').BoolVar(&cf.Quiet)
// login logs in with remote proxy and obtains a "session certificate" which gets
// stored in ~/.tsh directory
login := app.Command("login", "Log in to a cluster and retrieve the session certificate")
login.Flag("out", "Identity output").Short('o').AllowDuplicate().StringVar(&cf.IdentityFileOut)
login.Flag("format", fmt.Sprintf("Identity format: %s, %s (for OpenSSH compatibility) or %s (for kubeconfig)",
identityfile.DefaultFormat,
identityfile.FormatOpenSSH,
identityfile.FormatKubernetes,
)).Default(string(identityfile.DefaultFormat)).StringVar((*string)(&cf.IdentityFormat))
login.Flag("overwrite", "Whether to overwrite the existing identity file.").BoolVar(&cf.IdentityOverwrite)
login.Flag("request-roles", "Request one or more extra roles").StringVar(&cf.DesiredRoles)
login.Flag("request-reason", "Reason for requesting additional roles").StringVar(&cf.RequestReason)
login.Flag("request-reviewers", "Suggested reviewers for role request").StringVar(&cf.SuggestedReviewers)
login.Arg("cluster", clusterHelp).StringVar(&cf.SiteName)
login.Flag("browser", browserHelp).StringVar(&cf.Browser)
login.Flag("kube-cluster", "Name of the Kubernetes cluster to login to").StringVar(&cf.KubernetesCluster)
login.Alias(loginUsageFooter)
// logout deletes obtained session certificates in ~/.tsh
logout := app.Command("logout", "Delete a cluster certificate")
// bench
bench := app.Command("bench", "Run shell or execute a command on a remote SSH node").Hidden()
bench.Flag("cluster", clusterHelp).StringVar(&cf.SiteName)
bench.Arg("[user@]host", "Remote hostname and the login to use").Required().StringVar(&cf.UserHost)
bench.Arg("command", "Command to execute on a remote host").Required().StringsVar(&cf.RemoteCommand)
bench.Flag("port", "SSH port on a remote host").Short('p').Int32Var(&cf.NodePort)
bench.Flag("duration", "Test duration").Default("1s").DurationVar(&cf.BenchDuration)
bench.Flag("rate", "Requests per second rate").Default("10").IntVar(&cf.BenchRate)
bench.Flag("interactive", "Create interactive SSH session").BoolVar(&cf.BenchInteractive)
bench.Flag("export", "Export the latency profile").BoolVar(&cf.BenchExport)
bench.Flag("path", "Directory to save the latency profile to, default path is the current directory").Default(".").StringVar(&cf.BenchExportPath)
bench.Flag("ticks", "Ticks per half distance").Default("100").Int32Var(&cf.BenchTicks)
bench.Flag("scale", "Value scale in which to scale the recorded values").Default("1.0").Float64Var(&cf.BenchValueScale)
// show key
show := app.Command("show", "Read an identity from file and print to stdout").Hidden()
show.Arg("identity_file", "The file containing a public key or a certificate").Required().StringVar(&cf.IdentityFileIn)
// The status command shows which proxy the user is logged into and metadata
// about the certificate.
status := app.Command("status", "Display the list of proxy servers and retrieved certificates")
// The environment command prints out environment variables for the configured
// proxy and cluster. Can be used to create sessions "sticky" to a terminal
// even if the user runs "tsh login" again in another window.
environment := app.Command("env", "Print commands to set Teleport session environment variables")
environment.Flag("unset", "Print commands to clear Teleport session environment variables").BoolVar(&cf.unsetEnvironment)
req := app.Command("request", "Manage access requests").Alias("requests")
reqList := req.Command("ls", "List access requests").Alias("list")
reqList.Flag("format", "Format output (text, json)").Short('f').Default(teleport.Text).StringVar(&cf.Format)
reqList.Flag("reviewable", "Only show requests reviewable by current user").BoolVar(&cf.ReviewableRequests)
reqList.Flag("suggested", "Only show requests that suggest current user as reviewer").BoolVar(&cf.SuggestedRequests)
reqList.Flag("my-requests", "Only show requests created by current user").BoolVar(&cf.MyRequests)
reqShow := req.Command("show", "Show request details").Alias("details")
reqShow.Arg("request-id", "ID of the target request").Required().StringVar(&cf.RequestID)
reqCreate := req.Command("new", "Create a new access request").Alias("create")
reqCreate.Flag("roles", "Roles to be requested").Required().StringVar(&cf.DesiredRoles)
reqCreate.Flag("reason", "Reason for requesting").StringVar(&cf.RequestReason)
reqCreate.Flag("reviewers", "Suggested reviewers").StringVar(&cf.SuggestedReviewers)
reqReview := req.Command("review", "Review an access request")
reqReview.Arg("request-id", "ID of target request").Required().StringVar(&cf.RequestID)
reqReview.Flag("approve", "Review proposes approval").BoolVar(&cf.Approve)
reqReview.Flag("deny", "Review proposes denial").BoolVar(&cf.Deny)
reqReview.Flag("reason", "Review reason message").StringVar(&cf.ReviewReason)
// Kubernetes subcommands.
kube := newKubeCommand(app)
// MFA subcommands.
mfa := newMFACommand(app)
// On Windows, hide the "ssh", "join", "play", "scp", and "bench" commands
// because they all use a terminal.
if runtime.GOOS == constants.WindowsOS {
ssh.Hidden()
join.Hidden()
play.Hidden()
scp.Hidden()
bench.Hidden()
}
// parse CLI commands+flags:
command, err := app.Parse(args)
if err != nil {
return trace.Wrap(err)
}
// apply any options after parsing of arguments to ensure
// that defaults don't overwrite options.
for _, opt := range opts {
if err := opt(&cf); err != nil {
return trace.Wrap(err)
}
}
// While in debug mode, send logs to stdout.
if cf.Debug {
utils.InitLogger(utils.LoggingForCLI, logrus.DebugLevel)
}
ctx, cancel := context.WithCancel(context.Background())
go func() {
exitSignals := make(chan os.Signal, 1)
signal.Notify(exitSignals, syscall.SIGTERM, syscall.SIGINT)
sig := <-exitSignals
log.Debugf("signal: %v", sig)
cancel()
}()
cf.Context = ctx
if cf.Gops {
log.Debugf("Starting gops agent.")
err = gops.Listen(gops.Options{Addr: cf.GopsAddr})
if err != nil {
log.Warningf("Failed to start gops agent %v.", err)
}
}
cf.executablePath, err = os.Executable()
if err != nil {
return trace.Wrap(err)
}
if err := client.ValidateAgentKeyOption(cf.AddKeysToAgent); err != nil {
return trace.Wrap(err)
}
// Read in cluster flag from CLI or environment.
readClusterFlag(&cf, os.Getenv)
// Read in home configured home directory from environment
readTeleportHome(&cf, os.Getenv)
switch command {
case ver.FullCommand():
utils.PrintVersion()
case ssh.FullCommand():
err = onSSH(&cf)
case bench.FullCommand():
err = onBenchmark(&cf)
case join.FullCommand():
err = onJoin(&cf)
case scp.FullCommand():
err = onSCP(&cf)
case play.FullCommand():
err = onPlay(&cf)
case ls.FullCommand():
err = onListNodes(&cf)
case clusters.FullCommand():
err = onListClusters(&cf)
case login.FullCommand():
err = onLogin(&cf)
case logout.FullCommand():
if err := refuseArgs(logout.FullCommand(), args); err != nil {
return trace.Wrap(err)
}
err = onLogout(&cf)
case show.FullCommand():
err = onShow(&cf)
case status.FullCommand():
err = onStatus(&cf)
case lsApps.FullCommand():
err = onApps(&cf)
case appLogin.FullCommand():
err = onAppLogin(&cf)
case appLogout.FullCommand():
err = onAppLogout(&cf)
case appConfig.FullCommand():
err = onAppConfig(&cf)
case kube.credentials.FullCommand():
err = kube.credentials.run(&cf)
case kube.ls.FullCommand():
err = kube.ls.run(&cf)
case kube.login.FullCommand():
err = kube.login.run(&cf)
case dbList.FullCommand():
err = onListDatabases(&cf)
case dbLogin.FullCommand():
err = onDatabaseLogin(&cf)
case dbLogout.FullCommand():
err = onDatabaseLogout(&cf)
case dbEnv.FullCommand():
err = onDatabaseEnv(&cf)
case dbConfig.FullCommand():
err = onDatabaseConfig(&cf)
case environment.FullCommand():
err = onEnvironment(&cf)
case mfa.ls.FullCommand():
err = mfa.ls.run(&cf)
case mfa.add.FullCommand():
err = mfa.add.run(&cf)
case mfa.rm.FullCommand():
err = mfa.rm.run(&cf)
case reqList.FullCommand():
err = onRequestList(&cf)
case reqShow.FullCommand():
err = onRequestShow(&cf)
case reqCreate.FullCommand():
err = onRequestCreate(&cf)
case reqReview.FullCommand():
err = onRequestReview(&cf)
default:
// This should only happen when there's a missing switch case above.
err = trace.BadParameter("command %q not configured", command)
}
if trace.IsNotImplemented(err) {
return handleUnimplementedError(ctx, err, cf)
}
return trace.Wrap(err)
}
// onPlay replays a session with a given ID
func onPlay(cf *CLIConf) error {
switch cf.Format {
case teleport.PTY:
switch {
case path.Ext(cf.SessionID) == ".tar":
sid := sessionIDFromPath(cf.SessionID)
			tarFile, err := os.Open(cf.SessionID)
			if err != nil {
				return trace.ConvertSystemError(err)
			}
			defer tarFile.Close()
if err := client.PlayFile(context.TODO(), tarFile, sid); err != nil {
return trace.Wrap(err)
}
default:
tc, err := makeClient(cf, true)
if err != nil {
return trace.Wrap(err)
}
if err := tc.Play(context.TODO(), cf.Namespace, cf.SessionID); err != nil {
return trace.Wrap(err)
}
}
default:
err := exportFile(cf.SessionID, cf.Format)
if err != nil {
return trace.Wrap(err)
}
}
return nil
}
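// sessionIDFromPath derives a session ID from a recording file path by
// stripping the ".tar" suffix from the file's base name.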
func sessionIDFromPath(path string) string {
fileName := filepath.Base(path)
return strings.TrimSuffix(fileName, ".tar")
}
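// exportFile opens the recording at the given path and exports its events
// to stdout in the requested format.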
func exportFile(path string, format string) error {
f, err := os.Open(path)
if err != nil {
return trace.ConvertSystemError(err)
}
defer f.Close()
err = events.Export(context.TODO(), f, os.Stdout, format)
if err != nil {
return trace.Wrap(err)
}
return nil
}
// onLogin logs in with remote proxy and gets signed certificates
func onLogin(cf *CLIConf) error {
autoRequest := true
// special case: --request-roles=no disables auto-request behavior.
if cf.DesiredRoles == "no" {
autoRequest = false
cf.DesiredRoles = ""
}
if cf.IdentityFileIn != "" {
return trace.BadParameter("-i flag cannot be used here")
}
switch cf.IdentityFormat {
case identityfile.FormatFile, identityfile.FormatOpenSSH, identityfile.FormatKubernetes:
default:
return trace.BadParameter("invalid identity format: %s", cf.IdentityFormat)
}
// Get the status of the active profile as well as the status
// of any other proxies the user is logged into.
profile, profiles, err := client.Status(cf.HomePath, cf.Proxy)
if err != nil {
if !trace.IsNotFound(err) {
return trace.Wrap(err)
}
}
// make the teleport client and retrieve the certificate from the proxy:
tc, err := makeClient(cf, true)
if err != nil {
return trace.Wrap(err)
}
tc.HomePath = cf.HomePath
// client is already logged in and profile is not expired
if profile != nil && !profile.IsExpired(clockwork.NewRealClock()) {
switch {
		// if nothing is specified, re-fetch kube clusters and print the
		// current status
case cf.Proxy == "" && cf.SiteName == "" && cf.DesiredRoles == "" && cf.IdentityFileOut == "":
if err := updateKubeConfig(cf, tc); err != nil {
return trace.Wrap(err)
}
printProfiles(cf.Debug, profile, profiles)
return nil
		// if parameters match, re-fetch kube clusters and print the
		// current status
case host(cf.Proxy) == host(profile.ProxyURL.Host) && cf.SiteName == profile.Cluster && cf.DesiredRoles == "":
if err := updateKubeConfig(cf, tc); err != nil {
return trace.Wrap(err)
}
printProfiles(cf.Debug, profile, profiles)
return nil
// proxy is unspecified or the same as the currently provided proxy,
// but cluster is specified, treat this as selecting a new cluster
// for the same proxy
case (cf.Proxy == "" || host(cf.Proxy) == host(profile.ProxyURL.Host)) && cf.SiteName != "":
// trigger reissue, preserving any active requests.
err = tc.ReissueUserCerts(cf.Context, client.CertCacheKeep, client.ReissueParams{
AccessRequests: profile.ActiveRequests.AccessRequests,
RouteToCluster: cf.SiteName,
})
if err != nil {
return trace.Wrap(err)
}
if err := tc.SaveProfile(cf.HomePath, true); err != nil {
return trace.Wrap(err)
}
if err := updateKubeConfig(cf, tc); err != nil {
return trace.Wrap(err)
}
return trace.Wrap(onStatus(cf))
// proxy is unspecified or the same as the currently provided proxy,
// but desired roles are specified, treat this as a privilege escalation
// request for the same login session.
case (cf.Proxy == "" || host(cf.Proxy) == host(profile.ProxyURL.Host)) && cf.DesiredRoles != "" && cf.IdentityFileOut == "":
if err := executeAccessRequest(cf, tc); err != nil {
return trace.Wrap(err)
}
if err := updateKubeConfig(cf, tc); err != nil {
return trace.Wrap(err)
}
return trace.Wrap(onStatus(cf))
		// otherwise, just pass through to the standard login
default:
}
}
if cf.Username == "" {
cf.Username = tc.Username
}
// -i flag specified? save the retrieved cert into an identity file
makeIdentityFile := (cf.IdentityFileOut != "")
key, err := tc.Login(cf.Context)
if err != nil {
return trace.Wrap(err)
}
// the login operation may update the username and should be considered the more
// "authoritative" source.
cf.Username = tc.Username
// TODO(fspmarshall): Refactor access request & cert reissue logic to allow
// access requests to be applied to identity files.
if makeIdentityFile {
if err := setupNoninteractiveClient(tc, key); err != nil {
return trace.Wrap(err)
}
// key.TrustedCA at this point only has the CA of the root cluster we
// logged into. We need to fetch all the CAs for leaf clusters too, to
// make them available in the identity file.
rootClusterName := key.TrustedCA[0].ClusterName
authorities, err := tc.GetTrustedCA(cf.Context, rootClusterName)
if err != nil {
return trace.Wrap(err)
}
key.TrustedCA = auth.AuthoritiesToTrustedCerts(authorities)
filesWritten, err := identityfile.Write(identityfile.WriteConfig{
OutputPath: cf.IdentityFileOut,
Key: key,
Format: cf.IdentityFormat,
KubeProxyAddr: tc.KubeClusterAddr(),
OverwriteDestination: cf.IdentityOverwrite,
})
if err != nil {
return trace.Wrap(err)
}
fmt.Printf("\nThe certificate has been written to %s\n", strings.Join(filesWritten, ","))
return nil
}
if err := tc.ActivateKey(cf.Context, key); err != nil {
return trace.Wrap(err)
}
// If the proxy is advertising that it supports Kubernetes, update kubeconfig.
if tc.KubeProxyAddr != "" {
if err := updateKubeConfig(cf, tc); err != nil {
return trace.Wrap(err)
}
}
// Regular login without -i flag.
if err := tc.SaveProfile(cf.HomePath, true); err != nil {
return trace.Wrap(err)
}
if autoRequest && cf.DesiredRoles == "" {
var requireReason, auto bool
var prompt string
roleNames, err := key.CertRoles()
if err != nil {
logoutErr := tc.Logout()
return trace.NewAggregate(err, logoutErr)
}
		// Load all roles from the root cluster and collect relevant options.
		// The normal one-off TeleportClient methods don't reuse the auth server
		// connection, so we use WithRootClusterClient to speed things up.
err = tc.WithRootClusterClient(cf.Context, func(clt auth.ClientI) error {
for _, roleName := range roleNames {
role, err := clt.GetRole(cf.Context, roleName)
if err != nil {
return trace.Wrap(err)
}
requireReason = requireReason || role.GetOptions().RequestAccess.RequireReason()
auto = auto || role.GetOptions().RequestAccess.ShouldAutoRequest()
if prompt == "" {
prompt = role.GetOptions().RequestPrompt
}
}
return nil
})
if err != nil {
logoutErr := tc.Logout()
return trace.NewAggregate(err, logoutErr)
}
if requireReason && cf.RequestReason == "" {
msg := "--request-reason must be specified"
if prompt != "" {
msg = msg + ", prompt=" + prompt
}
			err := trace.BadParameter("%s", msg)
logoutErr := tc.Logout()
return trace.NewAggregate(err, logoutErr)
}
if auto {
cf.DesiredRoles = "*"
}
}
if cf.DesiredRoles != "" {
fmt.Println("") // visually separate access request output
if err := executeAccessRequest(cf, tc); err != nil {
logoutErr := tc.Logout()
return trace.NewAggregate(err, logoutErr)
}
}
// Update the command line flag for the proxy to make sure any advertised
// settings are picked up.
webProxyHost, _ := tc.WebProxyHostPort()
cf.Proxy = webProxyHost
// If the profile is already logged into any database services,
// refresh the creds.
if err := fetchDatabaseCreds(cf, tc); err != nil {
return trace.Wrap(err)
}
// Print status to show information of the logged in user.
return trace.Wrap(onStatus(cf))
}
// setupNoninteractiveClient sets up an existing client to use
// non-interactive authentication methods.
func setupNoninteractiveClient(tc *client.TeleportClient, key *client.Key) error {
certUsername, err := key.CertUsername()
if err != nil {
return trace.Wrap(err)
}
tc.Username = certUsername
// Extract and set the HostLogin to be the first principal. It doesn't
// matter what the value is, but some valid principal has to be set
// otherwise the certificate won't be validated.
certPrincipals, err := key.CertPrincipals()
if err != nil {
return trace.Wrap(err)
}
if len(certPrincipals) == 0 {
return trace.BadParameter("no principals found")
}
tc.HostLogin = certPrincipals[0]
identityAuth, err := authFromIdentity(key)
if err != nil {
return trace.Wrap(err)
}
tc.TLS, err = key.TeleportClientTLSConfig(nil)
if err != nil {
return trace.Wrap(err)
}
tc.AuthMethods = []ssh.AuthMethod{identityAuth}
tc.Interactive = false
tc.SkipLocalAuth = true
// When user logs in for the first time without a CA in ~/.tsh/known_hosts,
// and specifies the -out flag, we need to avoid writing anything to
// ~/.tsh/ but still validate the proxy cert. Because the existing
// client.Client methods have a side-effect of persisting the CA on disk,
// we do all of this by hand.
//
	// Wrap tc.HostKeyCallback with another checker. This outer checker uses
// key.TrustedCA to validate the remote host cert first, before falling
// back to the original HostKeyCallback.
oldHostKeyCallback := tc.HostKeyCallback
tc.HostKeyCallback = func(hostname string, remote net.Addr, hostKey ssh.PublicKey) error {
checker := ssh.CertChecker{
// ssh.CertChecker will parse hostKey, extract public key of the
// signer (CA) and call IsHostAuthority. IsHostAuthority in turn
// has to match hostCAKey to any known trusted CA.
IsHostAuthority: func(hostCAKey ssh.PublicKey, address string) bool {
for _, ca := range key.TrustedCA {
caKeys, err := ca.SSHCertPublicKeys()
if err != nil {
return false
}
for _, caKey := range caKeys {
if apisshutils.KeysEqual(caKey, hostCAKey) {
return true
}
}
}
return false
},
}
err := checker.CheckHostKey(hostname, remote, hostKey)
if err != nil && oldHostKeyCallback != nil {
errOld := oldHostKeyCallback(hostname, remote, hostKey)
if errOld != nil {
return trace.NewAggregate(err, errOld)
}
}
return nil
}
return nil
}
// onLogout deletes a "session certificate" from ~/.tsh for a given proxy
func onLogout(cf *CLIConf) error {
// Extract all clusters the user is currently logged into.
active, available, err := client.Status(cf.HomePath, "")
if err != nil {
if trace.IsNotFound(err) {
fmt.Printf("All users logged out.\n")
return nil
} else if trace.IsAccessDenied(err) {
fmt.Printf("%v: Logged in user does not have the correct permissions\n", err)
return nil
}
return trace.Wrap(err)
}
profiles := append([]*client.ProfileStatus{}, available...)
if active != nil {
profiles = append(profiles, active)
}
// Extract the proxy name.
proxyHost, _, err := net.SplitHostPort(cf.Proxy)
if err != nil {
proxyHost = cf.Proxy
}
switch {
// Proxy and username for key to remove.
case proxyHost != "" && cf.Username != "":
tc, err := makeClient(cf, true)
if err != nil {
return trace.Wrap(err)
}
// Load profile for the requested proxy/user.
profile, err := client.StatusFor(cf.HomePath, proxyHost, cf.Username)
if err != nil && !trace.IsNotFound(err) {
return trace.Wrap(err)
}
// Log out user from the databases.
if profile != nil {
for _, db := range profile.Databases {
log.Debugf("Logging %v out of database %v.", profile.Name, db)
err = dbprofile.Delete(tc, db)
if err != nil {
return trace.Wrap(err)
}
}
}
// Remove keys for this user from disk and running agent.
err = tc.Logout()
if err != nil {
if trace.IsNotFound(err) {
fmt.Printf("User %v already logged out from %v.\n", cf.Username, proxyHost)
os.Exit(1)
}
return trace.Wrap(err)
}
// Get the address of the active Kubernetes proxy to find AuthInfos,
// Clusters, and Contexts in kubeconfig.
clusterName, _ := tc.KubeProxyHostPort()
if tc.SiteName != "" {
clusterName = fmt.Sprintf("%v.%v", tc.SiteName, clusterName)
}
// Remove Teleport related entries from kubeconfig.
log.Debugf("Removing Teleport related entries for '%v' from kubeconfig.", clusterName)
err = kubeconfig.Remove("", clusterName)
if err != nil {
return trace.Wrap(err)
}
fmt.Printf("Logged out %v from %v.\n", cf.Username, proxyHost)
// Remove all keys.
case proxyHost == "" && cf.Username == "":
		// The makeClient function requires a proxy. However, this value is not
		// used because the user will be logged out from all proxies. Pass a
		// dummy value to allow creation of the TeleportClient.
cf.Proxy = "dummy:1234"
tc, err := makeClient(cf, true)
if err != nil {
return trace.Wrap(err)
}
// Remove Teleport related entries from kubeconfig for all clusters.
for _, profile := range profiles {
log.Debugf("Removing Teleport related entries for '%v' from kubeconfig.", profile.Cluster)
err = kubeconfig.Remove("", profile.Cluster)
if err != nil {
return trace.Wrap(err)
}
}
// Remove all database access related profiles as well such as Postgres
// connection service file.
for _, profile := range profiles {
for _, db := range profile.Databases {
log.Debugf("Logging %v out of database %v.", profile.Name, db)
err = dbprofile.Delete(tc, db)
if err != nil {
return trace.Wrap(err)
}
}
}
// Remove all keys from disk and the running agent.
err = tc.LogoutAll()
if err != nil {
return trace.Wrap(err)
}
fmt.Printf("Logged out all users from all proxies.\n")
default:
fmt.Printf("Specify --proxy and --user to remove keys for specific user ")
fmt.Printf("from a proxy or neither to log out all users from all proxies.\n")
}
return nil
}
// onListNodes executes 'tsh ls' command.
func onListNodes(cf *CLIConf) error {
tc, err := makeClient(cf, true)
if err != nil {
return trace.Wrap(err)
}
// Get list of all nodes in backend and sort by "Node Name".
var nodes []types.Server
err = client.RetryWithRelogin(cf.Context, tc, func() error {
nodes, err = tc.ListNodes(cf.Context)
return err
})
if err != nil {
return trace.Wrap(err)
}
sort.Slice(nodes, func(i, j int) bool {
return nodes[i].GetHostname() < nodes[j].GetHostname()
})
if err := printNodes(nodes, cf.Format, cf.Verbose); err != nil {
return trace.Wrap(err)
}
return nil
}
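// executeAccessRequest submits an access request for the desired roles
// against the root cluster, waits for it to be resolved, and reissues
// certificates if it is approved.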
func executeAccessRequest(cf *CLIConf, tc *client.TeleportClient) error {
if cf.DesiredRoles == "" {
return trace.BadParameter("one or more roles must be specified")
}
roles := utils.SplitIdentifiers(cf.DesiredRoles)
reviewers := utils.SplitIdentifiers(cf.SuggestedReviewers)
if cf.Username == "" {
cf.Username = tc.Username
}
req, err := services.NewAccessRequest(cf.Username, roles...)
if err != nil {
return trace.Wrap(err)
}
req.SetRequestReason(cf.RequestReason)
req.SetSuggestedReviewers(reviewers)
fmt.Fprintf(os.Stderr, "Seeking request approval... (id: %s)\n", req.GetName())
var res types.AccessRequest
// always create access request against the root cluster
err = tc.WithRootClusterClient(cf.Context, func(clt auth.ClientI) error {
res, err = getRequestResolution(cf, clt, req)
return trace.Wrap(err)
})
if err != nil {
return trace.Wrap(err)
}
if !res.GetState().IsApproved() {
msg := fmt.Sprintf("request %s has been set to %s", res.GetName(), res.GetState().String())
if reason := res.GetResolveReason(); reason != "" {
msg = fmt.Sprintf("%s, reason=%q", msg, reason)
}
		return trace.Errorf("%s", msg)
}
msg := "\nApproval received, getting updated certificates...\n\n"
if reason := res.GetResolveReason(); reason != "" {
msg = fmt.Sprintf("\nApproval received, reason=%q\nGetting updated certificates...\n\n", reason)
}
fmt.Fprint(os.Stderr, msg)
if err := reissueWithRequests(cf, tc, req.GetName()); err != nil {
return trace.Wrap(err)
}
return nil
}
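// printNodes renders the node list in the requested format: 'text', 'json',
// or 'names'.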
func printNodes(nodes []types.Server, format string, verbose bool) error {
switch strings.ToLower(format) {
case teleport.Text:
printNodesAsText(nodes, verbose)
case teleport.JSON:
out, err := json.MarshalIndent(nodes, "", " ")
if err != nil {
return trace.Wrap(err)
}
fmt.Println(string(out))
case teleport.Names:
for _, n := range nodes {
fmt.Println(n.GetHostname())
}
default:
return trace.BadParameter("unsupported format. try 'json', 'text', or 'names'")
}
return nil
}
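// printNodesAsText renders nodes as an ASCII table; verbose mode prints one
// line per node and includes the node ID.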
func printNodesAsText(nodes []types.Server, verbose bool) {
// Reusable function to get addr or tunnel for each node
getAddr := func(n types.Server) string {
if n.GetUseTunnel() {
return "⟵ Tunnel"
}
return n.GetAddr()
}
var t asciitable.Table
switch verbose {
// In verbose mode, print everything on a single line and include the Node
// ID (UUID). Useful for machines that need to parse the output of "tsh ls".
case true:
t = asciitable.MakeTable([]string{"Node Name", "Node ID", "Address", "Labels"})
for _, n := range nodes {
t.AddRow([]string{
n.GetHostname(), n.GetName(), getAddr(n), n.LabelsString(),
})
}
// In normal mode chunk the labels and print two per line and allow multiple
// lines per node.
case false:
t = asciitable.MakeTable([]string{"Node Name", "Address", "Labels"})
for _, n := range nodes {
labelChunks := chunkLabels(n.GetAllLabels(), 2)
for i, v := range labelChunks {
if i == 0 {
t.AddRow([]string{n.GetHostname(), getAddr(n), strings.Join(v, ", ")})
} else {
t.AddRow([]string{"", "", strings.Join(v, ", ")})
}
}
}
}
fmt.Println(t.AsBuffer().String())
}
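// showApps renders application servers as an ASCII table, prefixing apps the
// user is logged into with ">".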
func showApps(servers []types.Server, active []tlsca.RouteToApp, verbose bool) {
// In verbose mode, print everything on a single line and include host UUID.
// In normal mode, chunk the labels, print two per line and allow multiple
// lines per node.
if verbose {
t := asciitable.MakeTable([]string{"Application", "Description", "Host", "Public Address", "URI", "Labels"})
for _, server := range servers {
for _, app := range server.GetApps() {
name := app.Name
for _, a := range active {
if name == a.Name {
name = fmt.Sprintf("> %v", name)
}
}
t.AddRow([]string{
name, app.Description, server.GetName(), app.PublicAddr, app.URI, types.LabelsAsString(app.StaticLabels, app.DynamicLabels),
})
}
}
fmt.Println(t.AsBuffer().String())
} else {
t := asciitable.MakeTable([]string{"Application", "Description", "Public Address", "Labels"})
for _, server := range servers {
for _, app := range server.GetApps() {
labelChunks := chunkLabels(types.CombineLabels(app.StaticLabels, app.DynamicLabels), 2)
for i, v := range labelChunks {
var name string
var addr string
if i == 0 {
name = app.Name
addr = app.PublicAddr
}
for _, a := range active {
if name == a.Name {
name = fmt.Sprintf("> %v", name)
}
}
t.AddRow([]string{name, app.Description, addr, strings.Join(v, ", ")})
}
}
}
fmt.Println(t.AsBuffer().String())
}
}
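// showDatabases renders database servers as an ASCII table, including a
// connect command for databases the user is logged into.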
func showDatabases(cluster string, servers []types.DatabaseServer, active []tlsca.RouteToDatabase, verbose bool) {
if verbose {
t := asciitable.MakeTable([]string{"Name", "Description", "Protocol", "Type", "URI", "Labels", "Connect", "Expires"})
for _, server := range servers {
name := server.GetName()
var connect string
for _, a := range active {
if a.ServiceName == name {
name = formatActiveDB(a)
connect = formatConnectCommand(cluster, a)
}
}
t.AddRow([]string{
name,
server.GetDescription(),
server.GetProtocol(),
server.GetType(),
server.GetURI(),
server.LabelsString(),
connect,
server.Expiry().Format(constants.HumanDateFormatSeconds),
})
}
fmt.Println(t.AsBuffer().String())
} else {
t := asciitable.MakeTable([]string{"Name", "Description", "Labels", "Connect"})
for _, server := range servers {
name := server.GetName()
var connect string
for _, a := range active {
if a.ServiceName == name {
name = formatActiveDB(a)
connect = formatConnectCommand(cluster, a)
}
}
t.AddRow([]string{
name,
server.GetDescription(),
server.LabelsString(),
connect,
})
}
fmt.Println(t.AsBuffer().String())
}
}
// formatConnectCommand formats an appropriate database connection command
// for a user based on the provided database parameters.
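// For example, a Postgres route with both user and database set yields:
//   psql "service=<cluster>-<service>"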
func formatConnectCommand(cluster string, active tlsca.RouteToDatabase) string {
service := fmt.Sprintf("%v-%v", cluster, active.ServiceName)
switch active.Protocol {
case defaults.ProtocolPostgres:
switch {
case active.Username != "" && active.Database != "":
return fmt.Sprintf(`psql "service=%v"`, service)
case active.Username != "":
return fmt.Sprintf(`psql "service=%v dbname=<database>"`, service)
case active.Database != "":
return fmt.Sprintf(`psql "service=%v user=<user>"`, service)
}
return fmt.Sprintf(`psql "service=%v user=<user> dbname=<database>"`, service)
case defaults.ProtocolMySQL:
switch {
case active.Username != "" && active.Database != "":
return fmt.Sprintf("mysql --defaults-group-suffix=_%v", service)
case active.Username != "":
return fmt.Sprintf("mysql --defaults-group-suffix=_%v --database=<database>", service)
case active.Database != "":
return fmt.Sprintf("mysql --defaults-group-suffix=_%v --user=<user>", service)
}
return fmt.Sprintf("mysql --defaults-group-suffix=_%v --user=<user> --database=<database>", service)
}
return ""
}
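// formatActiveDB renders the display name of an active database route,
// including the user and database when they are set.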
func formatActiveDB(active tlsca.RouteToDatabase) string {
switch {
case active.Username != "" && active.Database != "":
return fmt.Sprintf("> %v (user: %v, db: %v)", active.ServiceName, active.Username, active.Database)
case active.Username != "":
return fmt.Sprintf("> %v (user: %v)", active.ServiceName, active.Username)
case active.Database != "":
return fmt.Sprintf("> %v (db: %v)", active.ServiceName, active.Database)
}
return fmt.Sprintf("> %v", active.ServiceName)
}
// chunkLabels breaks labels into sized chunks. Used to improve readability
// of "tsh ls".
func chunkLabels(labels map[string]string, chunkSize int) [][]string {
// First sort labels so they always occur in the same order.
sorted := make([]string, 0, len(labels))
for k, v := range labels {
sorted = append(sorted, fmt.Sprintf("%v=%v", k, v))
}
sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
// Then chunk labels into sized chunks.
var chunks [][]string
for chunkSize < len(sorted) {
sorted, chunks = sorted[chunkSize:], append(chunks, sorted[0:chunkSize:chunkSize])
}
chunks = append(chunks, sorted)
return chunks
}
// onListClusters executes 'tsh clusters' command
func onListClusters(cf *CLIConf) error {
tc, err := makeClient(cf, true)
if err != nil {
return trace.Wrap(err)
}
var rootClusterName string
var leafClusters []types.RemoteCluster
err = client.RetryWithRelogin(cf.Context, tc, func() error {
proxyClient, err := tc.ConnectToProxy(cf.Context)
if err != nil {
return err
}
defer proxyClient.Close()
var rootErr, leafErr error
rootClusterName, rootErr = proxyClient.RootClusterName()
leafClusters, leafErr = proxyClient.GetLeafClusters(cf.Context)
return trace.NewAggregate(rootErr, leafErr)
})
if err != nil {
return trace.Wrap(err)
}
profile, _, err := client.Status(cf.HomePath, cf.Proxy)
if err != nil {
return trace.Wrap(err)
}
showSelected := func(clusterName string) string {
if profile != nil && clusterName == profile.Cluster {
return "*"
}
return ""
}
var t asciitable.Table
if cf.Quiet {
t = asciitable.MakeHeadlessTable(4)
} else {
t = asciitable.MakeTable([]string{"Cluster Name", "Status", "Cluster Type", "Selected"})
}
t.AddRow([]string{
rootClusterName, teleport.RemoteClusterStatusOnline, "root", showSelected(rootClusterName),
})
for _, cluster := range leafClusters {
t.AddRow([]string{
cluster.GetName(), cluster.GetConnectionStatus(), "leaf", showSelected(cluster.GetName()),
})
}
fmt.Println(t.AsBuffer().String())
return nil
}
// onSSH executes 'tsh ssh' command
func onSSH(cf *CLIConf) error {
tc, err := makeClient(cf, false)
if err != nil {
return trace.Wrap(err)
}
tc.Stdin = os.Stdin
err = client.RetryWithRelogin(cf.Context, tc, func() error {
return tc.SSH(cf.Context, cf.RemoteCommand, cf.LocalExec)
})
if err != nil {
if strings.Contains(utils.UserMessageFromError(err), teleport.NodeIsAmbiguous) {
allNodes, err := tc.ListAllNodes(cf.Context)
if err != nil {
return trace.Wrap(err)
}
var nodes []types.Server
for _, node := range allNodes {
if node.GetHostname() == tc.Host {
nodes = append(nodes, node)
}
}
fmt.Fprintf(os.Stderr, "error: ambiguous host could match multiple nodes\n\n")
printNodesAsText(nodes, true)
fmt.Fprintf(os.Stderr, "Hint: try addressing the node by unique id (ex: tsh ssh user@node-id)\n")
fmt.Fprintf(os.Stderr, "Hint: use 'tsh ls -v' to list all nodes with their unique ids\n")
fmt.Fprintf(os.Stderr, "\n")
os.Exit(1)
}
// exit with the same exit status as the failed command:
if tc.ExitStatus != 0 {
fmt.Fprintln(os.Stderr, utils.UserMessageFromError(err))
os.Exit(tc.ExitStatus)
} else {
return trace.Wrap(err)
}
}
return nil
}
// onBenchmark executes benchmark
func onBenchmark(cf *CLIConf) error {
tc, err := makeClient(cf, false)
if err != nil {
return trace.Wrap(err)
}
cnf := benchmark.Config{
Command: cf.RemoteCommand,
MinimumWindow: cf.BenchDuration,
Rate: cf.BenchRate,
}
result, err := cnf.Benchmark(cf.Context, tc)
if err != nil {
fmt.Fprintln(os.Stderr, utils.UserMessageFromError(err))
os.Exit(255)
}
fmt.Printf("\n")
fmt.Printf("* Requests originated: %v\n", result.RequestsOriginated)
fmt.Printf("* Requests failed: %v\n", result.RequestsFailed)
if result.LastError != nil {
fmt.Printf("* Last error: %v\n", result.LastError)
}
fmt.Printf("\nHistogram\n\n")
t := asciitable.MakeTable([]string{"Percentile", "Response Duration"})
for _, quantile := range []float64{25, 50, 75, 90, 95, 99, 100} {
t.AddRow([]string{fmt.Sprintf("%v", quantile),
fmt.Sprintf("%v ms", result.Histogram.ValueAtQuantile(quantile)),
})
}
if _, err := io.Copy(os.Stdout, t.AsBuffer()); err != nil {
return trace.Wrap(err)
}
fmt.Printf("\n")
if cf.BenchExport {
path, err := benchmark.ExportLatencyProfile(cf.BenchExportPath, result.Histogram, cf.BenchTicks, cf.BenchValueScale)
if err != nil {
fmt.Fprintf(os.Stderr, "failed exporting latency profile: %s\n", utils.UserMessageFromError(err))
} else {
fmt.Printf("latency profile saved: %v\n", path)
}
}
return nil
}
// onJoin executes 'ssh join' command
func onJoin(cf *CLIConf) error {
tc, err := makeClient(cf, true)
if err != nil {
return trace.Wrap(err)
}
sid, err := session.ParseID(cf.SessionID)
if err != nil {
return trace.BadParameter("'%v' is not a valid session ID (must be GUID)", cf.SessionID)
}
err = client.RetryWithRelogin(cf.Context, tc, func() error {
return tc.Join(context.TODO(), cf.Namespace, *sid, nil)
})
if err != nil {
return trace.Wrap(err)
}
return nil
}
// onSCP executes 'tsh scp' command
func onSCP(cf *CLIConf) error {
tc, err := makeClient(cf, false)
if err != nil {
return trace.Wrap(err)
}
flags := scp.Flags{
Recursive: cf.RecursiveCopy,
PreserveAttrs: cf.PreserveAttrs,
}
err = client.RetryWithRelogin(cf.Context, tc, func() error {
return tc.SCP(cf.Context, cf.CopySpec, int(cf.NodePort), flags, cf.Quiet)
})
if err == nil {
return nil
}
// exit with the same exit status as the failed command:
if tc.ExitStatus != 0 {
fmt.Fprintln(os.Stderr, utils.UserMessageFromError(err))
os.Exit(tc.ExitStatus)
}
return trace.Wrap(err)
}
// makeClient takes the command-line configuration and constructs & returns
// a fully configured TeleportClient object
func makeClient(cf *CLIConf, useProfileLogin bool) (*client.TeleportClient, error) {
// Parse OpenSSH style options.
options, err := parseOptions(cf.Options)
if err != nil {
return nil, trace.Wrap(err)
}
// apply defaults
if cf.MinsToLive == 0 {
cf.MinsToLive = int32(defaults.CertDuration / time.Minute)
}
// split login & host
hostLogin := cf.NodeLogin
var labels map[string]string
if cf.UserHost != "" {
parts := strings.Split(cf.UserHost, "@")
partsLength := len(parts)
if partsLength > 1 {
hostLogin = strings.Join(parts[:partsLength-1], "@")
cf.UserHost = parts[partsLength-1]
}
// see if remote host is specified as a set of labels
if strings.Contains(cf.UserHost, "=") {
labels, err = client.ParseLabelSpec(cf.UserHost)
if err != nil {
return nil, err
}
}
} else if cf.CopySpec != nil {
for _, location := range cf.CopySpec {
// Extract username and host from "username@host:file/path"
parts := strings.Split(location, ":")
parts = strings.Split(parts[0], "@")
partsLength := len(parts)
if partsLength > 1 {
hostLogin = strings.Join(parts[:partsLength-1], "@")
cf.UserHost = parts[partsLength-1]
break
}
}
}
fPorts, err := client.ParsePortForwardSpec(cf.LocalForwardPorts)
if err != nil {
return nil, err
}
dPorts, err := client.ParseDynamicPortForwardSpec(cf.DynamicForwardedPorts)
if err != nil {
return nil, err
}
// 1: start with the defaults
c := client.MakeDefaultConfig()
// ProxyJump is an alias of Proxy flag
if cf.ProxyJump != "" {
hosts, err := utils.ParseProxyJump(cf.ProxyJump)
if err != nil {
return nil, trace.Wrap(err)
}
c.JumpHosts = hosts
}
	// 2: override with the user identity given via the -i flag, if any
if cf.IdentityFileIn != "" {
// Ignore local authentication methods when identity file is provided
c.SkipLocalAuth = true
var (
key *client.Key
identityAuth ssh.AuthMethod
expiryDate time.Time
hostAuthFunc ssh.HostKeyCallback
)
// read the ID file and create an "auth method" from it:
key, err = client.KeyFromIdentityFile(cf.IdentityFileIn)
if err != nil {
return nil, trace.Wrap(err)
}
hostAuthFunc, err := key.HostKeyCallback()
if err != nil {
return nil, trace.Wrap(err)
}
if hostAuthFunc != nil {
c.HostKeyCallback = hostAuthFunc
} else {
return nil, trace.BadParameter("missing trusted certificate authorities in the identity, upgrade to newer version of tctl, export identity and try again")
}
certUsername, err := key.CertUsername()
if err != nil {
return nil, trace.Wrap(err)
}
log.Debugf("Extracted username %q from the identity file %v.", certUsername, cf.IdentityFileIn)
c.Username = certUsername
identityAuth, err = authFromIdentity(key)
if err != nil {
return nil, trace.Wrap(err)
}
c.AuthMethods = []ssh.AuthMethod{identityAuth}
// Also create an in-memory agent to hold the key. If cluster is in
// proxy recording mode, agent forwarding will be required for
// sessions.
c.Agent = agent.NewKeyring()
agentKeys, err := key.AsAgentKeys()
if err != nil {
return nil, trace.Wrap(err)
}
for _, k := range agentKeys {
if err := c.Agent.Add(k); err != nil {
return nil, trace.Wrap(err)
}
}
if len(key.TLSCert) > 0 {
c.TLS, err = key.TeleportClientTLSConfig(nil)
if err != nil {
return nil, trace.Wrap(err)
}
}
// check the expiration date
expiryDate, _ = key.CertValidBefore()
if expiryDate.Before(time.Now()) {
fmt.Fprintf(os.Stderr, "WARNING: the certificate has expired on %v\n", expiryDate)
}
} else {
		// Load the profile. If no --proxy is given, the currently active profile
		// is used; otherwise fetch the profile for the exact proxy we are trying
		// to connect to.
err = c.LoadProfile(cf.HomePath, cf.Proxy)
if err != nil {
fmt.Printf("WARNING: Failed to load tsh profile for %q: %v\n", cf.Proxy, err)
}
}
// 3: override with the CLI flags
if cf.Namespace != "" {
c.Namespace = cf.Namespace
}
if cf.Username != "" {
c.Username = cf.Username
}
// if proxy is set, and proxy is not equal to profile's
// loaded addresses, override the values
if err := setClientWebProxyAddr(cf, c); err != nil {
return nil, trace.Wrap(err)
}
if len(fPorts) > 0 {
c.LocalForwardPorts = fPorts
}
if len(dPorts) > 0 {
c.DynamicForwardedPorts = dPorts
}
profileSiteName := c.SiteName
if cf.SiteName != "" {
c.SiteName = cf.SiteName
}
if cf.KubernetesCluster != "" {
c.KubernetesCluster = cf.KubernetesCluster
}
if cf.DatabaseService != "" {
c.DatabaseService = cf.DatabaseService
}
// if host logins stored in profiles must be ignored...
if !useProfileLogin {
c.HostLogin = ""
}
if hostLogin != "" {
c.HostLogin = hostLogin
}
c.Host = cf.UserHost
c.HostPort = int(cf.NodePort)
c.Labels = labels
c.KeyTTL = time.Minute * time.Duration(cf.MinsToLive)
c.InsecureSkipVerify = cf.InsecureSkipVerify
// If a TTY was requested, make sure to allocate it. Note this applies to
// "exec" command because a shell always has a TTY allocated.
if cf.Interactive || options.RequestTTY {
c.Interactive = true
}
if !cf.NoCache {
c.CachePolicy = &client.CachePolicy{}
}
// check version compatibility of the server and client
c.CheckVersions = !cf.SkipVersionCheck
// parse compatibility parameter
certificateFormat, err := parseCertificateCompatibilityFlag(cf.Compatibility, cf.CertificateFormat)
if err != nil {
return nil, trace.Wrap(err)
}
c.CertificateFormat = certificateFormat
// copy the authentication connector over
if cf.AuthConnector != "" {
c.AuthConnector = cf.AuthConnector
}
// If agent forwarding was specified on the command line enable it.
c.ForwardAgent = options.ForwardAgent
if cf.ForwardAgent {
c.ForwardAgent = client.ForwardAgentYes
}
	// If the caller does not want to check host keys, pass in an insecure host
	// key checker.
if !options.StrictHostKeyChecking {
c.HostKeyCallback = client.InsecureSkipHostKeyChecking
}
c.BindAddr = cf.BindAddr
// Don't execute remote command, used when port forwarding.
c.NoRemoteExec = cf.NoRemoteExec
// Allow the default browser used to open tsh login links to be overridden
// (not currently implemented) or set to 'none' to suppress browser opening entirely.
c.Browser = cf.Browser
c.AddKeysToAgent = cf.AddKeysToAgent
if !cf.UseLocalSSHAgent {
c.AddKeysToAgent = client.AddKeysToAgentNo
}
c.EnableEscapeSequences = cf.EnableEscapeSequences
// pass along mock sso login if provided (only used in tests)
c.MockSSOLogin = cf.mockSSOLogin
// Set tsh home directory
c.HomePath = cf.HomePath
if c.KeysDir == "" {
c.KeysDir = c.HomePath
}
tc, err := client.NewClient(c)
if err != nil {
return nil, trace.Wrap(err)
}
// Load SSH key for the cluster indicated in the profile.
// Handle gracefully if the profile is empty or if the key cannot be found.
if profileSiteName != "" {
if err := tc.LoadKeyForCluster(profileSiteName); err != nil {
log.Debug(err)
if !trace.IsNotFound(err) {
return nil, trace.Wrap(err)
}
}
}
// If identity file was provided, we skip loading the local profile info
// (above). This profile info provides the proxy-advertised listening
// addresses.
// To compensate, when using an identity file, explicitly fetch these
// addresses from the proxy (this is what Ping does).
if cf.IdentityFileIn != "" {
log.Debug("Pinging the proxy to fetch listening addresses for non-web ports.")
if _, err := tc.Ping(cf.Context); err != nil {
return nil, trace.Wrap(err)
}
}
return tc, nil
}
// defaultWebProxyPorts lists the default web proxy ports to try, in the
// order in which they will be tried.
var defaultWebProxyPorts = []int{
defaults.HTTPListenPort, teleport.StandardHTTPSPort,
}
// setClientWebProxyAddr configures the client WebProxyAddr and SSHProxyAddr
// configuration values. Values that are not fully specified via configuration
// or command-line options will be deduced if necessary.
//
// If successful, setClientWebProxyAddr will modify the client Config in-place.
func setClientWebProxyAddr(cf *CLIConf, c *client.Config) error {
// If the user has specified a proxy on the command line, and one has not
// already been specified from configuration...
if cf.Proxy != "" && c.WebProxyAddr == "" {
parsedAddrs, err := client.ParseProxyHost(cf.Proxy)
if err != nil {
return trace.Wrap(err)
}
proxyAddress := parsedAddrs.WebProxyAddr
if parsedAddrs.UsingDefaultWebProxyPort {
log.Debug("Web proxy port was not set. Attempting to detect port number to use.")
timeout, cancel := context.WithTimeout(context.Background(), proxyDefaultResolutionTimeout)
defer cancel()
proxyAddress, err = pickDefaultAddr(
timeout, cf.InsecureSkipVerify, parsedAddrs.Host, defaultWebProxyPorts)
// On error, fall back to the legacy behaviour
if err != nil {
log.WithError(err).Debug("Proxy port resolution failed, falling back to legacy default.")
return c.ParseProxyHost(cf.Proxy)
}
}
c.WebProxyAddr = proxyAddress
c.SSHProxyAddr = parsedAddrs.SSHProxyAddr
}
return nil
}
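// parseCertificateCompatibilityFlag reconciles the legacy --compat flag with
// the newer --cert-format flag; specifying both is an error.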
func parseCertificateCompatibilityFlag(compatibility string, certificateFormat string) (string, error) {
switch {
// if nothing is passed in, the role will decide
case compatibility == "" && certificateFormat == "":
return teleport.CertificateFormatUnspecified, nil
// supporting the old --compat format for backward compatibility
case compatibility != "" && certificateFormat == "":
return utils.CheckCertificateFormatFlag(compatibility)
// new documented flag --cert-format
case compatibility == "" && certificateFormat != "":
return utils.CheckCertificateFormatFlag(certificateFormat)
// can not use both
default:
return "", trace.BadParameter("--compat or --cert-format must be specified")
}
}
// refuseArgs helper makes sure that 'args' (list of CLI arguments)
// does not contain anything other than the command itself and flags.
func refuseArgs(command string, args []string) error {
	for _, arg := range args {
		if arg == command || strings.HasPrefix(arg, "-") {
			continue
		}
		return trace.BadParameter("unexpected argument: %s", arg)
	}
return nil
}
// authFromIdentity returns a standard ssh.AuthMethod for a given identity file
func authFromIdentity(k *client.Key) (ssh.AuthMethod, error) {
signer, err := sshutils.NewSigner(k.Priv, k.Cert)
if err != nil {
return nil, trace.Wrap(err)
}
return ssh.PublicKeys(signer), nil
}
// onShow reads an identity file (a public SSH key or a cert) and dumps it to stdout
func onShow(cf *CLIConf) error {
key, err := client.KeyFromIdentityFile(cf.IdentityFileIn)
if err != nil {
return trace.Wrap(err)
}
// unmarshal certificate bytes into a ssh.PublicKey
cert, _, _, _, err := ssh.ParseAuthorizedKey(key.Cert)
if err != nil {
return trace.Wrap(err)
}
// unmarshal private key bytes into a *rsa.PrivateKey
priv, err := ssh.ParseRawPrivateKey(key.Priv)
if err != nil {
return trace.Wrap(err)
}
pub, err := ssh.ParsePublicKey(key.Pub)
if err != nil {
return trace.Wrap(err)
}
fmt.Printf("Cert: %#v\nPriv: %#v\nPub: %#v\n",
cert, priv, pub)
fmt.Printf("Fingerprint: %s\n", ssh.FingerprintSHA256(pub))
return nil
}
// printStatus prints the status of the profile.
func printStatus(debug bool, p *client.ProfileStatus, isActive bool) {
var count int
var prefix string
if isActive {
prefix = "> "
} else {
prefix = " "
}
duration := time.Until(p.ValidUntil)
humanDuration := "EXPIRED"
if duration.Nanoseconds() > 0 {
humanDuration = fmt.Sprintf("valid for %v", duration.Round(time.Minute))
}
fmt.Printf("%vProfile URL: %v\n", prefix, p.ProxyURL.String())
fmt.Printf(" Logged in as: %v\n", p.Username)
if p.Cluster != "" {
fmt.Printf(" Cluster: %v\n", p.Cluster)
}
fmt.Printf(" Roles: %v\n", strings.Join(p.Roles, ", "))
if debug {
for k, v := range p.Traits {
if count == 0 {
fmt.Printf(" Traits: %v: %v\n", k, v)
} else {
fmt.Printf(" %v: %v\n", k, v)
}
count = count + 1
}
}
fmt.Printf(" Logins: %v\n", strings.Join(p.Logins, ", "))
if p.KubeEnabled {
fmt.Printf(" Kubernetes: enabled\n")
if kubeCluster := selectedKubeCluster(p.Cluster); kubeCluster != "" {
fmt.Printf(" Kubernetes cluster: %q\n", kubeCluster)
}
if len(p.KubeUsers) > 0 {
fmt.Printf(" Kubernetes users: %v\n", strings.Join(p.KubeUsers, ", "))
}
if len(p.KubeGroups) > 0 {
fmt.Printf(" Kubernetes groups: %v\n", strings.Join(p.KubeGroups, ", "))
}
} else {
fmt.Printf(" Kubernetes: disabled\n")
}
if len(p.Databases) != 0 {
fmt.Printf(" Databases: %v\n", strings.Join(p.DatabaseServices(), ", "))
}
fmt.Printf(" Valid until: %v [%v]\n", p.ValidUntil, humanDuration)
fmt.Printf(" Extensions: %v\n", strings.Join(p.Extensions, ", "))
fmt.Printf("\n")
}
// onStatus command shows which proxy the user is logged into and metadata
// about the certificate.
func onStatus(cf *CLIConf) error {
// Get the status of the active profile as well as the status
// of any other proxies the user is logged into.
//
// Return error if not logged in, no active profile, or expired.
profile, profiles, err := client.Status(cf.HomePath, cf.Proxy)
if err != nil {
return trace.Wrap(err)
}
printProfiles(cf.Debug, profile, profiles)
if profile == nil {
return trace.NotFound("Not logged in.")
}
duration := time.Until(profile.ValidUntil)
if !profile.ValidUntil.IsZero() && duration.Nanoseconds() <= 0 {
return trace.NotFound("Active profile expired.")
}
return nil
}
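// printProfiles prints the status of the active profile, if any, followed by
// all other profiles.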
func printProfiles(debug bool, profile *client.ProfileStatus, profiles []*client.ProfileStatus) {
if profile == nil && len(profiles) == 0 {
return
}
// Print the active profile.
if profile != nil {
printStatus(debug, profile, true)
}
// Print all other profiles.
for _, p := range profiles {
printStatus(debug, p, false)
}
}
// host is a utility function that extracts the host from a host:port pair;
// on any error it returns the original value.
func host(in string) string {
out, err := utils.Host(in)
if err != nil {
return in
}
return out
}
// getRequestResolution registers an access request with the auth server and waits for it to be resolved.
func getRequestResolution(cf *CLIConf, clt auth.ClientI, req types.AccessRequest) (types.AccessRequest, error) {
// set up request watcher before submitting the request to the admin server
// in order to avoid potential race.
filter := types.AccessRequestFilter{
User: req.GetUser(),
}
watcher, err := clt.NewWatcher(cf.Context, types.Watch{
Name: "await-request-approval",
Kinds: []types.WatchKind{
types.WatchKind{
Kind: types.KindAccessRequest,
Filter: filter.IntoMap(),
},
},
})
if err != nil {
return nil, trace.Wrap(err)
}
defer watcher.Close()
if err := clt.CreateAccessRequest(cf.Context, req); err != nil {
return nil, trace.Wrap(err)
}
Loop:
for {
select {
case event := <-watcher.Events():
switch event.Type {
case types.OpInit:
log.Infof("Access-request watcher initialized...")
continue Loop
case types.OpPut:
r, ok := event.Resource.(*types.AccessRequestV3)
if !ok {
return nil, trace.BadParameter("unexpected resource type %T", event.Resource)
}
if r.GetName() != req.GetName() || r.GetState().IsPending() {
log.Debugf("Skipping put event id=%s,state=%s.", r.GetName(), r.GetState())
continue Loop
}
return r, nil
case types.OpDelete:
if event.Resource.GetName() != req.GetName() {
log.Debugf("Skipping delete event id=%s", event.Resource.GetName())
continue Loop
}
return nil, trace.Errorf("request %s has expired or been deleted...", event.Resource.GetName())
default:
log.Warnf("Skipping unknown event type %s", event.Type)
}
case <-watcher.Done():
return nil, trace.Wrap(watcher.Error())
}
}
}
// reissueWithRequests handles a certificate reissue, applying new requests by ID,
// and saving the updated profile.
func reissueWithRequests(cf *CLIConf, tc *client.TeleportClient, reqIDs ...string) error {
profile, err := client.StatusCurrent(cf.HomePath, cf.Proxy)
if err != nil {
return trace.Wrap(err)
}
params := client.ReissueParams{
AccessRequests: reqIDs,
RouteToCluster: cf.SiteName,
}
// if the certificate already had active requests, add them to our inputs parameters.
if len(profile.ActiveRequests.AccessRequests) > 0 {
params.AccessRequests = append(params.AccessRequests, profile.ActiveRequests.AccessRequests...)
}
if params.RouteToCluster == "" {
params.RouteToCluster = profile.Cluster
}
if err := tc.ReissueUserCerts(cf.Context, client.CertCacheDrop, params); err != nil {
return trace.Wrap(err)
}
if err := tc.SaveProfile("", true); err != nil {
return trace.Wrap(err)
}
if err := updateKubeConfig(cf, tc); err != nil {
return trace.Wrap(err)
}
return nil
}
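// onApps lists the available application servers, sorted by name, and marks
// the apps the user is logged into.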
func onApps(cf *CLIConf) error {
tc, err := makeClient(cf, false)
if err != nil {
return trace.Wrap(err)
}
// Get a list of all applications.
var servers []types.Server
err = client.RetryWithRelogin(cf.Context, tc, func() error {
servers, err = tc.ListAppServers(cf.Context)
return err
})
if err != nil {
return trace.Wrap(err)
}
// Retrieve profile to be able to show which apps user is logged into.
profile, err := client.StatusCurrent(cf.HomePath, cf.Proxy)
if err != nil {
return trace.Wrap(err)
}
// Sort by server host name.
sort.Slice(servers, func(i, j int) bool {
return servers[i].GetName() < servers[j].GetName()
})
showApps(servers, profile.Apps, cf.Verbose)
return nil
}
// onEnvironment handles "tsh env" command.
func onEnvironment(cf *CLIConf) error {
profile, err := client.StatusCurrent(cf.HomePath, cf.Proxy)
if err != nil {
return trace.Wrap(err)
}
// Print shell built-in commands to set (or unset) environment.
switch {
case cf.unsetEnvironment:
fmt.Printf("unset %v\n", proxyEnvVar)
fmt.Printf("unset %v\n", clusterEnvVar)
case !cf.unsetEnvironment:
fmt.Printf("export %v=%v\n", proxyEnvVar, profile.ProxyURL.Host)
fmt.Printf("export %v=%v\n", clusterEnvVar, profile.Cluster)
}
return nil
}
// readClusterFlag figures out the cluster the user is attempting to select.
// The command-line flag always has priority, then TELEPORT_CLUSTER, and
// finally the legacy TELEPORT_SITE variable.
func readClusterFlag(cf *CLIConf, fn envGetter) {
// If the user specified something on the command line, prefer that.
if cf.SiteName != "" {
return
}
// Otherwise pick up cluster name from environment.
if clusterName := fn(siteEnvVar); clusterName != "" {
cf.SiteName = clusterName
}
if clusterName := fn(clusterEnvVar); clusterName != "" {
cf.SiteName = clusterName
}
}
// envGetter is used to read in the environment. In production "os.Getenv"
// is used.
type envGetter func(string) string
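// handleUnimplementedError pings the proxy to discover the server version and
// wraps the original error with an upgrade hint.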
func handleUnimplementedError(ctx context.Context, perr error, cf CLIConf) error {
const (
errMsgFormat = "This server does not implement this feature yet. Likely the client version you are using is newer than the server. The server version: %v, the client version: %v. Please upgrade the server."
unknownServerVersion = "unknown"
)
tc, err := makeClient(&cf, false)
if err != nil {
log.WithError(err).Warning("Failed to create client.")
return trace.WrapWithMessage(perr, errMsgFormat, unknownServerVersion, teleport.Version)
}
pr, err := tc.Ping(ctx)
if err != nil {
log.WithError(err).Warning("Failed to call ping.")
return trace.WrapWithMessage(perr, errMsgFormat, unknownServerVersion, teleport.Version)
}
return trace.WrapWithMessage(perr, errMsgFormat, pr.ServerVersion, teleport.Version)
}
// readTeleportHome gets home directory from environment if configured.
func readTeleportHome(cf *CLIConf, fn envGetter) {
if homeDir := fn(homeEnvVar); homeDir != "" {
cf.HomePath = path.Clean(homeDir)
}
}
| []
| []
| []
| [] | [] | go | 0 | 0 | |
webhook.go | package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/golang/glog"
"k8s.io/api/admission/v1beta1"
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
v1 "k8s.io/kubernetes/pkg/apis/core/v1"
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log"
)
var (
runtimeScheme = runtime.NewScheme()
codecs = serializer.NewCodecFactory(runtimeScheme)
deserializer = codecs.UniversalDeserializer()
// (https://github.com/kubernetes/kubernetes/issues/57982)
defaulter = runtime.ObjectDefaulter(runtimeScheme)
)
var (
ignoredNamespaces = []string{
metav1.NamespaceSystem,
metav1.NamespacePublic,
}
requiredLabels = []string{
nameLabel,
instanceLabel,
versionLabel,
componentLabel,
partOfLabel,
managedByLabel,
}
addLabels = map[string]string{
nameLabel: NA,
instanceLabel: NA,
versionLabel: NA,
componentLabel: NA,
partOfLabel: NA,
managedByLabel: NA,
}
)
const (
admissionWebhookAnnotationValidateKey = "admissionwebhook.99bill.com/validate"
admissionWebhookAnnotationMutateKey = "mount.99bill.com/log"
admissionWebhookAnnotationStatusKey = "mount.99bill.com/status"
nameLabel = "app.kubernetes.io/name"
instanceLabel = "app.kubernetes.io/instance"
versionLabel = "app.kubernetes.io/version"
componentLabel = "app.kubernetes.io/component"
partOfLabel = "app.kubernetes.io/part-of"
managedByLabel = "app.kubernetes.io/managed-by"
// The default value for labels
NA = "not_available"
)
var volumeName string = os.Getenv("volumeName")
var volumePath string = os.Getenv("volumePath")
var mountPath string = os.Getenv("mountPath")
var logHostPathSource = corev1.HostPathVolumeSource{
Path: volumePath,
//Type: logMountType,
}
//var logVolumeSource = corev1.VolumeSource{
// HostPath: &logHostPathSource,
//}
var LogVolume = corev1.Volume{
Name: volumeName,
VolumeSource: corev1.VolumeSource{HostPath: &logHostPathSource},
}
var LogMount = corev1.VolumeMount{
Name: volumeName,
ReadOnly: false,
MountPath: mountPath,
}
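// WebhookServer wraps the HTTP server that handles admission review requests.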
type WebhookServer struct {
server *http.Server
}
// Webhook Server parameters
type WhSvrParameters struct {
port int // webhook server port
certFile string // path to the x509 certificate for https
keyFile string // path to the x509 private key matching `CertFile`
sidecarCfgFile string // path to sidecar injector configuration file
//mountPath string // mount dir to container
//volumePath string // volueme dir to host
//volumeName string // volume to name
}
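// patchOperation describes a single RFC 6902 JSON patch operation returned to
// the API server.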
type patchOperation struct {
Op string `json:"op"`
Path string `json:"path"`
Value interface{} `json:"value,omitempty"`
}
func init() {
_ = corev1.AddToScheme(runtimeScheme)
_ = admissionregistrationv1beta1.AddToScheme(runtimeScheme)
// defaulting with webhooks:
// https://github.com/kubernetes/kubernetes/issues/57982
_ = v1.AddToScheme(runtimeScheme)
}
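// admissionRequired reports whether the webhook should act on the object,
// skipping special kubernetes namespaces and honoring the given opt-out
// annotation.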
func admissionRequired(ignoredList []string, admissionAnnotationKey string, metadata *metav1.ObjectMeta) bool {
// skip special kubernetes system namespaces
for _, namespace := range ignoredList {
if metadata.Namespace == namespace {
glog.Infof("Skip validation for %v for it's in special namespace:%v", metadata.Name, metadata.Namespace)
return false
}
}
annotations := metadata.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
var required bool
switch strings.ToLower(annotations[admissionAnnotationKey]) {
default:
required = true
case "n", "no", "false", "off":
required = false
}
return required
}
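// mutationRequired reports whether the object still needs to be mutated;
// objects already annotated as "mutated" are skipped.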
func mutationRequired(ignoredList []string, metadata *metav1.ObjectMeta) bool {
required := admissionRequired(ignoredList, admissionWebhookAnnotationMutateKey, metadata)
annotations := metadata.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
status := annotations[admissionWebhookAnnotationStatusKey]
if strings.ToLower(status) == "mutated" {
required = false
}
glog.Infof("Mutation policy for %v/%v: required:%v", metadata.Namespace, metadata.Name, required)
return required
}
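// validationRequired reports whether the object should be validated, based on
// the ignored namespaces and the validate annotation.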
func validationRequired(ignoredList []string, metadata *metav1.ObjectMeta) bool {
required := admissionRequired(ignoredList, admissionWebhookAnnotationValidateKey, metadata)
glog.Infof("Validation policy for %v/%v: required:%v", metadata.Namespace, metadata.Name, required)
return required
}
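// updateAnnotation builds patch operations that add missing annotations and
// replace existing ones.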
func updateAnnotation(target map[string]string, added map[string]string) (patch []patchOperation) {
for key, value := range added {
if target == nil || target[key] == "" {
target = map[string]string{}
patch = append(patch, patchOperation{
Op: "add",
Path: "/metadata/annotations",
Value: map[string]string{
key: value,
},
})
} else {
patch = append(patch, patchOperation{
Op: "replace",
Path: "/metadata/annotations/" + key,
Value: value,
})
}
}
return patch
}
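// updateLabels builds a patch operation that adds any labels missing from the
// target.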
func updateLabels(target map[string]string, added map[string]string) (patch []patchOperation) {
values := make(map[string]string)
for key, value := range added {
if target == nil || target[key] == "" {
values[key] = value
}
}
patch = append(patch, patchOperation{
Op: "add",
Path: "/metadata/labels",
Value: values,
})
return patch
}
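// updateMounts merges the added volume mounts into the container; existing
// mounts win on name collisions.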
func updateMounts(index int, target *corev1.Container, added []corev1.VolumeMount) bool {
	if targetMounts := target.VolumeMounts; targetMounts != nil {
		// Collect existing mount names first; removing elements from a slice
		// while ranging over it skips entries.
		existing := make(map[string]bool, len(targetMounts))
		for _, vV := range targetMounts {
			existing[vV.Name] = true
		}
		filtered := added[:0]
		for _, vA := range added {
			if !existing[vA.Name] {
				filtered = append(filtered, vA)
			}
		}
		added = append(filtered, targetMounts...)
	}
	target.VolumeMounts = added
	return true
}
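// updateVolumes merges the added volumes into the pod spec and emits a patch
// replacing the whole spec; existing volumes win on name collisions.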
func updateVolumes(target *corev1.PodSpec, added []corev1.Volume) (patch []patchOperation) {
	value := *target
	if targetVolumes := value.Volumes; targetVolumes != nil {
		// Collect existing volume names first; removing elements from a slice
		// while ranging over it skips entries.
		existing := make(map[string]bool, len(targetVolumes))
		for _, vV := range targetVolumes {
			existing[vV.Name] = true
		}
		filtered := added[:0]
		for _, vA := range added {
			if !existing[vA.Name] {
				filtered = append(filtered, vA)
			}
		}
		added = append(filtered, targetVolumes...)
	}
	value.Volumes = added
	patch = append(patch, patchOperation{
		Op:    "replace",
		Path:  "/spec/template/spec",
		Value: value,
	})
	return patch
}
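// createPatch builds the JSON patch that injects the log volume mount into
// the container whose name matches the deployment and merges the log volume
// into the pod spec.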
func createPatch(deploy *appsv1.Deployment, mounts []corev1.VolumeMount, volumes []corev1.Volume) ([]byte, error) {
var patch []patchOperation
var index int
var availableContainer *corev1.Container
podSpec := &deploy.Spec.Template.Spec
for k, v := range podSpec.Containers {
if v.Name == deploy.Name {
index = k
availableContainer = &podSpec.Containers[k]
			if !updateMounts(index, availableContainer, mounts) {
log.Errorf("Update Mount failed.")
}
}
}
//patch = append(patch, updateAnnotation(availableAnnotations, annotations)...)
//patch = append(patch, updateLabels(availableLabels, labels)...)
//patch = append(patch, updateMounts(index, &availableContainer, mounts)...)
patch = append(patch, updateVolumes(podSpec, volumes)...)
return json.Marshal(patch)
}
// validate deployments and services
func (whsvr *WebhookServer) validate(ar *v1beta1.AdmissionReview) *v1beta1.AdmissionResponse {
req := ar.Request
var (
availableLabels map[string]string
objectMeta *metav1.ObjectMeta
resourceNamespace, resourceName string
)
glog.Infof("AdmissionReview for Kind=%v, Namespace=%v Name=%v (%v) UID=%v patchOperation=%v UserInfo=%v",
req.Kind, req.Namespace, req.Name, resourceName, req.UID, req.Operation, req.UserInfo)
switch req.Kind.Kind {
case "Deployment":
var deployment appsv1.Deployment
if err := json.Unmarshal(req.Object.Raw, &deployment); err != nil {
glog.Errorf("Could not unmarshal raw object: %v", err)
return &v1beta1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
},
}
}
		resourceName, resourceNamespace, objectMeta = deployment.Name, deployment.Namespace, &deployment.ObjectMeta
		availableLabels = deployment.Labels
case "Service":
var service corev1.Service
if err := json.Unmarshal(req.Object.Raw, &service); err != nil {
glog.Errorf("Could not unmarshal raw object: %v", err)
return &v1beta1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
},
}
}
resourceName, resourceNamespace, objectMeta = service.Name, service.Namespace, &service.ObjectMeta
availableLabels = service.Labels
}
if !validationRequired(ignoredNamespaces, objectMeta) {
glog.Infof("Skipping validation for %s/%s due to policy check", resourceNamespace, resourceName)
return &v1beta1.AdmissionResponse{
Allowed: true,
}
}
allowed := true
var result *metav1.Status
glog.Info("available labels:", availableLabels)
glog.Info("required labels", requiredLabels)
for _, rl := range requiredLabels {
if _, ok := availableLabels[rl]; !ok {
allowed = false
result = &metav1.Status{
Reason: "required labels are not set",
}
break
}
}
return &v1beta1.AdmissionResponse{
Allowed: allowed,
Result: result,
}
}
// main mutation process
func (whsvr *WebhookServer) mutate(ar *v1beta1.AdmissionReview) *v1beta1.AdmissionResponse {
req := ar.Request
var (
//availableAnnotations map[string]string
//availableMounts []corev1.VolumeMount
//availableVolumes []corev1.Volume
objectMeta *metav1.ObjectMeta
resourceNamespace, resourceName string
deployment appsv1.Deployment
)
glog.Infof("AdmissionReview for Kind=%v, Namespace=%v Name=%v (%v) UID=%v patchOperation=%v UserInfo=%v",
req.Kind, req.Namespace, req.Name, resourceName, req.UID, req.Operation, req.UserInfo)
switch req.Kind.Kind {
case "Deployment":
//var deployment appsv1.Deployment
if err := json.Unmarshal(req.Object.Raw, &deployment); err != nil {
glog.Errorf("Could not unmarshal raw object: %v", err)
return &v1beta1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
},
}
}
//glog.Info(deployment)
resourceName, resourceNamespace, objectMeta = deployment.Name, deployment.Namespace, &deployment.ObjectMeta
//availableLabels = deployment.Labels
//availableVolumes = deployment.Spec.Template.Spec.Volumes
//container := deployment.Spec.Template.Spec.Containers[0]
//availableMounts = container.VolumeMounts
/*
case "Service":
var service corev1.Service
if err := json.Unmarshal(req.Object.Raw, &service); err != nil {
glog.Errorf("Could not unmarshal raw object: %v", err)
return &v1beta1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
},
}
}
resourceName, resourceNamespace, objectMeta = service.Name, service.Namespace, &service.ObjectMeta
availableLabels = service.Labels
*/
}
if !mutationRequired(ignoredNamespaces, objectMeta) {
glog.Infof("Skipping validation for %s/%s due to policy check", resourceNamespace, resourceName)
return &v1beta1.AdmissionResponse{
Allowed: true,
}
}
NeedVolumes := []corev1.Volume{LogVolume}
NeedMounts := []corev1.VolumeMount{LogMount}
//annotations := map[string]string{admissionWebhookAnnotationStatusKey: "mutated"}
patchBytes, err := createPatch(&deployment, NeedMounts, NeedVolumes)
if err != nil {
return &v1beta1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
},
}
}
glog.Infof("AdmissionResponse: patch=%v\n", string(patchBytes))
return &v1beta1.AdmissionResponse{
Allowed: true,
Patch: patchBytes,
PatchType: func() *v1beta1.PatchType {
pt := v1beta1.PatchTypeJSONPatch
return &pt
}(),
}
}
// Serve method for webhook server
func (whsvr *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
var body []byte
if r.Body != nil {
if data, err := ioutil.ReadAll(r.Body); err == nil {
body = data
}
}
if len(body) == 0 {
glog.Error("empty body")
http.Error(w, "empty body", http.StatusBadRequest)
return
}
// verify the content type is accurate
contentType := r.Header.Get("Content-Type")
if contentType != "application/json" {
glog.Errorf("Content-Type=%s, expect application/json", contentType)
http.Error(w, "invalid Content-Type, expect `application/json`", http.StatusUnsupportedMediaType)
return
}
var admissionResponse *v1beta1.AdmissionResponse
ar := v1beta1.AdmissionReview{}
if _, _, err := deserializer.Decode(body, nil, &ar); err != nil {
glog.Errorf("Can't decode body: %v", err)
admissionResponse = &v1beta1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
},
}
} else {
fmt.Println(r.URL.Path)
if r.URL.Path == "/mutate" {
admissionResponse = whsvr.mutate(&ar)
} else if r.URL.Path == "/validate" {
admissionResponse = whsvr.validate(&ar)
}
}
admissionReview := v1beta1.AdmissionReview{}
if admissionResponse != nil {
admissionReview.Response = admissionResponse
if ar.Request != nil {
admissionReview.Response.UID = ar.Request.UID
}
}
resp, err := json.Marshal(admissionReview)
if err != nil {
glog.Errorf("Can't encode response: %v", err)
http.Error(w, fmt.Sprintf("could not encode response: %v", err), http.StatusInternalServerError)
return
}
glog.Infof("Ready to write response ...")
if _, err := w.Write(resp); err != nil {
glog.Errorf("Can't write response: %v", err)
http.Error(w, fmt.Sprintf("could not write response: %v", err), http.StatusInternalServerError)
}
}
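// Illustrative wiring sketch (hypothetical main; not part of the original source).
// Since serve dispatches on the URL path, a server could register it for both
// webhook endpoints, assuming WebhookServer wraps an *http.Server:
//
//   mux := http.NewServeMux()
//   mux.HandleFunc("/mutate", whsvr.serve)
//   mux.HandleFunc("/validate", whsvr.serve)
//   whsvr.server.Handler = mux
//   glog.Fatal(whsvr.server.ListenAndServeTLS("", ""))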
| [
"\"volumeName\"",
"\"volumePath\"",
"\"mountPath\""
]
| []
| [
"volumeName",
"volumePath",
"mountPath"
]
| [] | ["volumeName", "volumePath", "mountPath"] | go | 3 | 0 | |
registrar/markmonitorsftp/markmonitorsftp.go | // Copyright 2021 MarkMonitor Technologies, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package markmonitorsftp
import (
dns "github.com/akamai/AkamaiOPEN-edgegrid-golang/configdns-v2"
"github.com/akamai/edgedns-registrar-coordinator/registrar"
log "github.com/apex/log"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/knownhosts"
"bufio"
"context"
"fmt"
"gopkg.in/yaml.v2"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
)
const (
//dfault cert algo
DefaultCertAlgorithm = ""
// default signature
DefaultSignature = ""
// default port
DefaultHostPort = 22
DefaultFileTTL = time.Second * 600
)
var ()
// MarkMonitorDNSService is a proxy interface of the Akamai edgegrid configdns-v2 package that can be stubbed for testing.
type MarkMonitorDNSService interface {
GetDomains(ctx context.Context) ([]string, error)
GetDomain(ctx context.Context, domain string) (*registrar.Domain, error)
GetTsigKey(ctx context.Context, domain string) (*dns.TSIGKey, error)
GetServeAlgorithm(ctx context.Context, domain string) (string, error)
GetMasterIPs(ctx context.Context) ([]string, error)
}
type SFTPDNSService interface {
EstablishSFTPSession(log *log.Entry, markmonitorConfig *MarkMonitorSFTPConfig) error
ReadRemoteDomainFile(log *log.Entry, markmonitorConfig *MarkMonitorSFTPConfig) (*[]string, error)
ParseDomainFile(log *log.Entry, domFile *os.File) (*[]string, error)
}
type SFTPDNSConfig struct {
sftpClient *sftp.Client
sshClient *ssh.Client
domainMasterList *[]string
lastDomFileUpdate time.Time
domFileTTL time.Duration
}
// MarkMonitorSFTPRegistrar implements the DNS registrar for Mark Monitor SFTP.
type MarkMonitorSFTPRegistrar struct {
registrar.BaseRegistrarProvider
markmonitorConfig *MarkMonitorSFTPConfig
closeSFTPSession func(interface{})
// Defines client. Allows for mocking.
sftpService SFTPDNSService
}
type MarkMonitorSFTPConfig struct {
MarkMonitorSFTPConfigPath string
MarkMonitorSshUser string `yaml:"markmonitor_ssh_user"`
MarkMonitorSshPassword string `yaml:"markmonitor_ssh_password"`
MarkMonitorSshHost string `yaml:"markmonitor_ssh_host"`
MarkMonitorSshPort int `yaml:"markmonitor_ssh_port"`
MarkMonitorSslCertAlgorithm string `yaml:"markmonitor_ssl_cert_algorithm"`
MarkMonitorSslSignature string `yaml:"markmonitor_ssl_signature"`
MarkMonitorSftpPktSize int `yaml:"markmonitor_sftp_pkt_size"`
MarkMonitorMasterIPs []string `yaml:"markmonitor_master_ips"`
MarkMonitorDomainConfigFilePath string `yaml:"markmonitor_registrar_domain_filepath"`
MarkMonitorTempDomainFileFolder string `yaml:"markmonitor_temp_file_folder"`
MarkMonitorDomFileTTL string `yaml:"markmonitor_domain_file_ttle"` // in seconds
}
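// Illustrative config file (hypothetical values; not part of the original source)
// matching the yaml tags above:
//
//   markmonitor_ssh_user: "registrar"
//   markmonitor_ssh_password: "s3cret"
//   markmonitor_ssh_host: "sftp.example.com"
//   markmonitor_ssh_port: 22
//   markmonitor_master_ips:
//     - "192.0.2.10"
//   markmonitor_registrar_domain_filepath: "/conf/named.conf"
//   markmonitor_temp_file_folder: "/tmp"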
// Create and return an SSH connection
func initSSHClient(log *log.Entry, markmonitorConfig *MarkMonitorSFTPConfig) (*ssh.Client, error) {
// use known_hosts in the users home directory
hostKeyCallback, err := knownhosts.New(filepath.Join(os.Getenv("HOME"), ".ssh", "known_hosts"))
if err != nil {
log.Errorf("could not create hostkeycallback function: %s", err.Error())
return nil, err
}
config := &ssh.ClientConfig{
User: markmonitorConfig.MarkMonitorSshUser,
Auth: []ssh.AuthMethod{
ssh.Password(markmonitorConfig.MarkMonitorSshPassword),
},
HostKeyCallback: hostKeyCallback,
}
if markmonitorConfig.MarkMonitorSslCertAlgorithm != "" {
config.HostKeyAlgorithms = append(config.HostKeyAlgorithms, markmonitorConfig.MarkMonitorSslCertAlgorithm)
}
sshAddr := markmonitorConfig.MarkMonitorSshHost + ":" + strconv.Itoa(markmonitorConfig.MarkMonitorSshPort)
// connect
client, err := ssh.Dial("tcp", sshAddr, config)
if err != nil {
return nil, err
}
return client, nil
}
// Create and return sftp client
func initSFTPClient(log *log.Entry, markmonitorConfig *MarkMonitorSFTPConfig, sshClient *ssh.Client) (*sftp.Client, error) {
var err error
var sftpClient *sftp.Client
// create new SFTP client
if markmonitorConfig.MarkMonitorSftpPktSize != 0 {
sftpClient, err = sftp.NewClient(sshClient, sftp.MaxPacket(markmonitorConfig.MarkMonitorSftpPktSize))
} else {
sftpClient, err = sftp.NewClient(sshClient)
}
if err != nil {
return nil, err
}
return sftpClient, nil
}
func closeSFTPSession(config interface{}) {
sftpDNSConfig, ok := config.(*SFTPDNSConfig)
if !ok {
return
}
// close active connections
if sftpDNSConfig.sftpClient != nil {
sftpDNSConfig.sftpClient.Close()
}
if sftpDNSConfig.sshClient != nil {
sftpDNSConfig.sshClient.Close()
}
// clear
sftpDNSConfig.sftpClient = nil
sftpDNSConfig.sshClient = nil
}
// NewMarkMonitorProvider initializes a new MarkMonitor DNS based Provider.
func NewMarkMonitorSFTPRegistrar(ctx context.Context, mmConfig MarkMonitorSFTPConfig, sftpService SFTPDNSService) (*MarkMonitorSFTPRegistrar, error) {
var err error
log := ctx.Value("appLog").(*log.Entry)
markmonitorConfig := &mmConfig
// if mock, skip
if sftpService == nil {
// Get file config and parse
if mmConfig.MarkMonitorSFTPConfigPath == "" {
return nil, fmt.Errorf("MarkMonitor Registrar requires a configuration file")
}
markmonitorConfig, err = loadConfig(log, mmConfig.MarkMonitorSFTPConfigPath)
if err != nil {
return nil, fmt.Errorf("MarkMonitor Registrar. Invalid configuration file")
}
}
// Set up ssh and sftp clients/session
if markmonitorConfig.MarkMonitorSshHost == "" || markmonitorConfig.MarkMonitorSshUser == "" || markmonitorConfig.MarkMonitorSshPassword == "" {
return nil, fmt.Errorf("MarkMonitor Registrar. Invalid configuration file. One or more required credentials missing.")
}
if len(markmonitorConfig.MarkMonitorMasterIPs) < 1 {
return nil, fmt.Errorf("MarkMonitor Registrar. One or more Master IPs required.")
}
if markmonitorConfig.MarkMonitorDomainConfigFilePath == "" {
return nil, fmt.Errorf("MarkMonitor Registrar. Invalid configuration file. Remote domain file path missing.")
}
if markmonitorConfig.MarkMonitorSslCertAlgorithm == "" {
log.Infof("MarkMonitor using default SSL Certificate Algorithm: %s", DefaultCertAlgorithm)
markmonitorConfig.MarkMonitorSslCertAlgorithm = DefaultCertAlgorithm
}
if markmonitorConfig.MarkMonitorSslSignature == "" {
log.Infof("MarkMonitor using default SSL Signature: %s", DefaultSignature)
markmonitorConfig.MarkMonitorSslSignature = DefaultSignature
}
if markmonitorConfig.MarkMonitorSshPort == 0 {
log.Infof("MarkMonitor using default port: %v", DefaultHostPort)
markmonitorConfig.MarkMonitorSshPort = DefaultHostPort
}
provider := &MarkMonitorSFTPRegistrar{
markmonitorConfig: markmonitorConfig,
sftpService: &SFTPDNSConfig{},
closeSFTPSession: closeSFTPSession,
}
if sftpService != nil {
log.Debugf("Using STUB")
provider.sftpService = sftpService
} else {
err := provider.sftpService.EstablishSFTPSession(log, markmonitorConfig)
if err != nil {
closeSFTPSession(provider.sftpService)
log.Errorf("MarkMonitor Registrar. Failed to initialize SFTP Client. %s", err.Error())
return nil, fmt.Errorf("MarkMonitor Registrar. Failed to initialize SFTP Client.")
}
defer closeSFTPSession(provider.sftpService)
dur, err := time.ParseDuration(markmonitorConfig.MarkMonitorDomFileTTL)
if provider.markmonitorConfig.MarkMonitorDomFileTTL != "" && err == nil {
provider.sftpService.(*SFTPDNSConfig).domFileTTL = dur
} else {
provider.sftpService.(*SFTPDNSConfig).domFileTTL = DefaultFileTTL
}
}
return provider, nil
}
//
func closeAndRemoveDomainsFile(log *log.Entry, localDomsFile *os.File) {
// deferred so the temp file is removed even if Close fails
defer os.Remove(localDomsFile.Name())
if err := localDomsFile.Close(); err != nil {
log.Warnf("Failed to close temp file. %s", err.Error())
}
return
}
// parseDomainsConfigFile parses MM domains file into a list.
func parseDomainsConfigFile(localDomsFile *os.File) ([]string, error) {
// STUB
return []string{"zone-1.com", "zone-2.com"}, nil
}
/*
Service Entry Points
*/
func (mm *MarkMonitorSFTPRegistrar) GetDomains(ctx context.Context) ([]string, error) {
log := ctx.Value("appLog").(*log.Entry)
log.Debug("Entering MarkMonitor registrar GetDomains")
defer mm.closeSFTPSession(mm.sftpService)
err := mm.sftpService.EstablishSFTPSession(log, mm.markmonitorConfig)
if err != nil {
log.Errorf(" MarkMonitor GetDomains: Failed to initialize SFTP Client. %s", err.Error())
return []string{}, fmt.Errorf("MarkMonitor GetDomains: Failed to initialize SFTP Client.")
}
domains, err := mm.sftpService.ReadRemoteDomainFile(log, mm.markmonitorConfig)
if err != nil {
log.Errorf(" MarkMonitor GetDomains: Failed. %s", err.Error())
return []string{}, fmt.Errorf("MarkMonitor GetDomains: Failed to parse domains file.")
}
log.Debugf("Registrar GetDomains result: %v", domains)
return *domains, nil
}
func (mm *MarkMonitorSFTPRegistrar) GetDomain(ctx context.Context, domain string) (*registrar.Domain, error) {
log := ctx.Value("appLog").(*log.Entry)
log.Debug("Entering MarkMonitor registrar GetDomain")
defer mm.closeSFTPSession(mm.sftpService)
err := mm.sftpService.EstablishSFTPSession(log, mm.markmonitorConfig)
if err != nil {
log.Errorf(" MarkMonitor GetDomains: Failed to initialize SFTP Client. %s", err.Error())
return nil, fmt.Errorf("MarkMonitor GetDomains: Failed to initialize SFTP Client.")
}
zone := "not implemented"
log.Debugf("Registrar GetDomain result: %v", zone)
/*
return &registrar.Domain{
Name: zone.Zone,
Type: zone.Type,
SignAndServe: zone.SignAndServe,
SignAndServeAlgorithm: zone.SignAndServeAlgorithm,
Masters: zone.Masters,
TsigKey: zone.TsigKey,
}, nil
*/
return &registrar.Domain{}, nil
}
func (mm *MarkMonitorSFTPRegistrar) GetTsigKey(ctx context.Context, domain string) (tsigKey *dns.TSIGKey, err error) {
log := ctx.Value("appLog").(*log.Entry)
log.Debug("Entering MarkMonitor registrar GetTsigKey")
defer mm.closeSFTPSession(mm.sftpService)
err = mm.sftpService.EstablishSFTPSession(log, mm.markmonitorConfig)
if err != nil {
log.Errorf(" MarkMonitor GetDomains: Failed to initialize SFTP Client. %s", err.Error())
return nil, fmt.Errorf("MarkMonitor GetDomains: Failed to initialize SFTP Client.")
}
log.Info("MarkMonitorSFTPRegistrar does not support Tsig")
return
}
func (mm *MarkMonitorSFTPRegistrar) GetServeAlgorithm(ctx context.Context, domain string) (algo string, err error) {
log := ctx.Value("appLog").(*log.Entry)
log.Debug("Entering MarkMonitor registrar GetServeAlgorithm")
defer mm.closeSFTPSession(mm.sftpService)
err = mm.sftpService.EstablishSFTPSession(log, mm.markmonitorConfig)
if err != nil {
log.Errorf(" MarkMonitor GetDomains: Failed to initialize SFTP Client. %s", err.Error())
return "", fmt.Errorf("MarkMonitor GetDomains: Failed to initialize SFTP Client.")
}
log.Info("MarkMonitorSFTPRegistrar does not support DNSSEC")
return
}
func (mm *MarkMonitorSFTPRegistrar) GetMasterIPs(ctx context.Context) (masters []string, err error) {
log := ctx.Value("appLog").(*log.Entry)
log.Debug("Entering MarkMonitor registrar GetMasterIPs")
log.Debugf("Registrar GetMasterIPs result: %v", mm.markmonitorConfig.MarkMonitorMasterIPs)
return mm.markmonitorConfig.MarkMonitorMasterIPs, nil
}
//
// Config file processing
//
func loadConfig(log *log.Entry, configFile string) (*MarkMonitorSFTPConfig, error) {
log.Debug("Entering MarkMonitor registrar loadConfig")
if fileExists(configFile) {
// Load config from file
configData, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
return loadConfigContent(log, configData)
}
log.Infof("Config file %v does not exist, using default values", configFile)
return nil, nil
}
func loadConfigContent(log *log.Entry, configData []byte) (*MarkMonitorSFTPConfig, error) {
config := MarkMonitorSFTPConfig{}
err := yaml.Unmarshal(configData, &config)
if err != nil {
return nil, err
}
log.Info("akamai registrar config loaded")
return &config, nil
}
func fileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
//
// Stubbable functions
//
// establish SFTPSession if not already
func (s *SFTPDNSConfig) EstablishSFTPSession(log *log.Entry, markmonitorConfig *MarkMonitorSFTPConfig) error {
log.Debugf("EstablishSFTPSession")
if s.sshClient == nil {
sshClient, err := initSSHClient(log, markmonitorConfig)
if err != nil {
log.Errorf("MarkMonitor Registrar. Failed to initialize SSH Client. %s", err.Error())
return fmt.Errorf("MarkMonitor Registrar. Failed to initialize SSH Client.")
}
s.sshClient = sshClient
log.Debugf("SSH Session created: [%v]", *s.sshClient)
}
if s.sftpClient == nil {
sftpClient, err := initSFTPClient(log, markmonitorConfig, s.sshClient)
if err != nil {
log.Errorf("MarkMonitor Registrar. Failed to initialize SFTP Client. %s", err.Error())
return fmt.Errorf("MarkMonitor Registrar. Failed to initialize SFTP Client.")
}
s.sftpClient = sftpClient
log.Debugf("SSH Session created: [%v]", *s.sftpClient)
}
return nil
}
func ParseZoneData(log *log.Entry, zoneLine string) string {
log.Debugf("Domains line: [%s]", zoneLine)
if !strings.HasPrefix(zoneLine, "zone") {
return ""
}
if !strings.Contains(zoneLine, "slave") {
log.Debugf("Skipping zone line [%s]", zoneLine)
return ""
}
dline := strings.SplitN(zoneLine, " ", 4)
if len(dline) < 4 {
log.Warnf("Incomplete zone line: %s", zoneLine)
return ""
}
zoneText := strings.Split(dline[1], "\"")
return zoneText[1]
}
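// Illustrative example (assumed input line; not part of the original source):
//
//   zone "example.com" in { type slave; file "..."; masters { 192.0.2.1; }; };
//
// ParseZoneData returns "example.com". Lines that do not start with "zone" or
// do not contain "slave" return the empty string.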
// ParseDomainFile parses the retrieved domains file. Returns a list of domain names and an error.
func (s *SFTPDNSConfig) ParseDomainFile(log *log.Entry, domFile *os.File) (*[]string, error) {
log.Debugf("Entering ParseDomainFile")
// File line example (excluding preamble and postamble):
// zone "genevarx.com" in { type slave; file "/var/dns-config/dbs/zone.genevarx.com.bak"; masters { 64.124.14.39; }; allow-transfer {def_xfer; }; };
defer closeAndRemoveDomainsFile(log, domFile)
domNames := []string{}
// start from beginning of the file
if _, err := domFile.Seek(0, 0); err != nil {
return &domNames, err
}
scanner := bufio.NewScanner(domFile)
for scanner.Scan() {
zoneLine := strings.TrimSpace(scanner.Text())
zone := ParseZoneData(log, zoneLine)
if zone != "" {
log.Debugf("Adding %s to domain list", zone)
domNames = append(domNames, zone)
}
}
if err := scanner.Err(); err != nil {
log.Errorf(err.Error())
return nil, err
}
return &domNames, nil
}
// ReadRemoteDomainFile reads the remote domains file and saves it to a temp location. Returns the parsed list of domain names and an error.
func (s *SFTPDNSConfig) ReadRemoteDomainFile(log *log.Entry, markmonitorConfig *MarkMonitorSFTPConfig) (*[]string, error) {
log.Debugf("Entering ReadRemoteDomainFile")
fstat, err := s.sftpClient.Stat(markmonitorConfig.MarkMonitorDomainConfigFilePath)
if err != nil {
log.Errorf("ReadRemoteDomainFile: Failed to stat remote domains file. %s", err.Error())
return nil, err
}
modTime := fstat.ModTime()
// reuse the cached list while the remote file has not been modified past the TTL window
if !modTime.After(s.lastDomFileUpdate.Add(s.domFileTTL)) && s.domainMasterList != nil {
return s.domainMasterList, nil
}
// open remote domains file
domsFile, err := s.sftpClient.Open(markmonitorConfig.MarkMonitorDomainConfigFilePath)
if err != nil {
log.Errorf("ReadRemoteDomainFile: Failed to open remote domains file. %s", err.Error())
return nil, err
}
defer domsFile.Close()
// create temporary file
tempFile, err := ioutil.TempFile(markmonitorConfig.MarkMonitorTempDomainFileFolder, "MMDomainConfig-*")
if err != nil {
log.Errorf("ReadRemoteDomainFile: Failed to create temp domains file. %s", err.Error())
return nil, err
}
// copy domains file to temp file
_, err = io.Copy(tempFile, domsFile)
if err != nil {
closeAndRemoveDomainsFile(log, tempFile)
log.Errorf("ReadRemoteDomainFile: Failed to copy file from remote. %s", err.Error())
return nil, err
}
// flush in-memory copy
err = tempFile.Sync()
if err != nil {
closeAndRemoveDomainsFile(log, tempFile)
log.Errorf("ReadRemoteDomainFile: Failed to persist copied file. %s", err.Error())
return nil, err
}
domsList, err := s.ParseDomainFile(log, tempFile)
if err != nil {
return nil, err
}
s.domainMasterList = domsList
s.lastDomFileUpdate = modTime
return domsList, nil
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
luoo_project/wsgi.py | """
WSGI config for luoo_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'luoo_project.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
Hangman/wsgi.py | """
WSGI config for Hangman project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Hangman.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
examples/longRunningClient.go | package main
import (
"encoding/hex"
"log"
"os"
"time"
"gopkg.in/jcmturner/gokrb5.v7/client"
"gopkg.in/jcmturner/gokrb5.v7/config"
"gopkg.in/jcmturner/gokrb5.v7/keytab"
"gopkg.in/jcmturner/gokrb5.v7/test/testdata"
)
const (
kRB5CONF = `[libdefaults]
default_realm = TEST.GOKRB5
dns_lookup_realm = false
dns_lookup_kdc = false
ticket_lifetime = 24h
forwardable = yes
default_tkt_enctypes = aes256-cts-hmac-sha1-96
default_tgs_enctypes = aes256-cts-hmac-sha1-96
[realms]
TEST.GOKRB5 = {
kdc = 10.80.88.88:88
admin_server = 10.80.88.88:749
default_domain = test.gokrb5
}
[domain_realm]
.test.gokrb5 = TEST.GOKRB5
test.gokrb5 = TEST.GOKRB5
`
)
func main() {
l := log.New(os.Stderr, "GOKRB5 Client: ", log.LstdFlags)
//defer profile.Start(profile.TraceProfile).Stop()
// Load the keytab
kb, _ := hex.DecodeString(testdata.TESTUSER2_KEYTAB)
kt := keytab.New()
err := kt.Unmarshal(kb)
if err != nil {
l.Fatalf("could not load client keytab: %v", err)
}
// Load the client krb5 config
conf, err := config.NewConfigFromString(kRB5CONF)
if err != nil {
l.Fatalf("could not load krb5.conf: %v", err)
}
addr := os.Getenv("TEST_KDC_ADDR")
if addr != "" {
conf.Realms[0].KDC = []string{addr + ":88"}
}
// Create the client with the keytab
cl := client.NewClientWithKeytab("testuser2", "TEST.GOKRB5", kt, conf, client.Logger(l), client.DisablePAFXFAST(true))
// Log in the client
err = cl.Login()
if err != nil {
l.Fatalf("could not login client: %v", err)
}
for {
_, _, err := cl.GetServiceTicket("HTTP/host.test.gokrb5")
if err != nil {
l.Printf("failed to get service ticket: %v\n", err)
}
time.Sleep(time.Minute * 5)
}
}
| [
"\"TEST_KDC_ADDR\""
]
| []
| [
"TEST_KDC_ADDR"
]
| [] | ["TEST_KDC_ADDR"] | go | 1 | 0 | |
nipype/utils/filemanip.py | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Miscellaneous file manipulation functions
"""
import sys
import pickle
import errno
import subprocess as sp
import gzip
import hashlib
import locale
from hashlib import md5
import os
import os.path as op
import re
import shutil
import contextlib
import posixpath
from pathlib import Path
import simplejson as json
from time import sleep, time
from .. import logging, config, __version__ as version
from .misc import is_container
fmlogger = logging.getLogger("nipype.utils")
related_filetype_sets = [(".hdr", ".img", ".mat"), (".nii", ".mat"), (".BRIK", ".HEAD")]
def _resolve_with_filenotfound(path, **kwargs):
"""Raise FileNotFoundError instead of OSError"""
try:
return path.resolve(**kwargs)
except OSError as e:
if isinstance(e, FileNotFoundError):
raise
raise FileNotFoundError(str(path))
def path_resolve(path, strict=False):
try:
return _resolve_with_filenotfound(path, strict=strict)
except TypeError: # PY35
pass
path = path.absolute()
if strict or path.exists():
return _resolve_with_filenotfound(path)
# This is a hacky shortcut, using path.absolute() unmodified
# In cases where the existing part of the path contains a
# symlink, different results will be produced
return path
def split_filename(fname):
"""Split a filename into parts: path, base filename and extension.
Parameters
----------
fname : str
file or path name
Returns
-------
pth : str
base path from fname
fname : str
filename from fname, without extension
ext : str
file extension from fname
Examples
--------
>>> from nipype.utils.filemanip import split_filename
>>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')
>>> pth
'/home/data'
>>> fname
'subject'
>>> ext
'.nii.gz'
"""
special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"]
pth = op.dirname(fname)
fname = op.basename(fname)
ext = None
for special_ext in special_extensions:
ext_len = len(special_ext)
if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()):
ext = fname[-ext_len:]
fname = fname[:-ext_len]
break
if not ext:
fname, ext = op.splitext(fname)
return pth, fname, ext
def fname_presuffix(fname, prefix="", suffix="", newpath=None, use_ext=True):
"""Manipulates path and name of input filename
Parameters
----------
fname : string
A filename (may or may not include path)
prefix : string
Characters to prepend to the filename
suffix : string
Characters to append to the filename
newpath : string
Path to replace the path of the input fname
use_ext : boolean
If True (default), appends the extension of the original file
to the output name.
Returns
-------
Absolute path of the modified filename
>>> from nipype.utils.filemanip import fname_presuffix
>>> fname = 'foo.nii.gz'
>>> fname_presuffix(fname,'pre','post','/tmp')
'/tmp/prefoopost.nii.gz'
>>> from nipype.interfaces.base import Undefined
>>> fname_presuffix(fname, 'pre', 'post', Undefined) == \
fname_presuffix(fname, 'pre', 'post')
True
"""
pth, fname, ext = split_filename(fname)
if not use_ext:
ext = ""
# No need for isdefined: bool(Undefined) evaluates to False
if newpath:
pth = op.abspath(newpath)
return op.join(pth, prefix + fname + suffix + ext)
def fnames_presuffix(fnames, prefix="", suffix="", newpath=None, use_ext=True):
"""Calls fname_presuffix for a list of files."""
f2 = []
for fname in fnames:
f2.append(fname_presuffix(fname, prefix, suffix, newpath, use_ext))
return f2
def hash_rename(filename, hashvalue):
"""renames a file given original filename and hash
and sets path to output_directory
"""
path, name, ext = split_filename(filename)
newfilename = "".join((name, "_0x", hashvalue, ext))
return op.join(path, newfilename)
def check_forhash(filename):
"""checks if file has a hash in its filename"""
if isinstance(filename, list):
filename = filename[0]
path, name = op.split(filename)
if re.search("(_0x[a-z0-9]{32})", name):
hashvalue = re.findall("(_0x[a-z0-9]{32})", name)
return True, hashvalue
else:
return False, None
def hash_infile(afile, chunk_len=8192, crypto=hashlib.md5, raise_notfound=False):
"""
Computes hash of a file using 'crypto' module
>>> hash_infile('smri_ants_registration_settings.json')
'f225785dfb0db9032aa5a0e4f2c730ad'
>>> hash_infile('surf01.vtk')
'fdf1cf359b4e346034372cdeb58f9a88'
>>> hash_infile('spminfo')
'0dc55e3888c98a182dab179b976dfffc'
>>> hash_infile('fsl_motion_outliers_fd.txt')
'defd1812c22405b1ee4431aac5bbdd73'
"""
if not op.isfile(afile):
if raise_notfound:
raise RuntimeError('File "%s" not found.' % afile)
return None
crypto_obj = crypto()
with open(afile, "rb") as fp:
while True:
data = fp.read(chunk_len)
if not data:
break
crypto_obj.update(data)
return crypto_obj.hexdigest()
def hash_timestamp(afile):
"""Computes md5 hash of the timestamp of a file"""
md5hex = None
if op.isfile(afile):
md5obj = md5()
stat = os.stat(afile)
md5obj.update(str(stat.st_size).encode())
md5obj.update(str(stat.st_mtime).encode())
md5hex = md5obj.hexdigest()
return md5hex
def _parse_mount_table(exit_code, output):
"""Parses the output of ``mount`` to produce (path, fs_type) pairs
Separated from _generate_cifs_table to enable testing logic with real
outputs
"""
# Not POSIX
if exit_code != 0:
return []
# Linux mount example: sysfs on /sys type sysfs (rw,nosuid,nodev,noexec)
# <PATH>^^^^ ^^^^^<FSTYPE>
# OSX mount example: /dev/disk2 on / (hfs, local, journaled)
# <PATH>^ ^^^<FSTYPE>
pattern = re.compile(r".*? on (/.*?) (?:type |\()([^\s,\)]+)")
# Keep line and match for error reporting (match == None on failure)
# Ignore empty lines
matches = [(l, pattern.match(l)) for l in output.strip().splitlines() if l]
# (path, fstype) tuples, sorted by path length (longest first)
mount_info = sorted(
(match.groups() for _, match in matches if match is not None),
key=lambda x: len(x[0]),
reverse=True,
)
cifs_paths = [path for path, fstype in mount_info if fstype.lower() == "cifs"]
# Report failures as warnings
for line, match in matches:
if match is None:
fmlogger.debug("Cannot parse mount line: '%s'", line)
return [
mount
for mount in mount_info
if any(mount[0].startswith(path) for path in cifs_paths)
]
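# Illustrative example (made-up ``mount`` output; not part of the original source):
#
#     //fileserver/share on /mnt/share type cifs (rw)
#     /dev/sda1 on / type ext4 (rw)
#
# _parse_mount_table(0, output) returns [('/mnt/share', 'cifs')]: only mounts
# that fall under a CIFS path are kept.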
def _generate_cifs_table():
"""Construct a reverse-length-ordered list of mount points that
fall under a CIFS mount.
This precomputation allows efficient checking for whether a given path
would be on a CIFS filesystem.
On systems without a ``mount`` command, or with no CIFS mounts, returns an
empty list.
"""
exit_code, output = sp.getstatusoutput("mount")
return _parse_mount_table(exit_code, output)
_cifs_table = _generate_cifs_table()
def on_cifs(fname):
"""
Checks whether a file path is on a CIFS filesystem mounted in a POSIX
host (i.e., has the ``mount`` command).
On Windows, Docker mounts host directories into containers through CIFS
shares, which has support for Minshall+French symlinks, or text files that
the CIFS driver exposes to the OS as symlinks.
We have found that under concurrent access to the filesystem, this feature
can result in failures to create or read recently-created symlinks,
leading to inconsistent behavior and ``FileNotFoundError``.
This check is written to support disabling symlinks on CIFS shares.
"""
# Only the first match (most recent parent) counts
for fspath, fstype in _cifs_table:
if fname.startswith(fspath):
return fstype == "cifs"
return False
def copyfile(
originalfile,
newfile,
copy=False,
create_new=False,
hashmethod=None,
use_hardlink=False,
copy_related_files=True,
):
"""Copy or link ``originalfile`` to ``newfile``.
If ``use_hardlink`` is True, and the file can be hard-linked, then a
link is created, instead of copying the file.
If a hard link is not created and ``copy`` is False, then a symbolic
link is created.
Parameters
----------
originalfile : str
full path to original file
newfile : str
full path to new file
copy : Bool
specifies whether to copy or symlink files
(default=False) but only for POSIX systems
use_hardlink : Bool
specifies whether to hard-link files, when able
(Default=False), taking precedence over copy
copy_related_files : Bool
specifies whether to also operate on related files, as defined in
``related_filetype_sets``
Returns
-------
None
"""
newhash = None
orighash = None
fmlogger.debug(newfile)
if create_new:
while op.exists(newfile):
base, fname, ext = split_filename(newfile)
s = re.search("_c[0-9]{4,4}$", fname)
i = 0
if s:
i = int(s.group()[2:]) + 1
fname = fname[:-6] + "_c%04d" % i
else:
fname += "_c%04d" % i
newfile = base + os.sep + fname + ext
if hashmethod is None:
hashmethod = config.get("execution", "hash_method").lower()
# Don't try creating symlinks on CIFS
if copy is False and on_cifs(newfile):
copy = True
# Existing file
# -------------
# Options:
# symlink
# to regular file originalfile (keep if symlinking)
# to same dest as symlink originalfile (keep if symlinking)
# to other file (unlink)
# regular file
# hard link to originalfile (keep)
# copy of file (same hash) (keep)
# different file (diff hash) (unlink)
keep = False
if op.lexists(newfile):
if op.islink(newfile):
if all(
(
os.readlink(newfile) == op.realpath(originalfile),
not use_hardlink,
not copy,
)
):
keep = True
elif posixpath.samefile(newfile, originalfile):
keep = True
else:
if hashmethod == "timestamp":
hashfn = hash_timestamp
elif hashmethod == "content":
hashfn = hash_infile
else:
raise AttributeError("Unknown hash method found:", hashmethod)
newhash = hashfn(newfile)
fmlogger.debug(
"File: %s already exists, hash %s, copy:%d", newfile, newhash, copy
)
orighash = hashfn(originalfile)
keep = newhash == orighash
if keep:
fmlogger.debug(
"File: %s already exists, not overwriting, copy:%d", newfile, copy
)
else:
os.unlink(newfile)
# New file
# --------
# use_hardlink & can_hardlink => hardlink
# ~hardlink & ~copy & can_symlink => symlink
# ~hardlink & ~symlink => copy
if not keep and use_hardlink:
try:
fmlogger.debug("Linking File: %s->%s", newfile, originalfile)
# Use realpath to avoid hardlinking symlinks
os.link(op.realpath(originalfile), newfile)
except OSError:
use_hardlink = False # Disable hardlink for associated files
else:
keep = True
if not keep and not copy and os.name == "posix":
try:
fmlogger.debug("Symlinking File: %s->%s", newfile, originalfile)
os.symlink(originalfile, newfile)
except OSError:
copy = True # Disable symlink for associated files
else:
keep = True
if not keep:
try:
fmlogger.debug("Copying File: %s->%s", newfile, originalfile)
shutil.copyfile(originalfile, newfile)
except shutil.Error as e:
fmlogger.warning(str(e))
# Associated files
if copy_related_files:
related_file_pairs = (
get_related_files(f, include_this_file=False)
for f in (originalfile, newfile)
)
for alt_ofile, alt_nfile in zip(*related_file_pairs):
if op.exists(alt_ofile):
copyfile(
alt_ofile,
alt_nfile,
copy,
hashmethod=hashmethod,
use_hardlink=use_hardlink,
copy_related_files=False,
)
return newfile
def get_related_files(filename, include_this_file=True):
"""Returns a list of related files, as defined in
``related_filetype_sets``, for a filename. (e.g., Nifti-Pair, Analyze (SPM)
and AFNI files).
Parameters
----------
filename : str
File name to find related filetypes of.
include_this_file : bool
If true, output includes the input filename.
"""
related_files = []
path, name, this_type = split_filename(filename)
for type_set in related_filetype_sets:
if this_type in type_set:
for related_type in type_set:
if include_this_file or related_type != this_type:
related_files.append(op.join(path, name + related_type))
if not len(related_files):
related_files = [filename]
return related_files
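# Illustrative usage (hypothetical path; not part of the original source):
#
#     get_related_files('/data/subj.hdr')
#
# returns ['/data/subj.hdr', '/data/subj.img', '/data/subj.mat'] per
# related_filetype_sets; a file with no related set returns just [filename].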
def copyfiles(filelist, dest, copy=False, create_new=False):
"""Copy or symlink files in ``filelist`` to ``dest`` directory.
Parameters
----------
filelist : list
List of files to copy.
dest : path/files
full path to destination. If it is a list of length greater
than 1, then it assumes that these are the names of the new
files.
copy : Bool
specifies whether to copy or symlink files
(default=False) but only for posix systems
Returns
-------
None
"""
outfiles = ensure_list(dest)
newfiles = []
for i, f in enumerate(ensure_list(filelist)):
if isinstance(f, list):
newfiles.insert(i, copyfiles(f, dest, copy=copy, create_new=create_new))
else:
if len(outfiles) > 1:
destfile = outfiles[i]
else:
destfile = fname_presuffix(f, newpath=outfiles[0])
destfile = copyfile(f, destfile, copy, create_new=create_new)
newfiles.insert(i, destfile)
return newfiles
def ensure_list(filename):
"""Returns a list given either a string or a list"""
if isinstance(filename, (str, bytes)):
return [filename]
elif isinstance(filename, list):
return filename
elif is_container(filename):
return [x for x in filename]
else:
return None
def simplify_list(filelist):
"""Returns a list if filelist is a list of length greater than 1,
otherwise returns the first element
"""
if len(filelist) > 1:
return filelist
else:
return filelist[0]
filename_to_list = ensure_list
list_to_filename = simplify_list
def check_depends(targets, dependencies):
"""Return true if all targets exist and are newer than all dependencies.
An OSError will be raised if there are missing dependencies.
"""
tgts = ensure_list(targets)
deps = ensure_list(dependencies)
return all(map(op.exists, tgts)) and min(map(op.getmtime, tgts)) > max(
list(map(op.getmtime, deps)) + [0]
)
def save_json(filename, data):
"""Save data to a json file
Parameters
----------
filename : str
Filename to save data in.
data : dict
Dictionary to save in json file.
"""
mode = "w"
with open(filename, mode) as fp:
json.dump(data, fp, sort_keys=True, indent=4)
def load_json(filename):
"""Load data from a json file
Parameters
----------
filename : str
Filename to load data from.
Returns
-------
data : dict
"""
with open(filename, "r") as fp:
data = json.load(fp)
return data
def loadcrash(infile, *args):
if infile.endswith("pkl") or infile.endswith("pklz"):
return loadpkl(infile)
else:
raise ValueError("Only pickled crashfiles are supported")
def loadpkl(infile):
"""Load a zipped or plain cPickled file."""
infile = Path(infile)
fmlogger.debug("Loading pkl: %s", infile)
pklopen = gzip.open if infile.suffix == ".pklz" else open
t = time()
timeout = float(config.get("execution", "job_finished_timeout"))
timed_out = True
while (time() - t) < timeout:
if infile.exists():
timed_out = False
break
fmlogger.debug("'{}' missing; waiting 2s".format(infile))
sleep(2)
if timed_out:
error_message = (
"Result file {0} expected, but "
"does not exist after ({1}) "
"seconds.".format(infile, timeout)
)
raise IOError(error_message)
with pklopen(str(infile), "rb") as pkl_file:
pkl_contents = pkl_file.read()
pkl_metadata = None
# Look if pkl file contains version metadata
idx = pkl_contents.find(b"\n")
if idx >= 0:
try:
pkl_metadata = json.loads(pkl_contents[:idx])
except (UnicodeDecodeError, json.JSONDecodeError):
# Could not get version info
pass
else:
# On success, skip JSON metadata
pkl_contents = pkl_contents[idx + 1 :]
# Pickle files may contain relative paths that must be resolved relative
# to the working directory, so use indirectory while attempting to load
unpkl = None
try:
with indirectory(infile.parent):
unpkl = pickle.loads(pkl_contents)
except UnicodeDecodeError:
# Was this pickle created with Python 2.x?
with indirectory(infile.parent):
unpkl = pickle.loads(pkl_contents, fix_imports=True, encoding="utf-8")
fmlogger.info("Successfully loaded pkl in compatibility mode.")
# Unpickling problems
except Exception as e:
if pkl_metadata and "version" in pkl_metadata:
if pkl_metadata["version"] != version:
fmlogger.error(
"""\
Attempted to open a results file generated by Nipype version %s, \
with an incompatible Nipype version (%s)""",
pkl_metadata["version"],
version,
)
raise e
fmlogger.warning(
"""\
No metadata was found in the pkl file. Make sure you are currently using \
the same Nipype version from the generated pkl."""
)
raise e
if unpkl is None:
raise ValueError("Loading %s resulted in None." % infile)
return unpkl
def crash2txt(filename, record):
"""Write out plain text crash file"""
with open(filename, "w") as fp:
if "node" in record:
node = record["node"]
fp.write("Node: {}\n".format(node.fullname))
fp.write("Working directory: {}\n".format(node.output_dir()))
fp.write("\n")
fp.write("Node inputs:\n{}\n".format(node.inputs))
fp.write("".join(record["traceback"]))
def read_stream(stream, logger=None, encoding=None):
"""
Robustly reads a stream, sending a warning to a logger
if some decoding error was raised.
>>> read_stream(bytearray([65, 0xc7, 65, 10, 66])) # doctest: +ELLIPSIS
['A...A', 'B']
"""
default_encoding = encoding or locale.getdefaultlocale()[1] or "UTF-8"
logger = logger or fmlogger
try:
out = stream.decode(default_encoding)
except UnicodeDecodeError as err:
out = stream.decode(default_encoding, errors="replace")
logger.warning("Error decoding string: %s", err)
return out.splitlines()
def savepkl(filename, record, versioning=False):
from io import BytesIO
with BytesIO() as f:
if versioning:
metadata = json.dumps({"version": version})
f.write(metadata.encode("utf-8"))
f.write("\n".encode("utf-8"))
pickle.dump(record, f)
content = f.getvalue()
pkl_open = gzip.open if filename.endswith(".pklz") else open
tmpfile = filename + ".tmp"
with pkl_open(tmpfile, "wb") as pkl_file:
pkl_file.write(content)
os.rename(tmpfile, filename)
rst_levels = ["=", "-", "~", "+"]
def write_rst_header(header, level=0):
return "\n".join((header, "".join([rst_levels[level] for _ in header]))) + "\n\n"
def write_rst_list(items, prefix=""):
out = []
for item in ensure_list(items):
out.append("{} {}".format(prefix, str(item)))
return "\n".join(out) + "\n\n"
def write_rst_dict(info, prefix=""):
out = []
for key, value in sorted(info.items()):
out.append("{}* {} : {}".format(prefix, key, str(value)))
return "\n".join(out) + "\n\n"
def dist_is_editable(dist):
"""Is distribution an editable install?
Parameters
----------
dist : string
Package name
# Borrowed from `pip`'s API
"""
for path_item in sys.path:
egg_link = op.join(path_item, dist + ".egg-link")
if op.isfile(egg_link):
return True
return False
def emptydirs(path, noexist_ok=False):
"""
Empty an existing directory, without deleting it. Do not
raise error if the path does not exist and noexist_ok is True.
Parameters
----------
path : directory that should be empty
"""
fmlogger.debug("Removing contents of %s", path)
if noexist_ok and not op.exists(path):
return True
if op.isfile(path):
raise OSError('path "%s" should be a directory' % path)
try:
shutil.rmtree(path)
except OSError as ex:
elcont = [
Path(root) / file
for root, _, files in os.walk(path)
for file in files
if not file.startswith(".nfs")
]
if ex.errno in [errno.ENOTEMPTY, errno.EBUSY] and not elcont:
fmlogger.warning(
"An exception was raised trying to remove old %s, but the path"
" seems empty. Is it an NFS mount?. Passing the exception.",
path,
)
elif ex.errno == errno.ENOTEMPTY and elcont:
fmlogger.debug("Folder %s contents (%d items).", path, len(elcont))
raise ex
else:
raise ex
os.makedirs(path, exist_ok=True)
def silentrm(filename):
"""
Equivalent to ``rm -f``, returns ``False`` if the file did not
exist.
Parameters
----------
filename : str
file to be deleted
"""
try:
os.remove(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise
return False
return True
def which(cmd, env=None, pathext=None):
"""
Return the path to an executable which would be run if the given
cmd was called. If no cmd would be called, return ``None``.
Code for Python < 3.3 is based on a code snippet from
http://orip.org/2009/08/python-checking-if-executable-exists-in.html
"""
if pathext is None:
pathext = os.getenv("PATHEXT", "").split(os.pathsep)
pathext.insert(0, "")
path = os.getenv("PATH", os.defpath)
if env and "PATH" in env:
path = env.get("PATH")
for ext in pathext:
filename = shutil.which(cmd + ext, path=path)
if filename:
return filename
return None
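# Illustrative usage (assumes the executable exists; hypothetical paths; not part
# of the original source):
#
#     which('ls')                            # -> '/bin/ls' (or None if absent)
#     which('ls', env={'PATH': '/opt/bin'})  # searches only the given PATH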
def get_dependencies(name, environ):
"""Return library dependencies of a dynamically linked executable
Uses otool on darwin, ldd on linux. Currently doesn't support windows.
"""
command = None
if sys.platform == "darwin":
command = "otool -L `which %s`" % name
elif "linux" in sys.platform:
command = "ldd `which %s`" % name
else:
return "Platform %s not supported" % sys.platform
deps = None
try:
proc = sp.Popen(
command, stdout=sp.PIPE, stderr=sp.PIPE, shell=True, env=environ
)
o, e = proc.communicate()
deps = o.rstrip()
except Exception as ex:
deps = f"{command!r} failed"
fmlogger.warning(f"Could not get dependencies of {name}s. Error:\n{ex}")
return deps
def canonicalize_env(env):
"""Windows requires that environment be dicts with str as keys and values
This function converts any unicode entries for Windows only, returning the
dictionary untouched in other environments.
Parameters
----------
env : dict
environment dictionary with unicode or bytes keys and values
Returns
-------
env : dict
Windows: environment dictionary with str keys and values
Other: untouched input ``env``
"""
if os.name != "nt":
return env
out_env = {}
for key, val in env.items():
if not isinstance(key, str):
key = key.decode("utf-8")
if not isinstance(val, str):
val = val.decode("utf-8")
out_env[key] = val
return out_env
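# Illustrative example (Windows only; not part of the original source):
#
#     canonicalize_env({b'PATH': b'C:\\tools'})  # -> {'PATH': 'C:\\tools'}
#
# On POSIX systems the input dictionary is returned untouched.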
def relpath(path, start=None):
"""Return a relative version of a path"""
try:
return op.relpath(path, start)
except AttributeError:
pass
if start is None:
start = os.curdir
if not path:
raise ValueError("no path specified")
start_list = op.abspath(start).split(op.sep)
path_list = op.abspath(path).split(op.sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = op.splitunc(path)
unc_start, rest = op.splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError(
("Cannot mix UNC and non-UNC paths " "(%s and %s)") % (path, start)
)
else:
raise ValueError(
"path is on drive %s, start on drive %s" % (path_list[0], start_list[0])
)
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [op.pardir] * (len(start_list) - i) + path_list[i:]
if not rel_list:
return os.curdir
return op.join(*rel_list)
@contextlib.contextmanager
def indirectory(path):
cwd = os.getcwd()
os.chdir(str(path))
try:
yield
finally:
os.chdir(cwd)
| []
| []
| [
"PATH",
"PATHEXT"
]
| [] | ["PATH", "PATHEXT"] | python | 2 | 0 | |
tools/test.py | import argparse
import mmcv
import os
import torch
import warnings
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet3d.apis import single_gpu_test
from mmdet3d.datasets import build_dataloader, build_dataset
from mmdet3d.models import build_model
from mmdet.apis import multi_gpu_test, set_random_seed
from mmdet.datasets import replace_ImageToTensor
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
help='Whether to fuse conv and bn, this will slightly increase'
'the inference speed')
parser.add_argument(
'--format-only',
action='store_true',
help='Format the output results without perform evaluation. It is'
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where results will be saved')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function (deprecate), '
'change to --eval-options instead.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.eval_options:
raise ValueError(
'--options and --eval-options cannot be both specified, '
'--options is deprecated in favor of --eval-options')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options')
args.eval_options = args.options
return args
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
# in case the test dataset is concatenated
samples_per_gpu = 1
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(
cfg.data.test.pipeline)
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
samples_per_gpu = max(
[ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
if samples_per_gpu > 1:
for ds_cfg in cfg.data.test:
ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# set random seeds
if args.seed is not None:
set_random_seed(args.seed, deterministic=args.deterministic)
# build the dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_model(cfg.model, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
# old versions did not save class info in checkpoints, this workaround is
# for backward compatibility
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
# palette for visualization in segmentation tasks
if 'PALETTE' in checkpoint.get('meta', {}):
model.PALETTE = checkpoint['meta']['PALETTE']
elif hasattr(dataset, 'PALETTE'):
# segmentation dataset has `PALETTE` attribute
model.PALETTE = dataset.PALETTE
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-coded way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
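# Illustrative invocations (hypothetical config/checkpoint paths; not part of the
# original source):
#
#     python tools/test.py configs/votenet/votenet.py work_dirs/latest.pth --eval mAP
#     python -m torch.distributed.launch --nproc_per_node=4 tools/test.py \
#         configs/votenet/votenet.py work_dirs/latest.pth --launcher pytorch --eval mAP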
| []
| []
| [
"LOCAL_RANK"
]
| [] | ["LOCAL_RANK"] | python | 1 | 0 | |
heron/spi/tests/java/com/twitter/heron/spi/common/ConfigLoaderTest.java | // Copyright 2016 Twitter. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.twitter.heron.spi.common;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Set;
import java.util.TreeSet;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.isNotNull;
import static org.mockito.Matchers.isNull;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
@RunWith(PowerMockRunner.class)
@PrepareForTest(ConfigLoader.class)
public class ConfigLoaderTest {
private static final String TEST_DATA_PATH =
"/__main__/heron/spi/tests/java/com/twitter/heron/spi/common/testdata";
private final String heronHome =
Paths.get(System.getenv("JAVA_RUNFILES"), TEST_DATA_PATH).toString();
private final String configPath = Paths.get(heronHome, "local").toString();
private Config basicConfig;
@Before
public void setUp() {
PowerMockito.spy(ConfigLoader.class);
basicConfig = Config.toLocalMode(ConfigLoader.loadConfig(
heronHome, configPath, "/release/file", "/override/file"));
}
@Test
public void testLoadClusterConfig() {
PowerMockito.spy(ConfigLoader.class);
Config config = Config.toClusterMode(ConfigLoader.loadConfig(
heronHome, configPath, "/release/file", "/override/file"));
assertConfig(config, "./heron-core", "./heron-conf");
}
@Test
public void testLoadDefaultConfig() {
assertConfig(basicConfig, heronHome, configPath);
assertKeyValue(basicConfig, Key.PACKING_CLASS,
"com.twitter.heron.packing.roundrobin.RoundRobinPacking");
assertKeyValue(basicConfig, Key.SCHEDULER_CLASS,
"com.twitter.heron.scheduler.local.LocalScheduler");
assertKeyValue(basicConfig, Key.LAUNCHER_CLASS,
"com.twitter.heron.scheduler.local.LocalLauncher");
assertKeyValue(basicConfig, Key.STATE_MANAGER_CLASS,
"com.twitter.heron.state.localfile.LocalFileStateManager");
assertKeyValue(basicConfig, Key.UPLOADER_CLASS,
"com.twitter.heron.uploader.localfs.FileSystemUploader");
}
private static void assertConfig(Config config,
String heronHome,
String heronConfigPath) {
// assert that the config filenames passed to loadConfig are never null. If they are, the
// config defaults are not producing the config files.
PowerMockito.verifyStatic(times(10));
ConfigLoader.loadConfig(isNotNull(String.class));
PowerMockito.verifyStatic(never());
ConfigLoader.loadConfig(isNull(String.class));
// addFromFile with an empty map means that the config file was not found. Of the 9 files that
// are attempted to be loaded, all but 3 should be found (clientConfig, overrideConfigFile and
// releaseFile do not exist)
PowerMockito.verifyStatic(times(3));
ConfigLoader.addFromFile(eq(new HashMap<String, Object>()));
Set<String> tokenizedValues = new TreeSet<>();
for (Key key : Key.values()) {
if (key.getType() == Key.Type.STRING) {
String value = config.getStringValue(key);
// assert all tokens got replaced, except JAVA_HOME which might not be set on CI hosts
if (value != null && value.contains("${") && !value.contains("${JAVA_HOME}")) {
tokenizedValues.add(value);
}
}
}
assertTrue("Default config values have not all had tokens replaced: " + tokenizedValues,
tokenizedValues.isEmpty());
assertKeyValue(config, Key.HERON_HOME, heronHome);
assertKeyValue(config, Key.HERON_CONF, heronConfigPath);
assertKeyValue(config, Key.HERON_BIN, heronHome + "/bin");
assertKeyValue(config, Key.HERON_DIST, heronHome + "/dist");
assertKeyValue(config, Key.HERON_LIB, heronHome + "/lib");
assertKeyValue(config, Key.HERON_ETC, heronHome + "/etc");
assertKeyValue(config, Key.CLUSTER_YAML, heronConfigPath + "/cluster.yaml");
assertKeyValue(config, Key.CLIENT_YAML, heronConfigPath + "/client.yaml");
assertKeyValue(config, Key.METRICS_YAML, heronConfigPath + "/metrics_sinks.yaml");
assertKeyValue(config, Key.PACKING_YAML, heronConfigPath + "/packing.yaml");
assertKeyValue(config, Key.SCHEDULER_YAML, heronConfigPath + "/scheduler.yaml");
assertKeyValue(config, Key.STATEMGR_YAML, heronConfigPath + "/statemgr.yaml");
assertKeyValue(config, Key.SYSTEM_YAML, heronConfigPath + "/heron_internals.yaml");
assertKeyValue(config, Key.UPLOADER_YAML, heronConfigPath + "/uploader.yaml");
String binPath = config.getStringValue(Key.HERON_BIN);
assertKeyValue(config, Key.EXECUTOR_BINARY, binPath + "/heron-executor");
assertKeyValue(config, Key.STMGR_BINARY, binPath + "/heron-stmgr");
assertKeyValue(config, Key.TMASTER_BINARY, binPath + "/heron-tmaster");
assertKeyValue(config, Key.SHELL_BINARY, binPath + "/heron-shell");
assertKeyValue(config, Key.PYTHON_INSTANCE_BINARY, binPath + "/heron-python-instance");
assertKeyValue(config, Key.CPP_INSTANCE_BINARY, binPath + "/heron-cpp-instance");
String libPath = config.getStringValue(Key.HERON_LIB);
assertKeyValue(config, Key.SCHEDULER_JAR, libPath + "/scheduler/heron-scheduler.jar");
assertKeyValue(config, Key.INSTANCE_CLASSPATH, libPath + "/instance/*");
assertKeyValue(config, Key.METRICSMGR_CLASSPATH, libPath + "/metricsmgr/*");
assertKeyValue(config, Key.PACKING_CLASSPATH, libPath + "/packing/*");
assertKeyValue(config, Key.SCHEDULER_CLASSPATH, libPath + "/scheduler/*");
assertKeyValue(config, Key.STATEMGR_CLASSPATH, libPath + "/statemgr/*");
assertKeyValue(config, Key.UPLOADER_CLASSPATH, libPath + "/uploader/*");
}
private static void assertKeyValue(Config config, Key key, String expected) {
assertEquals("Unexpected value for key " + key, expected, config.get(key));
}
/**
* Test reading the cluster.yaml file
*/
@Test
public void testClusterFile() throws Exception {
Config props = ConfigLoader.loadConfig(Context.clusterFile(basicConfig));
assertEquals(4, props.size());
assertEquals(
"com.twitter.heron.uploader.localfs.FileSystemUploader",
Context.uploaderClass(props)
);
}
@Test
public void testSchedulerFile() throws Exception {
Config props = ConfigLoader.loadConfig(Context.schedulerFile(basicConfig));
assertEquals(2, props.size());
assertEquals(
"com.twitter.heron.scheduler.local.LocalScheduler",
Context.schedulerClass(props)
);
assertEquals(
"com.twitter.heron.scheduler.local.LocalLauncher",
Context.launcherClass(props)
);
}
@Test
public void testPackingFile() throws Exception {
Config props = ConfigLoader.loadConfig(Context.packingFile(basicConfig));
assertEquals(1, props.size());
assertEquals(
"com.twitter.heron.packing.roundrobin.RoundRobinPacking",
props.getStringValue("heron.class.packing.algorithm")
);
}
@Test
public void testUploaderFile() throws Exception {
Config props = ConfigLoader.loadConfig(Context.uploaderFile(basicConfig));
assertEquals(2, props.size());
assertEquals(
"/vagrant/heron/jobs",
props.getStringValue("heron.uploader.file.system.path")
);
}
}
| [
"\"JAVA_RUNFILES\""
]
| []
| [
"JAVA_RUNFILES"
]
| [] | ["JAVA_RUNFILES"] | java | 1 | 0 | |
Three/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Three.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cmd/sample-gitops/main.go | package main
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"os"
"time"
"github.com/fluxcd/go-git-providers/github"
"github.com/fluxcd/go-git-providers/gitprovider"
"github.com/labstack/echo"
homedir "github.com/mitchellh/go-homedir"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"github.com/weaveworks/libgitops/cmd/common"
"github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme"
"github.com/weaveworks/libgitops/pkg/gitdir"
"github.com/weaveworks/libgitops/pkg/logs"
"github.com/weaveworks/libgitops/pkg/storage"
"github.com/weaveworks/libgitops/pkg/storage/transaction"
githubpr "github.com/weaveworks/libgitops/pkg/storage/transaction/pullrequest/github"
"github.com/weaveworks/libgitops/pkg/storage/watch"
"github.com/weaveworks/libgitops/pkg/storage/watch/update"
)
var (
identityFlag = pflag.String("identity-file", "", "Path to where the SSH private key is")
authorNameFlag = pflag.String("author-name", defaultAuthorName, "Author name for Git commits")
authorEmailFlag = pflag.String("author-email", defaultAuthorEmail, "Author email for Git commits")
gitURLFlag = pflag.String("git-url", "", "HTTPS Git URL; where the Git repository is, e.g. https://github.com/luxas/ignite-gitops")
prAssigneeFlag = pflag.StringSlice("pr-assignees", nil, "What user logins to assign for the created PR. The user must have pull access to the repo.")
prMilestoneFlag = pflag.String("pr-milestone", "", "What milestone to tag the PR with")
)
const (
sshKnownHostsFile = "~/.ssh/known_hosts"
defaultAuthorName = "Weave libgitops"
defaultAuthorEmail = "[email protected]"
)
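// Example invocation (repository URL, key path, and token are placeholders):
//
//	GITHUB_TOKEN=<token> go run ./cmd/sample-gitops \
//	  --identity-file ~/.ssh/id_rsa \
//	  --git-url https://github.com/example/example-gitops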
func main() {
// Parse the version flag
common.ParseVersionFlag()
// Run the application
if err := run(*identityFlag, *gitURLFlag, os.Getenv("GITHUB_TOKEN"), *authorNameFlag, *authorEmailFlag); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func expandAndRead(filePath string) ([]byte, error) {
expandedPath, err := homedir.Expand(filePath)
if err != nil {
return nil, err
}
return ioutil.ReadFile(expandedPath)
}
func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error {
// Validate parameters
if len(identityFile) == 0 {
return fmt.Errorf("--identity-file is required")
}
if len(gitURL) == 0 {
return fmt.Errorf("--git-url is required")
}
if len(ghToken) == 0 {
return fmt.Errorf("--github-token is required")
}
if len(authorName) == 0 {
return fmt.Errorf("--author-name is required")
}
if len(authorEmail) == 0 {
return fmt.Errorf("--author-email is required")
}
// Read the identity and known_hosts files
identityContent, err := expandAndRead(identityFile)
if err != nil {
return err
}
knownHostsContent, err := expandAndRead(sshKnownHostsFile)
if err != nil {
return err
}
// Parse the HTTPS clone URL
repoRef, err := gitprovider.ParseOrgRepositoryURL(gitURL)
if err != nil {
return err
}
// Create a new GitHub client using the given token
ghClient, err := github.NewClient(github.WithOAuth2Token(ghToken))
if err != nil {
return err
}
// Authenticate to the GitDirectory using Git SSH
authMethod, err := gitdir.NewSSHAuthMethod(identityContent, knownHostsContent)
if err != nil {
return err
}
// Construct the GitDirectory implementation which backs the storage
gitDir, err := gitdir.NewGitDirectory(repoRef, gitdir.GitDirectoryOptions{
Branch: "master",
Interval: 10 * time.Second,
AuthMethod: authMethod,
})
if err != nil {
return err
}
// Create a new PR provider for the GitStorage
prProvider, err := githubpr.NewGitHubPRProvider(ghClient)
if err != nil {
return err
}
// Create a new GitStorage using the GitDirectory, PR provider, and Serializer
gitStorage, err := transaction.NewGitStorage(gitDir, prProvider, scheme.Serializer)
if err != nil {
return err
}
// Set the log level
logs.Logger.SetLevel(logrus.InfoLevel)
watchStorage, err := watch.NewManifestStorage(gitDir.Dir(), scheme.Serializer)
if err != nil {
return err
}
defer func() { _ = watchStorage.Close() }()
updates := make(chan update.Update, 4096)
watchStorage.SetUpdateStream(updates)
go func() {
for upd := range updates {
logrus.Infof("Got %s update for: %v %v", upd.Event, upd.PartialObject.GetObjectKind().GroupVersionKind(), upd.PartialObject.GetObjectMeta())
}
}()
e := common.NewEcho()
e.GET("/git/", func(c echo.Context) error {
objs, err := gitStorage.List(storage.NewKindKey(common.CarGVK))
if err != nil {
return err
}
return c.JSON(http.StatusOK, objs)
})
e.PUT("/git/:name", func(c echo.Context) error {
name := c.Param("name")
if len(name) == 0 {
return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
}
objKey := common.CarKeyForName(name)
err := gitStorage.Transaction(context.Background(), fmt.Sprintf("%s-update-", name), func(ctx context.Context, s storage.Storage) (transaction.CommitResult, error) {
// Update the status of the car
if err := common.SetNewCarStatus(s, objKey); err != nil {
return nil, err
}
return &transaction.GenericPullRequestResult{
CommitResult: &transaction.GenericCommitResult{
AuthorName: authorName,
AuthorEmail: authorEmail,
Title: "Update Car speed",
Description: "We really need to sync this state!",
},
Labels: []string{"user/bot", "actuator/libgitops", "kind/status-update"},
Assignees: *prAssigneeFlag,
Milestone: *prMilestoneFlag,
}, nil
})
if err != nil {
return err
}
return c.String(200, "OK!")
})
return common.StartEcho(e)
}
| [
"\"GITHUB_TOKEN\""
]
| []
| [
"GITHUB_TOKEN"
]
| [] | ["GITHUB_TOKEN"] | go | 1 | 0 | |
pkg/cmd/login/login.go | // Copyright 2020 The Okteto Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package login
import (
"context"
"fmt"
"net/http"
"os"
"time"
"github.com/okteto/okteto/pkg/log"
"github.com/okteto/okteto/pkg/model"
"github.com/okteto/okteto/pkg/okteto"
"github.com/skratchdot/open-golang/open"
)
// WithEnvVarIfAvailable authenticates the user with OKTETO_TOKEN value
func WithEnvVarIfAvailable(ctx context.Context) error {
oktetoToken := os.Getenv("OKTETO_TOKEN")
if oktetoToken == "" {
return nil
}
if u, err := okteto.GetToken(); err == nil {
if u.Token == oktetoToken {
return nil
}
}
oktetoURL := os.Getenv("OKTETO_URL")
if oktetoURL == "" {
oktetoURL = okteto.CloudURL
}
if _, err := WithToken(ctx, oktetoURL, oktetoToken); err != nil {
return fmt.Errorf("error executing auto-login with 'OKTETO_TOKEN': %s", err)
}
return nil
}
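// For example, exporting these variables (placeholder values) makes
// WithEnvVarIfAvailable log the user in automatically:
//
//	export OKTETO_TOKEN=<api-token>
//	export OKTETO_URL=<instance-url>   # optional; defaults to okteto.CloudURL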
// WithToken authenticates the user with an API token
func WithToken(ctx context.Context, url, token string) (*okteto.User, error) {
return okteto.AuthWithToken(ctx, url, token)
}
// WithBrowser authenticates the user with the browser
func WithBrowser(ctx context.Context, oktetoURL string) (*okteto.User, error) {
h, err := StartWithBrowser(ctx, oktetoURL)
if err != nil {
log.Infof("couldn't start the login process: %s", err)
return nil, fmt.Errorf("couldn't start the login process, please try again")
}
authorizationURL := h.AuthorizationURL()
fmt.Println("Authentication will continue in your default browser")
if err := open.Start(authorizationURL); err != nil {
log.Errorf("Something went wrong opening your browser: %s\n", err)
}
fmt.Printf("You can also open a browser and navigate to the following address:\n")
fmt.Println(authorizationURL)
return EndWithBrowser(ctx, h)
}
// StartWithBrowser starts the authentication of the user with the IDP via a browser
func StartWithBrowser(ctx context.Context, url string) (*Handler, error) {
state, err := randToken()
if err != nil {
log.Infof("couldn't generate random token: %s", err)
return nil, fmt.Errorf("couldn't generate a random token, please try again")
}
port, err := model.GetAvailablePort()
if err != nil {
log.Infof("couldn't access the network: %s", err)
return nil, fmt.Errorf("couldn't access the network")
}
handler := &Handler{
baseURL: url,
port: port,
ctx: context.Background(),
state: state,
errChan: make(chan error, 2),
response: make(chan string, 2),
}
return handler, nil
}
// EndWithBrowser finishes the browser based auth
func EndWithBrowser(ctx context.Context, h *Handler) (*okteto.User, error) {
go func() {
http.Handle("/authorization-code/callback", h.handle())
h.errChan <- http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", h.port), nil)
}()
ticker := time.NewTicker(5 * time.Minute)
var code string
select {
case <-ticker.C:
h.ctx.Done()
return nil, fmt.Errorf("authentication timeout")
case code = <-h.response:
break
case e := <-h.errChan:
h.ctx.Done()
return nil, e
}
return okteto.Auth(ctx, code, h.baseURL)
}
| [
"\"OKTETO_TOKEN\"",
"\"OKTETO_URL\""
]
| []
| [
"OKTETO_TOKEN",
"OKTETO_URL"
]
| [] | ["OKTETO_TOKEN", "OKTETO_URL"] | go | 2 | 0 | |
api.go | // Package api starts the api server.
package api
import (
"encoding/json"
"flag"
"fmt"
"log"
"net/http"
"os"
"time"
"github.com/uttarasridhar/api/postgres"
"github.com/uttarasridhar/api/server"
"github.com/uttarasridhar/api/tweets"
"github.com/gorilla/mux"
)
// Run starts the server.
func Run() error {
addr := flag.String("addr", ":8080", "port to listen on")
flag.Parse()
secret := struct {
Username string `json:"username"`
Password string `json:"password"`
}{}
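	// RDS_SECRET is expected to hold a JSON document matching the struct above,
	// e.g. (placeholder values): {"username":"admin","password":"s3cret"}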
if err := json.Unmarshal([]byte(os.Getenv("RDS_SECRET")), &secret); err != nil {
return fmt.Errorf("api: unmarshal rds secret: %v", err)
}
conn, close, err := postgres.Connect(
os.Getenv("RDS_ENDPOINT"),
postgres.Port,
secret.Username,
secret.Password,
os.Getenv("DB_NAME"),
os.Getenv("DB_SSL_MODE"),
)
if err != nil {
return fmt.Errorf("api: connect to postgres db: %v", err)
}
defer close()
if err := tweets.CreateTweetsTableIfNotExist(conn); err != nil {
return fmt.Errorf("api: create table: %v", err)
}
if err := tweets.CreateEmojisTableIfNotExist(conn); err != nil {
return fmt.Errorf("api: create table: %v", err)
}
db := tweets.NewSQLDB(conn)
tweets := tweets.NewTweetsController(db)
emojis := tweets.NewEmojisController(db)
s := http.Server{
Addr: *addr,
Handler: &server.Server{
Router: mux.NewRouter(),
Tweets: tweets,
Emojis: emojis,
},
WriteTimeout: time.Second * 15,
ReadTimeout: time.Second * 15,
IdleTimeout: time.Second * 60,
}
log.Printf("listen on port %s\n", *addr)
return s.ListenAndServe()
}
| [
"\"RDS_SECRET\"",
"\"RDS_ENDPOINT\"",
"\"DB_NAME\"",
"\"DB_SSL_MODE\""
]
| []
| [
"DB_SSL_MODE",
"DB_NAME",
"RDS_ENDPOINT",
"RDS_SECRET"
]
| [] | ["DB_SSL_MODE", "DB_NAME", "RDS_ENDPOINT", "RDS_SECRET"] | go | 4 | 0 | |
integration_tests/test_suites/celery-k8s-integration-test-suite/test_integration.py | # pylint doesn't know about pytest fixtures
# pylint: disable=unused-argument
import datetime
import os
import time
import boto3
import pytest
from dagster import DagsterEventType
from dagster.core.storage.pipeline_run import PipelineRunStatus
from dagster.core.storage.tags import DOCKER_IMAGE_TAG
from dagster.core.test_utils import create_run_for_test
from dagster.utils import merge_dicts
from dagster.utils.yaml_utils import merge_yamls
from dagster_celery_k8s.launcher import CeleryK8sRunLauncher
from dagster_k8s.test import wait_for_job_and_get_raw_logs
from dagster_k8s_test_infra.helm import TEST_AWS_CONFIGMAP_NAME
from dagster_k8s_test_infra.integration_utils import image_pull_policy
from dagster_test.test_project import (
ReOriginatedExternalPipelineForTest,
get_test_project_environments_path,
get_test_project_workspace_and_external_pipeline,
)
IS_BUILDKITE = os.getenv("BUILDKITE") is not None
def get_celery_engine_config(dagster_docker_image, job_namespace):
return {
"execution": {
"celery-k8s": {
"config": merge_dicts(
(
{
"job_image": dagster_docker_image,
}
if dagster_docker_image
else {}
),
{
"job_namespace": job_namespace,
"image_pull_policy": image_pull_policy(),
"env_config_maps": ["dagster-pipeline-env"]
+ ([TEST_AWS_CONFIGMAP_NAME] if not IS_BUILDKITE else []),
},
)
}
},
}
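# For example (illustrative values), get_celery_engine_config("dagster/img:1", "test-ns")
# returns:
#   {"execution": {"celery-k8s": {"config": {
#       "job_image": "dagster/img:1", "job_namespace": "test-ns",
#       "image_pull_policy": image_pull_policy(),
#       "env_config_maps": ["dagster-pipeline-env", ...]}}}}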
def test_execute_on_celery_k8s_default( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace
):
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env.yaml"),
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
pipeline_name = "demo_pipeline_celery"
with get_test_project_workspace_and_external_pipeline(dagster_instance, pipeline_name) as (
workspace,
external_pipeline,
):
reoriginated_pipeline = ReOriginatedExternalPipelineForTest(external_pipeline)
run = create_run_for_test(
dagster_instance,
pipeline_name=pipeline_name,
run_config=run_config,
mode="default",
external_pipeline_origin=reoriginated_pipeline.get_external_origin(),
pipeline_code_origin=reoriginated_pipeline.get_python_origin(),
)
dagster_instance.launch_run(run.run_id, workspace)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
updated_run = dagster_instance.get_run_by_id(run.run_id)
assert updated_run.tags[DOCKER_IMAGE_TAG] == dagster_docker_image
def test_execute_on_celery_k8s_image_from_origin( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace
):
# Like the previous test, but the image is included in the pipeline origin
# rather than in the executor config
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env.yaml"),
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_engine_config(dagster_docker_image=None, job_namespace=helm_namespace),
)
pipeline_name = "demo_pipeline_celery"
with get_test_project_workspace_and_external_pipeline(
dagster_instance, pipeline_name, container_image=dagster_docker_image
) as (workspace, external_pipeline):
reoriginated_pipeline = ReOriginatedExternalPipelineForTest(
external_pipeline, container_image=dagster_docker_image
)
run = create_run_for_test(
dagster_instance,
pipeline_name=pipeline_name,
run_config=run_config,
mode="default",
external_pipeline_origin=reoriginated_pipeline.get_external_origin(),
pipeline_code_origin=reoriginated_pipeline.get_python_origin(),
)
dagster_instance.launch_run(run.run_id, workspace)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
updated_run = dagster_instance.get_run_by_id(run.run_id)
assert updated_run.tags[DOCKER_IMAGE_TAG] == dagster_docker_image
def test_execute_subset_on_celery_k8s( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace
):
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env_subset.yaml"),
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
pipeline_name = "demo_pipeline_celery"
with get_test_project_workspace_and_external_pipeline(dagster_instance, pipeline_name) as (
workspace,
external_pipeline,
):
reoriginated_pipeline = ReOriginatedExternalPipelineForTest(external_pipeline)
run = create_run_for_test(
dagster_instance,
pipeline_name=pipeline_name,
run_config=run_config,
mode="default",
solids_to_execute={"count_letters"},
external_pipeline_origin=reoriginated_pipeline.get_external_origin(),
pipeline_code_origin=reoriginated_pipeline.get_python_origin(),
)
dagster_instance.launch_run(run.run_id, workspace)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
def test_execute_on_celery_k8s_retry_pipeline( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace
):
run_config = merge_dicts(
merge_yamls([os.path.join(get_test_project_environments_path(), "env_s3.yaml")]),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
pipeline_name = "retry_pipeline"
with get_test_project_workspace_and_external_pipeline(dagster_instance, pipeline_name) as (
workspace,
external_pipeline,
):
reoriginated_pipeline = ReOriginatedExternalPipelineForTest(external_pipeline)
run = create_run_for_test(
dagster_instance,
pipeline_name=pipeline_name,
run_config=run_config,
mode="default",
external_pipeline_origin=reoriginated_pipeline.get_external_origin(),
pipeline_code_origin=reoriginated_pipeline.get_python_origin(),
)
dagster_instance.launch_run(run.run_id, workspace)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
stats = dagster_instance.get_run_stats(run.run_id)
assert stats.steps_succeeded == 1
assert DagsterEventType.STEP_START in [
event.dagster_event.event_type
for event in dagster_instance.all_logs(run.run_id)
if event.is_dagster_event
]
assert DagsterEventType.STEP_UP_FOR_RETRY in [
event.dagster_event.event_type
for event in dagster_instance.all_logs(run.run_id)
if event.is_dagster_event
]
assert DagsterEventType.STEP_RESTARTED in [
event.dagster_event.event_type
for event in dagster_instance.all_logs(run.run_id)
if event.is_dagster_event
]
assert DagsterEventType.STEP_SUCCESS in [
event.dagster_event.event_type
for event in dagster_instance.all_logs(run.run_id)
if event.is_dagster_event
]
def test_execute_on_celery_k8s_with_resource_requirements( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace
):
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
pipeline_name = "resources_limit_pipeline"
with get_test_project_workspace_and_external_pipeline(dagster_instance, pipeline_name) as (
workspace,
external_pipeline,
):
reoriginated_pipeline = ReOriginatedExternalPipelineForTest(external_pipeline)
run = create_run_for_test(
dagster_instance,
pipeline_name=pipeline_name,
run_config=run_config,
mode="default",
external_pipeline_origin=reoriginated_pipeline.get_external_origin(),
pipeline_code_origin=reoriginated_pipeline.get_python_origin(),
)
dagster_instance.launch_run(run.run_id, workspace)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
def _test_termination(dagster_instance, run_config):
pipeline_name = "resource_pipeline"
with get_test_project_workspace_and_external_pipeline(dagster_instance, pipeline_name) as (
workspace,
external_pipeline,
):
reoriginated_pipeline = ReOriginatedExternalPipelineForTest(external_pipeline)
run = create_run_for_test(
dagster_instance,
pipeline_name=pipeline_name,
run_config=run_config,
mode="default",
external_pipeline_origin=reoriginated_pipeline.get_external_origin(),
pipeline_code_origin=reoriginated_pipeline.get_python_origin(),
)
dagster_instance.launch_run(run.run_id, workspace)
assert isinstance(dagster_instance.run_launcher, CeleryK8sRunLauncher)
# Wait for pipeline run to start
timeout = datetime.timedelta(0, 120)
start_time = datetime.datetime.now()
can_terminate = False
while datetime.datetime.now() < start_time + timeout:
if dagster_instance.run_launcher.can_terminate(run_id=run.run_id):
can_terminate = True
break
time.sleep(5)
assert can_terminate
# Wait for step to start
step_start_found = False
start_time = datetime.datetime.now()
while datetime.datetime.now() < start_time + timeout:
event_records = dagster_instance.all_logs(run.run_id)
for event_record in event_records:
if (
event_record.dagster_event
and event_record.dagster_event.event_type == DagsterEventType.STEP_START
):
step_start_found = True
break
if step_start_found:
break
time.sleep(5)
assert step_start_found
# Terminate run
assert dagster_instance.run_launcher.can_terminate(run_id=run.run_id)
assert dagster_instance.run_launcher.terminate(run_id=run.run_id)
# Check that pipeline run is marked as canceled
pipeline_run_status_canceled = False
start_time = datetime.datetime.now()
while datetime.datetime.now() < start_time + timeout:
pipeline_run = dagster_instance.get_run_by_id(run.run_id)
if pipeline_run.status == PipelineRunStatus.CANCELED:
pipeline_run_status_canceled = True
break
time.sleep(5)
assert pipeline_run_status_canceled
# Check that terminate cannot be called again
assert not dagster_instance.run_launcher.can_terminate(run_id=run.run_id)
assert not dagster_instance.run_launcher.terminate(run_id=run.run_id)
# Check for step failure and resource tear down
expected_events_found = False
start_time = datetime.datetime.now()
while datetime.datetime.now() < start_time + timeout:
step_failures_count = 0
resource_tear_down_count = 0
resource_init_count = 0
termination_request_count = 0
termination_success_count = 0
event_records = dagster_instance.all_logs(run.run_id)
for event_record in event_records:
if event_record.dagster_event:
if event_record.dagster_event.event_type == DagsterEventType.STEP_FAILURE:
step_failures_count += 1
elif (
event_record.dagster_event.event_type == DagsterEventType.PIPELINE_CANCELING
):
termination_request_count += 1
elif (
event_record.dagster_event.event_type == DagsterEventType.PIPELINE_CANCELED
):
termination_success_count += 1
elif event_record.message:
if "initializing s3_resource_with_context_manager" in event_record.message:
resource_init_count += 1
if "tearing down s3_resource_with_context_manager" in event_record.message:
resource_tear_down_count += 1
if (
step_failures_count == 1
and resource_init_count == 1
and resource_tear_down_count == 1
and termination_request_count == 1
and termination_success_count == 1
):
expected_events_found = True
break
time.sleep(5)
assert expected_events_found
s3 = boto3.resource(
"s3", region_name="us-west-1", use_ssl=True, endpoint_url=None
).meta.client
bucket = "dagster-scratch-80542c2"
key = "resource_termination_test/{}".format(run.run_id)
assert s3.get_object(Bucket=bucket, Key=key)
def test_execute_on_celery_k8s_with_termination( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace
):
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
_test_termination(dagster_instance, run_config)
@pytest.fixture(scope="function")
def set_dagster_k8s_pipeline_run_namespace_env(helm_namespace):
    old_value = os.getenv("DAGSTER_K8S_PIPELINE_RUN_NAMESPACE")
    try:
        os.environ["DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"] = helm_namespace
        yield
    finally:
        # restore the previous value; remove the variable entirely if it was unset before
        if old_value is not None:
            os.environ["DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"] = old_value
        else:
            os.environ.pop("DAGSTER_K8S_PIPELINE_RUN_NAMESPACE", None)
def test_execute_on_celery_k8s_with_env_var_and_termination( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, set_dagster_k8s_pipeline_run_namespace_env
):
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image,
job_namespace={"env": "DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"},
),
)
_test_termination(dagster_instance, run_config)
def test_execute_on_celery_k8s_with_hard_failure( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, set_dagster_k8s_pipeline_run_namespace_env
):
run_config = merge_dicts(
merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image,
job_namespace={"env": "DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"},
),
),
{"solids": {"hard_fail_or_0": {"config": {"fail": True}}}},
)
pipeline_name = "hard_failer"
with get_test_project_workspace_and_external_pipeline(dagster_instance, pipeline_name) as (
workspace,
external_pipeline,
):
reoriginated_pipeline = ReOriginatedExternalPipelineForTest(external_pipeline)
run = create_run_for_test(
dagster_instance,
pipeline_name=pipeline_name,
run_config=run_config,
mode="default",
external_pipeline_origin=reoriginated_pipeline.get_external_origin(),
pipeline_code_origin=reoriginated_pipeline.get_python_origin(),
)
dagster_instance.launch_run(run.run_id, workspace)
assert isinstance(dagster_instance.run_launcher, CeleryK8sRunLauncher)
# Check that pipeline run is marked as failed
pipeline_run_status_failure = False
start_time = datetime.datetime.now()
timeout = datetime.timedelta(0, 120)
while datetime.datetime.now() < start_time + timeout:
pipeline_run = dagster_instance.get_run_by_id(run.run_id)
if pipeline_run.status == PipelineRunStatus.FAILURE:
pipeline_run_status_failure = True
break
time.sleep(5)
assert pipeline_run_status_failure
# Check for step failure for hard_fail_or_0.compute
start_time = datetime.datetime.now()
step_failure_found = False
while datetime.datetime.now() < start_time + timeout:
event_records = dagster_instance.all_logs(run.run_id)
for event_record in event_records:
if event_record.dagster_event:
if (
event_record.dagster_event.event_type == DagsterEventType.STEP_FAILURE
and event_record.dagster_event.step_key == "hard_fail_or_0"
):
step_failure_found = True
break
time.sleep(5)
assert step_failure_found
| []
| []
| [
"DAGSTER_K8S_PIPELINE_RUN_NAMESPACE",
"BUILDKITE"
]
| [] | ["DAGSTER_K8S_PIPELINE_RUN_NAMESPACE", "BUILDKITE"] | python | 2 | 0 | |
core/src/test/java/google/registry/testing/GpgSystemCommandExtension.java | // Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.testing;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Strings.isNullOrEmpty;
import static com.google.common.truth.Truth.assertWithMessage;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.common.io.ByteSource;
import com.google.common.io.CharStreams;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.file.Files;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Objects;
import org.junit.jupiter.api.extension.AfterEachCallback;
import org.junit.jupiter.api.extension.BeforeEachCallback;
import org.junit.jupiter.api.extension.ExtensionContext;
/**
* GnuPG system command JUnit extension.
*
 * <p>This extension creates an isolated environment for running the {@code gpg} command inside
* system integration tests. It reduces a lot of the boilerplate of setting up the shell environment
* and importing your keyrings into a temporary config folder.
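 *
 * <p>A minimal usage sketch (the keyring {@code ByteSource}s are placeholders):
 *
 * <pre>{@code
 * @RegisterExtension
 * final GpgSystemCommandExtension gpg =
 *     new GpgSystemCommandExtension(publicKeyringBytes, privateKeyringBytes);
 *
 * @Test
 * void gpgIsAvailable() throws Exception {
 *   Process pid = gpg.exec("gpg", "--version");
 *   assertThat(pid.waitFor()).isEqualTo(0);
 * }
 * }</pre>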
*/
public final class GpgSystemCommandExtension implements BeforeEachCallback, AfterEachCallback {
private static final File DEV_NULL = new File("/dev/null");
private static final String TEMP_FILE_PREFIX = "gpgtest";
private File cwd = DEV_NULL;
private File conf = DEV_NULL;
private String[] env = {};
private final ByteSource publicKeyring;
private final ByteSource privateKeyring;
private final Runtime runtime = Runtime.getRuntime();
/** Constructs a new {@link GpgSystemCommandExtension} instance. */
public GpgSystemCommandExtension(ByteSource publicKeyring, ByteSource privateKeyring) {
this.publicKeyring = checkNotNull(publicKeyring, "publicKeyring");
this.privateKeyring = checkNotNull(privateKeyring, "privateKeyring");
}
/** Returns the temporary directory from which commands are run. */
public File getCwd() {
checkState(!Objects.equals(cwd, DEV_NULL));
return cwd;
}
/** Returns the temporary directory in which GnuPG configs are stored. */
public File getConf() {
checkState(!Objects.equals(conf, DEV_NULL));
return conf;
}
/**
* Runs specified system command and arguments within the GPG testing environment.
*
* @see Runtime#exec(String[])
*/
public final Process exec(String... args) throws IOException {
checkState(!Objects.equals(cwd, DEV_NULL));
checkArgument(args.length > 0, "args");
return runtime.exec(args, env, cwd);
}
@Override
public void beforeEach(ExtensionContext context) throws IOException, InterruptedException {
checkState(Objects.equals(cwd, DEV_NULL));
String tmpRootDirString = System.getenv("TMPDIR");
    // Create the working directory for the forked process on the temp file system. Create it under
    // the path specified by the 'TMPDIR' environment variable if defined, otherwise under the
    // runtime's default (typically /tmp).
cwd =
isNullOrEmpty(tmpRootDirString)
? File.createTempFile(TEMP_FILE_PREFIX, "")
: File.createTempFile(TEMP_FILE_PREFIX, "", new File(tmpRootDirString));
cwd.delete();
cwd.mkdir();
conf = new File(cwd, ".gnupg");
conf.mkdir();
Files.setPosixFilePermissions(conf.toPath(), PosixFilePermissions.fromString("rwx------"));
env =
new String[] {
"PATH=" + System.getenv("PATH"), "GNUPGHOME=" + conf.getAbsolutePath(),
};
Process pid = exec("gpg", "--import");
publicKeyring.copyTo(pid.getOutputStream());
pid.getOutputStream().close();
int returnValue = pid.waitFor();
assertWithMessage(
String.format("Failed to import public keyring: \n%s", slurp(pid.getErrorStream())))
.that(returnValue)
.isEqualTo(0);
pid = exec("gpg", "--allow-secret-key-import", "--import");
privateKeyring.copyTo(pid.getOutputStream());
pid.getOutputStream().close();
returnValue = pid.waitFor();
assertWithMessage(
String.format("Failed to import private keyring: \n%s", slurp(pid.getErrorStream())))
.that(returnValue)
.isEqualTo(0);
}
@Override
public void afterEach(ExtensionContext context) {
// TODO(weiminyu): we should delete the cwd tree.
cwd = DEV_NULL;
conf = DEV_NULL;
}
private String slurp(InputStream is) throws IOException {
return CharStreams.toString(new InputStreamReader(is, UTF_8));
}
}
| [
"\"TMPDIR\"",
"\"PATH\""
]
| []
| [
"PATH",
"TMPDIR"
]
| [] | ["PATH", "TMPDIR"] | java | 2 | 0 | |
private/model/cli/gen-api/main.go | // +build codegen
// Command aws-gen-gocli parses a JSON description of an AWS API and generates a
// Go file containing a client for the API.
//
// aws-gen-gocli apis/s3/2006-03-03/api-2.json
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime/debug"
"sort"
"strings"
"sync"
"github.com/ks3sdk/ks3-go-sdk/private/model/api"
"github.com/ks3sdk/ks3-go-sdk/private/util"
)
type generateInfo struct {
*api.API
PackageDir string
}
var excludeServices = map[string]struct{}{
"importexport": {},
}
// newGenerateInfo initializes the service API's folder structure for a specific service.
// If the SERVICES environment variable is set, and this service is not apart of the list
// this service will be skipped.
func newGenerateInfo(modelFile, svcPath, svcImportPath string) *generateInfo {
g := &generateInfo{API: &api.API{SvcClientImportPath: svcImportPath, BaseCrosslinkURL: "https://docs.aws.amazon.com"}}
g.API.Attach(modelFile)
if _, ok := excludeServices[g.API.PackageName()]; ok {
return nil
}
paginatorsFile := strings.Replace(modelFile, "api-2.json", "paginators-1.json", -1)
if _, err := os.Stat(paginatorsFile); err == nil {
g.API.AttachPaginators(paginatorsFile)
} else if !os.IsNotExist(err) {
fmt.Println("api-2.json error:", err)
}
docsFile := strings.Replace(modelFile, "api-2.json", "docs-2.json", -1)
if _, err := os.Stat(docsFile); err == nil {
g.API.AttachDocs(docsFile)
} else {
fmt.Println("docs-2.json error:", err)
}
waitersFile := strings.Replace(modelFile, "api-2.json", "waiters-2.json", -1)
if _, err := os.Stat(waitersFile); err == nil {
g.API.AttachWaiters(waitersFile)
} else if !os.IsNotExist(err) {
fmt.Println("waiters-2.json error:", err)
}
examplesFile := strings.Replace(modelFile, "api-2.json", "examples-1.json", -1)
if _, err := os.Stat(examplesFile); err == nil {
g.API.AttachExamples(examplesFile)
} else if !os.IsNotExist(err) {
fmt.Println("examples-1.json error:", err)
}
// pkgDocAddonsFile := strings.Replace(modelFile, "api-2.json", "go-pkg-doc.gotmpl", -1)
// if _, err := os.Stat(pkgDocAddonsFile); err == nil {
// g.API.AttachPackageDocAddons(pkgDocAddonsFile)
// } else if !os.IsNotExist(err) {
// fmt.Println("go-pkg-doc.gotmpl error:", err)
// }
g.API.Setup()
if svc := os.Getenv("SERVICES"); svc != "" {
svcs := strings.Split(svc, ",")
included := false
for _, s := range svcs {
if s == g.API.PackageName() {
included = true
break
}
}
if !included {
// skip this non-included service
return nil
}
}
// ensure the directory exists
pkgDir := filepath.Join(svcPath, g.API.PackageName())
os.MkdirAll(pkgDir, 0775)
os.MkdirAll(filepath.Join(pkgDir, g.API.InterfacePackageName()), 0775)
g.PackageDir = pkgDir
return g
}
// Generates service api, examples, and interface from api json definition files.
//
// Flags:
// -path alternative service path to write generated files to for each service.
//
// Env:
// SERVICES comma separated list of services to generate.
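//
// Example (paths and the service list are illustrative):
//
//	SERVICES=s3,ec2 go run private/model/cli/gen-api/main.go \
//	  -path service "apis/*/*/api-2.json"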
func main() {
var svcPath, sessionPath, svcImportPath string
flag.StringVar(&svcPath, "path", "service", "directory to generate service clients in")
flag.StringVar(&sessionPath, "sessionPath", filepath.Join("aws", "session"), "generate session service client factories")
flag.StringVar(&svcImportPath, "svc-import-path", "github.com/ks3sdk/ks3-go-sdk/service", "namespace to generate service client Go code import path under")
flag.Parse()
api.Bootstrap()
files := []string{}
for i := 0; i < flag.NArg(); i++ {
file := flag.Arg(i)
if strings.Contains(file, "*") {
paths, _ := filepath.Glob(file)
files = append(files, paths...)
} else {
files = append(files, file)
}
}
for svcName := range excludeServices {
if strings.Contains(os.Getenv("SERVICES"), svcName) {
fmt.Printf("Service %s is not supported\n", svcName)
os.Exit(1)
}
}
sort.Strings(files)
// Remove old API versions from list
m := map[string]bool{}
for i := range files {
idx := len(files) - 1 - i
parts := strings.Split(files[idx], string(filepath.Separator))
		svc := parts[len(parts)-3] // service name is the third-to-last path component, e.g. "apis/s3/2006-03-03/api-2.json" -> "s3"
if m[svc] {
files[idx] = "" // wipe this one out if we already saw the service
}
m[svc] = true
}
wg := sync.WaitGroup{}
for i := range files {
filename := files[i]
if filename == "" { // empty file
continue
}
genInfo := newGenerateInfo(filename, svcPath, svcImportPath)
if genInfo == nil {
continue
}
if _, ok := excludeServices[genInfo.API.PackageName()]; ok {
// Skip services not yet supported.
continue
}
wg.Add(1)
go func(g *generateInfo, filename string) {
defer wg.Done()
writeServiceFiles(g, filename)
}(genInfo, filename)
}
wg.Wait()
}
func writeServiceFiles(g *generateInfo, filename string) {
defer func() {
if r := recover(); r != nil {
fmt.Fprintf(os.Stderr, "Error generating %s\n%s\n%s\n",
filename, r, debug.Stack())
}
}()
fmt.Printf("Generating %s (%s)...\n",
g.API.PackageName(), g.API.Metadata.APIVersion)
// write files for service client and API
Must(writeServiceDocFile(g))
Must(writeAPIFile(g))
Must(writeServiceFile(g))
Must(writeInterfaceFile(g))
Must(writeWaitersFile(g))
Must(writeAPIErrorsFile(g))
Must(writeExamplesFile(g))
}
// Must will panic if the error passed in is not nil.
func Must(err error) {
if err != nil {
panic(err)
}
}
const codeLayout = `// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
%s
package %s
%s
`
func writeGoFile(file string, layout string, args ...interface{}) error {
return ioutil.WriteFile(file, []byte(util.GoFmt(fmt.Sprintf(layout, args...))), 0664)
}
// writeServiceDocFile generates the documentation for service package.
func writeServiceDocFile(g *generateInfo) error {
return writeGoFile(filepath.Join(g.PackageDir, "doc.go"),
codeLayout,
strings.TrimSpace(g.API.ServicePackageDoc()),
g.API.PackageName(),
"",
)
}
// writeExamplesFile writes out the service example file.
func writeExamplesFile(g *generateInfo) error {
code := g.API.ExamplesGoCode()
if len(code) > 0 {
return writeGoFile(filepath.Join(g.PackageDir, "examples_test.go"),
codeLayout,
"",
g.API.PackageName()+"_test",
code,
)
}
return nil
}
// writeServiceFile writes out the service initialization file.
func writeServiceFile(g *generateInfo) error {
return writeGoFile(filepath.Join(g.PackageDir, "service.go"),
codeLayout,
"",
g.API.PackageName(),
g.API.ServiceGoCode(),
)
}
// writeInterfaceFile writes out the service interface file.
func writeInterfaceFile(g *generateInfo) error {
const pkgDoc = `
// Package %s provides an interface to enable mocking the %s service client
// for testing your code.
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters.`
return writeGoFile(filepath.Join(g.PackageDir, g.API.InterfacePackageName(), "interface.go"),
codeLayout,
fmt.Sprintf(pkgDoc, g.API.InterfacePackageName(), g.API.Metadata.ServiceFullName),
g.API.InterfacePackageName(),
g.API.InterfaceGoCode(),
)
}
func writeWaitersFile(g *generateInfo) error {
if len(g.API.Waiters) == 0 {
return nil
}
return writeGoFile(filepath.Join(g.PackageDir, "waiters.go"),
codeLayout,
"",
g.API.PackageName(),
g.API.WaitersGoCode(),
)
}
// writeAPIFile writes out the service API file.
func writeAPIFile(g *generateInfo) error {
return writeGoFile(filepath.Join(g.PackageDir, "api.go"),
codeLayout,
"",
g.API.PackageName(),
g.API.APIGoCode(),
)
}
// writeAPIErrorsFile writes out the service API errors file.
func writeAPIErrorsFile(g *generateInfo) error {
return writeGoFile(filepath.Join(g.PackageDir, "errors.go"),
codeLayout,
"",
g.API.PackageName(),
g.API.APIErrorsGoCode(),
)
}
| [
"\"SERVICES\"",
"\"SERVICES\""
]
| []
| [
"SERVICES"
]
| [] | ["SERVICES"] | go | 1 | 0 | |
tfx/components/schema_gen/executor_test.py | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.schema_gen.executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tfx.components.schema_gen import executor
from tfx.types import standard_artifacts
from tfx.utils import io_utils
class ExecutorTest(tf.test.TestCase):
def setUp(self):
super(ExecutorTest, self).setUp()
self.source_data_dir = os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'testdata')
self.train_stats_artifact = standard_artifacts.ExampleStatistics(
split='train')
self.train_stats_artifact.uri = os.path.join(self.source_data_dir,
'statistics_gen/train/')
self.output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
self.schema_output = standard_artifacts.Schema()
self.schema_output.uri = os.path.join(self.output_data_dir, 'schema_output')
self.schema = standard_artifacts.Schema()
self.schema.uri = os.path.join(self.source_data_dir, 'fixed_schema/')
self.expected_schema = standard_artifacts.Schema()
self.expected_schema.uri = os.path.join(self.source_data_dir, 'schema_gen/')
self.input_dict = {
'stats': [self.train_stats_artifact],
'schema': None
}
self.output_dict = {
'output': [self.schema_output],
}
self.exec_properties = {'infer_feature_shape': False}
def _assertSchemaEqual(self, expected_schema, actual_schema):
schema_reader = io_utils.SchemaReader()
expected_schema_proto = schema_reader.read(
os.path.join(expected_schema.uri, executor._DEFAULT_FILE_NAME))
actual_schema_proto = schema_reader.read(
os.path.join(actual_schema.uri, executor._DEFAULT_FILE_NAME))
self.assertProtoEquals(expected_schema_proto, actual_schema_proto)
def testDoWithStatistics(self):
schema_gen_executor = executor.Executor()
schema_gen_executor.Do(self.input_dict, self.output_dict,
self.exec_properties)
self.assertNotEqual(0, len(tf.gfile.ListDirectory(self.schema_output.uri)))
self._assertSchemaEqual(self.expected_schema, self.schema_output)
def testDoWithSchema(self):
self.input_dict['schema'] = [self.schema]
self.input_dict.pop('stats')
schema_gen_executor = executor.Executor()
schema_gen_executor.Do(self.input_dict, self.output_dict,
self.exec_properties)
self.assertNotEqual(0, len(tf.gfile.ListDirectory(self.schema_output.uri)))
self._assertSchemaEqual(self.schema, self.schema_output)
def testDoWithNonExistentSchema(self):
non_existent_schema = standard_artifacts.Schema()
non_existent_schema.uri = '/path/to/non_existent/schema'
self.input_dict['schema'] = [non_existent_schema]
self.input_dict.pop('stats')
with self.assertRaises(ValueError):
schema_gen_executor = executor.Executor()
schema_gen_executor.Do(self.input_dict, self.output_dict,
self.exec_properties)
if __name__ == '__main__':
tf.test.main()
| []
| []
| [
"TEST_UNDECLARED_OUTPUTS_DIR"
]
| [] | ["TEST_UNDECLARED_OUTPUTS_DIR"] | python | 1 | 0 | |
Linkedin Connections Spammer.py | """
Adarsh Anand
Spamming people with connection requests -> LinkedIn caps invitations at roughly 100 per week
"""
import os
import time
import warnings
from dotenv import load_dotenv
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from webdriver_manager.chrome import ChromeDriverManager
load_dotenv()
warnings.filterwarnings("ignore", category=DeprecationWarning)
URL = "https://www.linkedin.com/"
endpoint = "https://www.linkedin.com/mynetwork/"
# Automation here is achieved with Selenium, a portable framework for
# testing and automating web applications.
# Installing the Chrome Driver
# driver = webdriver.Chrome(ChromeDriverManager().install())
driver = webdriver.Chrome(executable_path='./chromedriver.exe')
driver.get(URL)
driver.maximize_window()
# Log in with the stored credentials
driver.find_element_by_link_text("Sign in").click()
# ----------------------------------------------------------------------------------------------------------------------
# TODO - Create a .env file in the root directory of the project and add LINKEDIN_USERNAME and LINKEDIN_PASSWORD
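# Example .env contents (placeholder values):
#   LINKEDIN_USERNAME=you@example.com
#   LINKEDIN_PASSWORD=your-password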
name = os.getenv("LINKEDIN_USERNAME")
password = os.getenv("LINKEDIN_PASSWORD")
# ----------------------------------------------------------------------------------------------------------------------
time.sleep(1)
driver.find_element_by_id("username").send_keys(name)
driver.find_element_by_id("password").send_keys(password)
driver.find_element_by_xpath(
'//*[@id="organic-div"]/form/div[3]/button').click()
# The user can send out multiple connection requests to people at the endpoint -
# https://www.linkedin.com/mynetwork/
while 1:
time.sleep(3)
driver.get(endpoint)
time.sleep(5)
# click all the connections buttons
all_buttons = driver.find_elements_by_tag_name("button")
# filter out the buttons that are not connect buttons
valid_buttons = [btn for btn in all_buttons if btn.text == "Connect"]
for btn in valid_buttons:
driver.execute_script("arguments[0].click();", btn) # click the button
time.sleep(0.1) # wait for the connection request to be sent
try:
send = driver.find_element_by_xpath(
"//button[@aria-label='Send now']") # check if the button is present
driver.execute_script(
"arguments[0].click();", send) # click the button
time.sleep(0.1) # wait for the connection request to be sent
        except Exception:
            # the "Send now" confirmation dialog may not appear for every contact
            pass
| []
| []
| [
"LINKEDIN_USERNAME",
"LINKEDIN_PASSWORD"
]
| [] | ["LINKEDIN_USERNAME", "LINKEDIN_PASSWORD"] | python | 2 | 0 | |
app/app/settings.py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@#796d9ti+ew3@smxzjzvc^etl7=4smi=x@^26i3oit=yj&=$c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS')
}
}
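# Example environment for local development (placeholder values):
#   DB_HOST=localhost  DB_NAME=app  DB_USER=postgres  DB_PASS=supersecret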
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media'
STATIC_ROOT = '/vol/web/static'
AUTH_USER_MODEL = 'core.User'
| []
| []
| [
"DB_PASS",
"DB_USER",
"DB_NAME",
"DB_HOST"
]
| [] | ["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"] | python | 4 | 0 | |
model_tools/activations/hooks.py | from abc import ABC, abstractmethod
import logging
import os
from typing import Optional, Union, Iterable, Dict
import h5py
import numpy as np
import torch
from PIL import Image
from tqdm import tqdm
from brainio.stimuli import StimulusSet
from model_tools.activations import ActivationsModel
from model_tools.activations.core import flatten, change_dict
from model_tools.utils import fullname, s3
from model_tools.utils.pca import IncrementalPCAPytorch, PCAPytorch
from result_caching import store_dict
Stimuli = Union[Iterable[str], StimulusSet, Iterable[os.PathLike]]
BasePCA = Union[IncrementalPCAPytorch, PCAPytorch]
class LayerHookBase(ABC):
def __init__(self, activations_extractor: ActivationsModel, identifier: Optional[str] = None):
self._extractor = activations_extractor
self.identifier = identifier
self.handle = None
def __call__(self, batch_activations: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
self.setup(batch_activations)
return change_dict(batch_activations, self.layer_apply, keep_name=True,
multithread=os.getenv('MT_MULTITHREAD', '1') == '1')
@classmethod
def hook(cls, activations_extractor: ActivationsModel, identifier: Optional[str] = None, **kwargs):
hook = cls(activations_extractor=activations_extractor, identifier=identifier, **kwargs)
assert not cls.is_hooked(activations_extractor), f"{cls.__name__} is already hooked"
handle = activations_extractor.register_batch_activations_hook(hook)
hook.handle = handle
return handle
@classmethod
def is_hooked(cls, activations_extractor: ActivationsModel) -> bool:
return any(isinstance(hook, cls) for hook in
activations_extractor._extractor._batch_activations_hooks.values())
def setup(self, batch_activations: Dict[str, np.ndarray]) -> None:
pass
@abstractmethod
def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:
pass
class LayerGlobalMaxPool2d(LayerHookBase):
def __init__(self, *args, identifier: Optional[str] = None, **kwargs):
if identifier is None:
identifier = 'maxpool'
super(LayerGlobalMaxPool2d, self).__init__(*args, **kwargs, identifier=identifier)
def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:
if activations.ndim != 4:
return activations
return np.max(activations, axis=(2, 3))
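# Minimal usage sketch (the extractor construction is an assumption):
#
#   extractor = ...  # any ActivationsModel
#   LayerGlobalMaxPool2d.hook(extractor)
#   # batches returned by the extractor are now max-pooled over the spatial axes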
class LayerRandomProjection(LayerHookBase):
def __init__(self, *args,
n_components: int = 1000,
force: bool = False,
identifier: Optional[str] = None,
**kwargs):
if identifier is None:
identifier = f'randproj_ncomponents={n_components}_force={force}'
super(LayerRandomProjection, self).__init__(*args, **kwargs, identifier=identifier)
self._n_components = n_components
self._force = force
self._layer_ws = {}
def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:
activations = flatten(activations)
if activations.shape[1] <= self._n_components and not self._force:
return activations
if layer not in self._layer_ws:
w = np.random.normal(size=(activations.shape[-1], self._n_components)) / np.sqrt(self._n_components)
self._layer_ws[layer] = w
else:
w = self._layer_ws[layer]
activations = activations @ w
return activations
class LayerPCA(LayerHookBase):
def __init__(self, *args,
n_components: int = 1000,
force: bool = False,
stimuli: Optional[Stimuli] = None,
stimuli_identifier: Optional[str] = None,
identifier: Optional[str] = None,
batch_size: Optional[int] = None,
device: Optional[Union[str, torch.device]] = None,
**kwargs):
if stimuli is None:
            # Default to n_components ImageNet validation images, spread evenly across the 1000 classes
stimuli = _get_imagenet_val(n_components)
stimuli_identifier = 'brainscore-imagenetval'
if isinstance(stimuli, StimulusSet) and stimuli_identifier is None and hasattr(stimuli, 'identifier'):
stimuli_identifier = stimuli.identifier
if stimuli_identifier is None:
raise ValueError('If passing a list of paths for stimuli '
'or a StimulusSet without an identifier attribute, '
'you must provide a stimuli_identifier')
if identifier is None:
identifier = f'pca_ncomponents={n_components}_force={force}_stimuli_identifier={stimuli_identifier}'
super(LayerPCA, self).__init__(*args, **kwargs, identifier=identifier)
self._n_components = n_components
self._force = force
self._stimuli_identifier = stimuli_identifier
self._stimuli = stimuli
self._batch_size = batch_size
self._device = device
self._logger = logging.getLogger(fullname(self))
self._layer_pcas = {}
def setup(self, batch_activations) -> None:
layers = batch_activations.keys()
missing_layers = [layer for layer in layers if layer not in self._layer_pcas]
if len(missing_layers) == 0:
return
layer_pcas = self._pcas(identifier=self._extractor.identifier,
layers=missing_layers,
n_components=self._n_components,
force=self._force,
stimuli_identifier=self._stimuli_identifier)
self._layer_pcas = {**self._layer_pcas, **layer_pcas}
def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:
pca = self._layer_pcas[layer]
activations = flatten(activations)
if pca is None:
return activations
return pca.transform(torch.from_numpy(activations).to(self._device))
@store_dict(dict_key='layers', identifier_ignore=['layers'])
def _pcas(self, identifier, layers, n_components, force, stimuli_identifier) -> Dict[str, BasePCA]:
self._logger.debug(f'Retrieving {stimuli_identifier} activations')
self.handle.disable()
activations = self._extractor(self._stimuli, layers=layers, stimuli_identifier=False)
activations = {layer: activations.sel(layer=layer).values
for layer in np.unique(activations['layer'])}
assert len(set(layer_activations.shape[0] for layer_activations in activations.values())) == 1, "stimuli differ"
self.handle.enable()
self._logger.debug(f'Computing {stimuli_identifier} principal components')
progress = tqdm(total=len(activations), desc="layer principal components", leave=False)
def init_and_progress(layer, activations):
activations = flatten(activations)
if activations.shape[1] <= n_components and not force:
self._logger.debug(f"Not computing principal components for {layer} "
f"activations {activations.shape} as shape is small enough already")
progress.update(1)
return None
n_components_ = n_components if activations.shape[1] > n_components else activations.shape[1]
if self._batch_size is None:
pca = PCAPytorch(n_components_, device=self._device)
pca.fit(torch.from_numpy(activations).to(self._device))
else:
pca = IncrementalPCAPytorch(n_components_, device=self._device)
for i in range(0, activations.shape[0], self._batch_size):
activations_batch = torch.from_numpy(activations[i:i + self._batch_size]).to(self._device)
pca.fit_partial(activations_batch)
return pca
layer_pcas = change_dict(activations, init_and_progress, keep_name=True,
multithread=os.getenv('MT_MULTITHREAD', '1') == '1')
progress.close()
return layer_pcas
def _get_imagenet_val(num_images):
_logger = logging.getLogger(fullname(_get_imagenet_val))
num_classes = 1000
num_images_per_class = (num_images - 1) // num_classes
base_indices = np.arange(num_images_per_class).astype(int)
indices = []
for i in range(num_classes):
indices.extend(50 * i + base_indices)
for i in range((num_images - 1) % num_classes + 1):
indices.extend(50 * i + np.array([num_images_per_class]).astype(int))
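    # Worked example: with num_images=1000, num_images_per_class is 0, the first
    # loop adds nothing, and the second loop picks index 50*i + 0 for every class,
    # i.e. the first validation image of each of the 1000 classes.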
framework_home = os.path.expanduser(os.getenv('MT_HOME', '~/.model-tools'))
imagenet_filepath = os.getenv('MT_IMAGENET_PATH', os.path.join(framework_home, 'imagenet2012.hdf5'))
imagenet_dir = f"{imagenet_filepath}-files"
os.makedirs(imagenet_dir, exist_ok=True)
if not os.path.isfile(imagenet_filepath):
os.makedirs(os.path.dirname(imagenet_filepath), exist_ok=True)
_logger.debug(f"Downloading ImageNet validation to {imagenet_filepath}")
s3.download_file("imagenet2012-val.hdf5", imagenet_filepath)
filepaths = []
with h5py.File(imagenet_filepath, 'r') as f:
for index in indices:
imagepath = os.path.join(imagenet_dir, f"{index}.png")
if not os.path.isfile(imagepath):
image = np.array(f['val/images'][index])
Image.fromarray(image).save(imagepath)
filepaths.append(imagepath)
return filepaths
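# Illustrative walk-through of the index math above (not part of the original
# file): the HDF5 file is assumed to hold 50 validation images per class,
# stored class by class. For num_images=1001 and num_classes=1000:
#   num_images_per_class = (1001 - 1) // 1000 = 1, so base_indices = [0]
#   the first loop picks image 0 of every class -> 0, 50, 100, ..., 49950
#   the second loop runs range((1001 - 1) % 1000 + 1) = range(1) and adds
#   index 50 * 0 + 1 = 1, i.e. a second image from class 0.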
| []
| []
| [
"MT_HOME",
"MT_MULTITHREAD",
"MT_IMAGENET_PATH"
]
| [] | ["MT_HOME", "MT_MULTITHREAD", "MT_IMAGENET_PATH"] | python | 3 | 0 | |
plugins/__init__.py | import os
from telethon import version
from telethon.errors.rpcerrorlist import (
MediaEmptyError,
WebpageCurlFailedError,
WebpageMediaEmptyError,
)
from telethon.events import CallbackQuery
from userbot import *
from userbot import CMD_HELP, CMD_HELP_BOT
from userbot.config import Config
from userbot.helpers import *
from userbot.random_strings import *
from userbot.utils import *
from userbot.var import Config, Var
from userbot.config import Config
bot = Andencento
uptime = "dekhna jaruri hai kya"
Eiva_USER = Andencento.me.first_name
ForGo10God = Andencento.uid
Eiva_mention = f"[{Eiva_USER}](tg://user?id={ForGo10God})"
Andencento_USER = bot.me.first_name
Andencento_mention = f"[{Andencento_USER}](tg://user?id={ForGo10God})"
Andencento_logo = "./userbot/resources/andencento_logo.jpg"
cjb = "./userbot/resources/cjb.jpg"
restlo = "./userbot/resources/rest.jpeg"
shuru = "./userbot/resources/shuru.jpg"
hl = Config.HANDLER
shl = Config.SUDO_HANDLER
Andencento_ver = "0.1"
tel_ver = version.__version__
devs = DEVLIST
user_mention = Andencento_mention
async def get_user_id(ids):
if str(ids).isdigit():
userid = int(ids)
else:
userid = (await bot.get_entity(ids)).id
return userid
sudos = Config.SUDO_USERS
if sudos:
is_sudo = "True"
else:
is_sudo = "False"
abus = Config.ABUSE
if abus == "ON":
abuse_m = "Enabled"
else:
abuse_m = "Disabled"
START_TIME = datetime.datetime.now()
HANDLER = os.environ.get("HANDLER", ".")
chnl_link = "https://t.me/Andencento"
COMMAND_HAND_LER = os.environ.get("HANDLER", ".")
##########################################################################
class CmdHelp:
"""
The class I wrote to better generate command aids.
"""
FILE = ""
ORIGINAL_FILE = ""
FILE_AUTHOR = ""
IS_OFFICIAL = True
COMMANDS = {}
PREFIX = COMMAND_HAND_LER
WARNING = ""
INFO = ""
def __init__(self, file: str, official: bool = True, file_name: str = None):
self.FILE = file
self.ORIGINAL_FILE = file
self.IS_OFFICIAL = official
self.FILE_NAME = file_name if file_name is not None else file + ".py"
self.COMMANDS = {}
self.FILE_AUTHOR = ""
self.WARNING = ""
self.INFO = ""
def set_file_info(self, name: str, value: str):
if name == "name":
self.FILE = value
elif name == "author":
self.FILE_AUTHOR = value
return self
def add_command(self, command: str, params=None, usage: str = "", example=None):
"""
Inserts commands..
"""
self.COMMANDS[command] = {
"command": command,
"params": params,
"usage": usage,
"example": example,
}
return self
def add_warning(self, warning):
self.WARNING = warning
return self
def add_info(self, info):
self.INFO = info
return self
def get_result(self):
"""
Brings results.
"""
result = f"**📗 File :** `{self.FILE}`\n"
if self.WARNING == "" and self.INFO == "":
result += f"**⬇️ Official:** {'✅' if self.IS_OFFICIAL else '❌'}\n\n"
else:
result += f"**⬇️ Official:** {'✅' if self.IS_OFFICIAL else '❌'}\n"
if self.INFO == "":
if not self.WARNING == "":
result += f"**⚠️ Warning :** {self.WARNING}\n\n"
else:
if not self.WARNING == "":
result += f"**⚠️ Warning :** {self.WARNING}\n"
result += f"**ℹ️ Info:** {self.INFO}\n\n"
for command in self.COMMANDS:
command = self.COMMANDS[command]
if command["params"] is None:
result += (
f"**🛠 Command :** `{COMMAND_HAND_LER[:1]}{command['command']}`\n"
)
else:
result += f"**🛠 Command :** `{COMMAND_HAND_LER[:1]}{command['command']} {command['params']}`\n"
if command["example"] is None:
result += f"**💬 Details :** `{command['usage']}`\n\n"
else:
result += f"**💬 Details :** `{command['usage']}`\n"
result += f"**⌨️ For Example :** `{COMMAND_HAND_LER[:1]}{command['example']}`\n\n"
return result
def add(self):
"""
Directly adds CMD_HELP.
"""
CMD_HELP_BOT[self.FILE] = {
"info": {
"official": self.IS_OFFICIAL,
"warning": self.WARNING,
"info": self.INFO,
},
"commands": self.COMMANDS,
}
CMD_HELP[self.FILE] = self.get_result()
return True
def getText(self, text: str):
if text == "REPLY_OR_USERNAME":
return "<user name> <user name/answer >"
elif text == "OR":
return "or"
elif text == "USERNAMES":
return "<user name (s)>"
KANGING_STR = [
"Using Witchery to kang this sticker...",
"Plagiarising hehe...",
"Inviting this sticker over to my pack...",
"Kanging this sticker...",
"Hey that's a nice sticker!\nMind if I kang?!..",
"hehe me stel ur stikér\nhehe.",
"Ay look over there (☉。☉)!→\nWhile I kang this...",
"Roses are red violets are blue, kanging this sticker so my pacc looks cool",
"Imprisoning this sticker...",
"Mr.Steal Your Sticker is stealing this sticker... ",
"Hey! That's my sticker. Lemme get it back...",
"Turn around, Go straight and f*ck off...",
]
async def bash(cmd):
process = await asyncio.create_subprocess_shell(
cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
err = stderr.decode().strip()
out = stdout.decode().strip()
return out, err
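# Illustrative usage (inside any coroutine; the command is a placeholder):
#   out, err = await bash("uptime")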
| []
| []
| [
"HANDLER"
]
| [] | ["HANDLER"] | python | 1 | 0 | |
libkbfs/init.go | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"errors"
"flag"
"fmt"
"os"
"os/signal"
"path/filepath"
"runtime/pprof"
"time"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
)
// InitParams contains the initialization parameters for Init(). It is
// usually filled in by the flags parser passed into AddFlags().
type InitParams struct {
// Whether to print debug messages.
Debug bool
// If non-empty, where to write a CPU profile.
CPUProfile string
// If non-empty, the host:port of the block server. If empty,
// a default value is used depending on the run mode.
BServerAddr string
// If non-empty the host:port of the metadata server. If
// empty, a default value is used depending on the run mode.
MDServerAddr string
// If true, use in-memory servers and ignore BServerAddr,
// MDServerAddr, and ServerRootDir.
ServerInMemory bool
// If non-empty, use on-disk servers and ignore BServerAddr
// and MDServerAddr.
ServerRootDir string
// Fake local user name. If non-empty, either ServerInMemory
// must be true or ServerRootDir must be non-empty.
LocalUser string
// TLFValidDuration is the duration that TLFs are valid
// before marked for lazy revalidation.
TLFValidDuration time.Duration
// LogToFile if true, logs to a default file location.
LogToFile bool
// LogFileConfig tells us where to log and rotation config.
LogFileConfig logger.LogFileConfig
}
// GetDefaultBServer returns the default value for the -bserver flag.
func GetDefaultBServer(ctx Context) string {
switch ctx.GetRunMode() {
case libkb.StagingRunMode:
return "bserver.dev.keybase.io:443"
case libkb.ProductionRunMode:
return "bserver.kbfs.keybase.io:443"
default:
return ""
}
}
// GetDefaultMDServer returns the default value for the -mdserver flag.
func GetDefaultMDServer(ctx Context) string {
switch ctx.GetRunMode() {
case libkb.StagingRunMode:
return "mdserver.dev.keybase.io:443"
case libkb.ProductionRunMode:
return "mdserver.kbfs.keybase.io:443"
default:
return ""
}
}
func defaultLogPath(ctx Context) string {
return filepath.Join(ctx.GetLogDir(), libkb.KBFSLogFileName)
}
// DefaultInitParams returns default init params
func DefaultInitParams(ctx Context) InitParams {
return InitParams{
Debug: BoolForString(os.Getenv("KBFS_DEBUG")),
BServerAddr: GetDefaultBServer(ctx),
MDServerAddr: GetDefaultMDServer(ctx),
TLFValidDuration: tlfValidDurationDefault,
LogFileConfig: logger.LogFileConfig{
MaxAge: 30 * 24 * time.Hour,
MaxSize: 128 * 1024 * 1024,
MaxKeepFiles: 3,
},
}
}
// AddFlags adds libkbfs flags to the given FlagSet. Returns an
// InitParams that will be filled in once the given FlagSet is parsed.
func AddFlags(flags *flag.FlagSet, ctx Context) *InitParams {
defaultParams := DefaultInitParams(ctx)
var params InitParams
	flags.BoolVar(&params.Debug, "debug", defaultParams.Debug, "Print debug messages")
	flags.StringVar(&params.CPUProfile, "cpuprofile", "", "write cpu profile to file")
	flags.StringVar(&params.BServerAddr, "bserver", defaultParams.BServerAddr, "host:port of the block server")
	flags.StringVar(&params.MDServerAddr, "mdserver", defaultParams.MDServerAddr, "host:port of the metadata server")
	flags.BoolVar(&params.ServerInMemory, "server-in-memory", false, "use in-memory server (and ignore -bserver, -mdserver, and -server-root)")
	flags.StringVar(&params.ServerRootDir, "server-root", "", "directory to put local server files (and ignore -bserver and -mdserver)")
	flags.StringVar(&params.LocalUser, "localuser", "", "fake local user (used only with -server-in-memory or -server-root)")
	flags.DurationVar(&params.TLFValidDuration, "tlf-valid", defaultParams.TLFValidDuration, "time tlfs are valid before redoing identification")
	flags.BoolVar(&params.LogToFile, "log-to-file", false, fmt.Sprintf("Log to default file: %s", defaultLogPath(ctx)))
	flags.StringVar(&params.LogFileConfig.Path, "log-file", "", "Path to log file")
	flags.DurationVar(&params.LogFileConfig.MaxAge, "log-file-max-age", defaultParams.LogFileConfig.MaxAge, "Maximum age of a log file before rotation")
params.LogFileConfig.MaxSize = defaultParams.LogFileConfig.MaxSize
	flags.Var(SizeFlag{&params.LogFileConfig.MaxSize}, "log-file-max-size", "Maximum size of a log file before rotation")
	// The default is to *DELETE* old log files for kbfs.
	flags.IntVar(&params.LogFileConfig.MaxKeepFiles, "log-file-max-keep-files", defaultParams.LogFileConfig.MaxKeepFiles, "Maximum number of log files for this service, older ones are deleted. 0 for infinite.")
	return &params
}
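// Illustrative wiring sketch (not part of this file; the caller's variable
// names are placeholders). A typical binary parses the flags and feeds the
// resulting params into InitLog and Init:
//
//	flags := flag.NewFlagSet("kbfs", flag.ExitOnError)
//	params := libkbfs.AddFlags(flags, ctx)
//	flags.Parse(os.Args[1:])
//	log, _ := libkbfs.InitLog(*params, ctx)
//	config, err := libkbfs.Init(ctx, *params, nil, onInterrupt, log)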
func makeMDServer(config Config, serverInMemory bool, serverRootDir, mdserverAddr string, ctx Context) (
MDServer, error) {
if serverInMemory {
// local in-memory MD server
return NewMDServerMemory(config)
}
if len(serverRootDir) > 0 {
// local persistent MD server
mdPath := filepath.Join(serverRootDir, "kbfs_md")
return NewMDServerDir(config, mdPath)
}
if len(mdserverAddr) == 0 {
return nil, errors.New("Empty MD server address")
}
// remote MD server. this can't fail. reconnection attempts
// will be automatic.
mdServer := NewMDServerRemote(config, mdserverAddr, ctx)
return mdServer, nil
}
func makeKeyServer(config Config, serverInMemory bool, serverRootDir, keyserverAddr string) (
KeyServer, error) {
if serverInMemory {
// local in-memory key server
return NewKeyServerMemory(config)
}
if len(serverRootDir) > 0 {
// local persistent key server
keyPath := filepath.Join(serverRootDir, "kbfs_key")
return NewKeyServerDir(config, keyPath)
}
if len(keyserverAddr) == 0 {
return nil, errors.New("Empty key server address")
}
// currently the MD server also acts as the key server.
keyServer, ok := config.MDServer().(KeyServer)
if !ok {
return nil, errors.New("MD server is not a key server")
}
return keyServer, nil
}
func makeBlockServer(config Config, serverInMemory bool, serverRootDir, bserverAddr string, ctx Context, log logger.Logger) (
BlockServer, error) {
if serverInMemory {
// local in-memory block server
return NewBlockServerMemory(config), nil
}
if len(serverRootDir) > 0 {
// local persistent block server
blockPath := filepath.Join(serverRootDir, "kbfs_block")
return NewBlockServerDir(config, blockPath), nil
}
if len(bserverAddr) == 0 {
return nil, errors.New("Empty block server address")
}
log.Debug("Using remote bserver %s", bserverAddr)
return NewBlockServerRemote(config, bserverAddr, ctx), nil
}
// InitLog sets up logging switching to a log file if necessary.
// Returns a valid logger even on error, which are non-fatal, thus
// errors from this function may be ignored.
// Possible errors are logged to the logger returned.
func InitLog(params InitParams, ctx Context) (logger.Logger, error) {
var err error
log := logger.NewWithCallDepth("kbfs", 1)
// Set log file to default if log-to-file was specified
if params.LogToFile {
if params.LogFileConfig.Path != "" {
return nil, fmt.Errorf("log-to-file and log-file flags can't be specified together")
}
params.LogFileConfig.Path = defaultLogPath(ctx)
}
if params.LogFileConfig.Path != "" {
		err = logger.SetLogFileConfig(&params.LogFileConfig)
}
log.Configure("", params.Debug, "")
log.Info("KBFS version %s", VersionString())
if err != nil {
log.Warning("Failed to setup log file %q: %v", params.LogFileConfig.Path, err)
}
return log, err
}
// Init initializes a config and returns it.
//
// onInterruptFn is called whenever an interrupt signal is received
// (e.g., if the user hits Ctrl-C).
//
// Init should be called at the beginning of main. Shutdown (see
// below) should then be called at the end of main (usually via
// defer).
//
// The keybaseDaemonFn argument is to temporarily support KBFS on
// mobile (for using a custom KeybaseDaemon implementation) and will
// be removed in the future, when we use a non-RPC implementation.
func Init(ctx Context, params InitParams, keybaseDaemonFn KeybaseDaemonFn, onInterruptFn func(), log logger.Logger) (Config, error) {
if params.CPUProfile != "" {
// Let the GC/OS clean up the file handle.
f, err := os.Create(params.CPUProfile)
if err != nil {
return nil, err
}
pprof.StartCPUProfile(f)
}
interruptChan := make(chan os.Signal, 1)
signal.Notify(interruptChan, os.Interrupt)
go func() {
		<-interruptChan
if onInterruptFn != nil {
onInterruptFn()
}
os.Exit(1)
}()
config := NewConfigLocal()
bsplitter, err := NewBlockSplitterSimple(MaxBlockSizeBytesDefault, 8*1024,
config.Codec())
if err != nil {
return nil, err
}
config.SetBlockSplitter(bsplitter)
if registry := config.MetricsRegistry(); registry != nil {
keyCache := config.KeyCache()
keyCache = NewKeyCacheMeasured(keyCache, registry)
config.SetKeyCache(keyCache)
}
// Set logging
config.SetLoggerMaker(func(module string) logger.Logger {
mname := "kbfs"
if module != "" {
mname += fmt.Sprintf("(%s)", module)
}
// Add log depth so that context-based messages get the right
// file printed out.
lg := logger.NewWithCallDepth(mname, 1)
if params.Debug {
// Turn on debugging. TODO: allow a proper log file and
// style to be specified.
lg.Configure("", true, "")
}
return lg
})
config.SetTLFValidDuration(params.TLFValidDuration)
kbfsOps := NewKBFSOpsStandard(config)
config.SetKBFSOps(kbfsOps)
config.SetNotifier(kbfsOps)
config.SetKeyManager(NewKeyManagerStandard(config))
config.SetMDOps(NewMDOpsStandard(config))
mdServer, err := makeMDServer(
config, params.ServerInMemory, params.ServerRootDir, params.MDServerAddr, ctx)
if err != nil {
return nil, fmt.Errorf("problem creating MD server: %v", err)
}
config.SetMDServer(mdServer)
// note: the mdserver is the keyserver at the moment.
keyServer, err := makeKeyServer(
config, params.ServerInMemory, params.ServerRootDir, params.MDServerAddr)
if err != nil {
return nil, fmt.Errorf("problem creating key server: %v", err)
}
if registry := config.MetricsRegistry(); registry != nil {
keyServer = NewKeyServerMeasured(keyServer, registry)
}
config.SetKeyServer(keyServer)
if keybaseDaemonFn == nil {
keybaseDaemonFn = makeKeybaseDaemon
}
daemon, err := keybaseDaemonFn(config, params, ctx, config.MakeLogger(""))
if err != nil {
return nil, fmt.Errorf("problem creating daemon: %s", err)
}
if registry := config.MetricsRegistry(); registry != nil {
daemon = NewKeybaseDaemonMeasured(daemon, registry)
}
config.SetKeybaseDaemon(daemon)
k := NewKBPKIClient(config)
config.SetKBPKI(k)
config.SetReporter(NewReporterKBPKI(config, 10, 1000))
localUser := libkb.NewNormalizedUsername(params.LocalUser)
if localUser == "" {
c := NewCryptoClient(config, ctx)
config.SetCrypto(c)
} else {
signingKey := MakeLocalUserSigningKeyOrBust(localUser)
cryptPrivateKey := MakeLocalUserCryptPrivateKeyOrBust(localUser)
config.SetCrypto(NewCryptoLocal(config, signingKey, cryptPrivateKey))
}
bserv, err := makeBlockServer(config, params.ServerInMemory, params.ServerRootDir, params.BServerAddr, ctx, log)
if err != nil {
return nil, fmt.Errorf("cannot open block database: %v", err)
}
if registry := config.MetricsRegistry(); registry != nil {
bserv = NewBlockServerMeasured(bserv, registry)
}
config.SetBlockServer(bserv)
return config, nil
}
// Shutdown does any necessary shutdown tasks for libkbfs. Shutdown
// should be called at the end of main.
func Shutdown() {
pprof.StopCPUProfile()
}
| [
"\"KBFS_DEBUG\""
]
| []
| [
"KBFS_DEBUG"
]
| [] | ["KBFS_DEBUG"] | go | 1 | 0 | |
exec/jvm/sandbox.go | package jvm
import (
"context"
"fmt"
"os"
osuser "os/user"
"path"
"strconv"
"strings"
"time"
specchannel "github.com/chaosblade-io/chaosblade-spec-go/channel"
"github.com/chaosblade-io/chaosblade-spec-go/spec"
"github.com/chaosblade-io/chaosblade-spec-go/util"
"github.com/shirou/gopsutil/process"
"github.com/sirupsen/logrus"
)
// attach sandbox to java process
var channel = specchannel.NewLocalChannel()
const DefaultNamespace = "default"
func Attach(port string, javaHome string, pid string) (*spec.Response, string) {
// refresh
response, username := attach(pid, port, context.TODO(), javaHome)
if !response.Success {
return response, username
}
time.Sleep(5 * time.Second)
// active
response = active(port)
if !response.Success {
return response, username
}
// check
return check(port), username
}
// curl -s http://localhost:$2/sandbox/default/module/http/chaosblade/status 2>&1
func check(port string) *spec.Response {
url := getSandboxUrl(port, "chaosblade/status", "")
result, err, code := util.Curl(url)
if code == 200 {
return spec.ReturnSuccess(result)
}
if err != nil {
return spec.ReturnFail(spec.Code[spec.SandboxInvokeError], err.Error())
}
return spec.ReturnFail(spec.Code[spec.SandboxInvokeError],
fmt.Sprintf("response code is %d, result: %s", code, result))
}
// active chaosblade bin/sandbox.sh -p $pid -P $2 -a chaosblade 2>&1
func active(port string) *spec.Response {
url := getSandboxUrl(port, "sandbox-module-mgr/active", "&ids=chaosblade")
result, err, code := util.Curl(url)
if err != nil {
return spec.ReturnFail(spec.Code[spec.SandboxInvokeError], err.Error())
}
if code != 200 {
return spec.ReturnFail(spec.Code[spec.SandboxInvokeError],
fmt.Sprintf("active module response code: %d, result: %s", code, result))
}
return spec.ReturnSuccess("success")
}
// attach java agent to application process
func attach(pid, port string, ctx context.Context, javaHome string) (*spec.Response, string) {
username, err := getUsername(pid)
if err != nil {
return spec.ReturnFail(spec.Code[spec.StatusError],
fmt.Sprintf("get username failed by %s pid, %v", pid, err)), ""
}
javaBin, javaHome := getJavaBinAndJavaHome(javaHome, ctx, pid)
toolsJar := getToolJar(javaHome)
token, err := getSandboxToken(ctx)
if err != nil {
return spec.ReturnFail(spec.Code[spec.ServerError],
fmt.Sprintf("create sandbox token failed, %v", err)), username
}
javaArgs := getAttachJvmOpts(toolsJar, token, port, pid)
currUser, err := osuser.Current()
if err != nil {
logrus.Warnf("get current user info failed, %v", err)
}
var response *spec.Response
if currUser != nil && (currUser.Username == username) {
response = channel.Run(ctx, javaBin, javaArgs)
} else {
if currUser != nil {
logrus.Debugf("current user name is %s, not equal %s, so use sudo command to execute",
currUser.Username, username)
}
response = channel.Run(ctx, "sudo", fmt.Sprintf("-u %s %s %s", username, javaBin, javaArgs))
}
if !response.Success {
return response, username
}
response = channel.Run(ctx, "grep", fmt.Sprintf(`%s %s | grep %s | tail -1 | awk -F ";" '{print $3";"$4}'`,
token, getSandboxTokenFile(username), DefaultNamespace))
// if attach successfully, the sandbox-agent.jar will write token to local file
if !response.Success {
return spec.ReturnFail(spec.Code[spec.SandboxInvokeError],
fmt.Sprintf("attach JVM %s failed, loss response; %s", pid, response.Err)), username
}
return response, username
}
// port: specifies the port the sandbox listens on
func getAttachJvmOpts(toolsJar string, token string, port string, pid string) string {
jvmOpts := fmt.Sprintf("-Xms128M -Xmx128M -Xnoclassgc -ea -Xbootclasspath/a:%s", toolsJar)
sandboxHome := path.Join(util.GetLibHome(), "sandbox")
sandboxLibPath := path.Join(sandboxHome, "lib")
sandboxAttachArgs := fmt.Sprintf("home=%s;token=%s;server.ip=%s;server.port=%s;namespace=%s",
sandboxHome, token, "127.0.0.1", port, DefaultNamespace)
javaArgs := fmt.Sprintf(`%s -jar %s/sandbox-core.jar %s "%s/sandbox-agent.jar" "%s"`,
jvmOpts, sandboxLibPath, pid, sandboxLibPath, sandboxAttachArgs)
return javaArgs
}
func getSandboxToken(ctx context.Context) (string, error) {
// create sandbox token
response := channel.Run(ctx, "date", "| head | cksum | sed 's/ //g'")
if !response.Success {
return "", fmt.Errorf(response.Err)
}
token := strings.TrimSpace(response.Result.(string))
return token, nil
}
func getToolJar(javaHome string) string {
toolsJar := path.Join(util.GetBinPath(), "tools.jar")
originalJar := path.Join(javaHome, "lib/tools.jar")
if util.IsExist(originalJar) {
toolsJar = originalJar
}
return toolsJar
}
func getUsername(pid string) (string, error) {
p, err := strconv.Atoi(pid)
if err != nil {
return "", err
}
javaProcess, err := process.NewProcess(int32(p))
if err != nil {
return "", err
}
return javaProcess.Username()
}
func getJavaBinAndJavaHome(javaHome string, ctx context.Context, pid string) (string, string) {
javaBin := "java"
if javaHome == "" {
javaHome = os.Getenv("JAVA_HOME")
javaBin = path.Join(javaHome, "bin/java")
}
if javaHome == "" {
psArgs := specchannel.GetPsArgs()
response := channel.Run(ctx, "ps", fmt.Sprintf(`%s | grep -w %s | grep java | grep -v grep | awk '{print $4}'`,
psArgs, pid))
if response.Success {
javaBin = strings.TrimSpace(response.Result.(string))
}
if strings.HasPrefix(javaBin, "/bin/java") {
javaHome = javaBin[:len(javaBin)-9]
}
}
return javaBin, javaHome
}
func Detach(port string) *spec.Response {
return shutdown(port)
}
// CheckPortFromSandboxToken will read last line and curl the port for testing connectivity
func CheckPortFromSandboxToken(username string) (port string, err error) {
port, err = getPortFromSandboxToken(username)
if err != nil {
return port, err
}
versionUrl := getSandboxUrl(port, "sandbox-info/version", "")
_, err, _ = util.Curl(versionUrl)
if err != nil {
return "", err
}
return port, nil
}
func getPortFromSandboxToken(username string) (port string, err error) {
response := channel.Run(context.TODO(), "grep",
fmt.Sprintf(`%s %s | tail -1 | awk -F ";" '{print $4}'`,
DefaultNamespace, getSandboxTokenFile(username)))
if !response.Success {
return "", fmt.Errorf(response.Err)
}
if response.Result == nil {
return "", fmt.Errorf("get empty from sandbox token file")
}
port = strings.TrimSpace(response.Result.(string))
if port == "" {
return "", fmt.Errorf("read empty from sandbox token file")
}
_, err = strconv.Atoi(port)
if err != nil {
return "", fmt.Errorf("can not find port from sandbox token file, %v", err)
}
return port, nil
}
// sudo -u $user -H bash bin/sandbox.sh -p $pid -S 2>&1
func shutdown(port string) *spec.Response {
url := getSandboxUrl(port, "sandbox-control/shutdown", "")
result, err, code := util.Curl(url)
if err != nil {
return spec.ReturnFail(spec.Code[spec.SandboxInvokeError], err.Error())
}
if code != 200 {
return spec.ReturnFail(spec.Code[spec.SandboxInvokeError],
fmt.Sprintf("shutdown module response code: %d, result: %s", code, result))
}
return spec.ReturnSuccess("success")
}
func getSandboxUrl(port, uri, param string) string {
// "sandbox-module-mgr/reset"
return fmt.Sprintf("http://127.0.0.1:%s/sandbox/%s/module/http/%s?1=1%s",
port, DefaultNamespace, uri, param)
}
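// For example (illustrative), getSandboxUrl("54321", "chaosblade/status", "")
// yields:
//
//	http://127.0.0.1:54321/sandbox/default/module/http/chaosblade/status?1=1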
func getSandboxTokenFile(username string) string {
userHome := util.GetSpecifyingUserHome(username)
return path.Join(userHome, ".sandbox.token")
}
| [
"\"JAVA_HOME\""
]
| []
| [
"JAVA_HOME"
]
| [] | ["JAVA_HOME"] | go | 1 | 0 | |
src/ssh-backup-download/helpers.py | import logging
import os
import requests
from config import LOGGER_NAME, LOGS_PATH, DEBUG
def send_telegram_msg(message=''):
"""Envía un mensaje al chat de telegram que se indica
en la variable de entorno TELEGRAM_CHAT_ID con el bot
con token especificado en la var de entorno TELEGRAM_BOT_TOKEN.
Parameters
----------
message : str, optional
Mensaje a enviar por telegram, by default ''
"""
chat_id = os.getenv('TELEGRAM_CHAT_ID')
telegram_api = os.getenv('TELEGRAM_API')
bot_token = os.getenv('TELEGRAM_BOT_TOKEN')
url = f'{telegram_api}/bot{bot_token}/sendMessage?chat_id={chat_id}&text={message}'
requests.post(url)
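# Illustrative usage (assumes TELEGRAM_API, TELEGRAM_BOT_TOKEN and
# TELEGRAM_CHAT_ID are set in the environment):
#   send_telegram_msg('backup finished OK')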
def set_logging(log_name):
"""Setea el logger, para usarlo se debe llamar a
logging.getLogger(LOGGER_NAME)
Parameters
----------
log_name : str
nombre del file en donde se va a escribir el log
"""
logger = logging.getLogger(LOGGER_NAME)
general_level = logging.DEBUG if DEBUG else logging.INFO
logger.setLevel(general_level)
console_handler = logging.StreamHandler()
file_handler = logging.FileHandler(f'{LOGS_PATH}{log_name}')
console_handler.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
msg_format = '%(asctime)s [%(filename)s]-[%(levelname)s]: %(message)s'
date_format = '%Y-%m-%d %H:%M:%S'
console_format = logging.Formatter(msg_format, datefmt=date_format)
file_format = logging.Formatter(msg_format, datefmt=date_format)
console_handler.setFormatter(console_format)
file_handler.setFormatter(file_format)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
| []
| []
| [
"TELEGRAM_CHAT_ID",
"TELEGRAM_API",
"TELEGRAM_BOT_TOKEN"
]
| [] | ["TELEGRAM_CHAT_ID", "TELEGRAM_API", "TELEGRAM_BOT_TOKEN"] | python | 3 | 0 | |
db/db.go | package db
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
)
const (
// The default database suffix.
defaultDatabaseSuffix = ".trdb"
)
var (
// Transaction could not be found (maybe invalid ID?)
errTransactionNotFound = errors.New("not found: the transaction does not exist")
// The default database storage path.
defaultDatabasePath = filepath.Join(os.Getenv("HOME"), defaultDatabaseSuffix)
)
// Currency stores information about a currency.
type Currency struct {
Name, Format string
Ratio Value
}
var (
// Euro currency
Euro = Currency{"Euro", "%d.%02d€", Value(100)}
// Dollar currency
Dollar = Currency{"Dollar", "%d.%02d$", Value(100)}
// DefaultCurrency for display
DefaultCurrency = Euro
)
// Action is a transaction type.
type Action string
const (
// Withdraw takes money from the account.
Withdraw Action = "withdraw"
// Deposit stores money on the account.
Deposit Action = "deposit"
)
// Value is a specific amount of money.
type Value int
const (
// ZeroValue represents a 0.
ZeroValue = Value(0)
)
func abs(x Value) Value {
if x < ZeroValue {
return -x
}
return x
}
// Stringifies the value in a currency format.
func (v Value) String() string {
return fmt.Sprintf(DefaultCurrency.Format, v/DefaultCurrency.Ratio, abs(v%DefaultCurrency.Ratio))
}
// Add more money onto the existing value.
func (v Value) Add(a Value) Value {
return v + a
}
// Smaller compares if the value is smaller than the argument.
func (v Value) Smaller(a Value) bool {
return int(v) < int(a)
}
// Larger compares if the value is larger than the argument.
func (v Value) Larger(a Value) bool {
return int(v) > int(a)
}
// Parse a string into a pile of money.
func Parse(in string) Value {
var maj, min int
fmt.Sscanf(in, DefaultCurrency.Format, &maj, &min)
return Value(Value(maj)*DefaultCurrency.Ratio + Value(min)%DefaultCurrency.Ratio)
}
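// For example (illustrative, with DefaultCurrency = Euro):
//
//	v := Parse("3.50€") // 350
//	v = v.Add(Value(50))
//	fmt.Println(v)      // prints "4.00€"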
// Transaction stores a virtual transaction.
type Transaction struct {
Name string `json:"name"`
Amount Value `json:"amount"`
Type Action `json:"type"`
Date time.Time `json:"date"`
}
// NewTransaction initializes a new transaction.
func NewTransaction(name string, action Action, amount Value, date time.Time) Transaction {
return Transaction{
Name: name,
Amount: amount,
Type: action,
Date: date,
}
}
// Database with a name and a list of transactions.
type Database struct {
Name string `json:"name"`
Transactions []Transaction `json:"transaction"`
}
// NewDatabase intializes a empty list of transactions.
func NewDatabase(name string) Database {
return Database{
Name: name,
Transactions: make([]Transaction, 0),
}
}
// Size returns the count of transactions.
func (db *Database) Size() int {
return len(db.Transactions)
}
// Store the transaction in the database.
func (db *Database) Store(transact Transaction) {
db.Transactions = append(db.Transactions, transact)
}
// Delete a transaction at the given position.
func (db *Database) Delete(ID int) error {
if ID < 0 || ID >= db.Size() {
return errTransactionNotFound
}
db.Transactions = append(db.Transactions[:ID], db.Transactions[ID+1:]...)
return nil
}
// Retrieve a transaction from the database.
func (db *Database) Read(ID int) (Transaction, error) {
if ID < 0 || ID >= db.Size() {
return Transaction{}, errTransactionNotFound
}
return db.Transactions[ID], nil
}
// Open a existing database.
func Open() (Database, error) {
var database Database
bytes, err := ioutil.ReadFile(defaultDatabasePath)
if err != nil {
return Database{}, err
}
	err = json.Unmarshal(bytes, &database)
	if err != nil {
		return Database{}, err
	}
return database, nil
}
// Exists is true if the database already exists.
func Exists() bool {
if _, err := os.Stat(defaultDatabasePath); os.IsNotExist(err) {
return false
}
return true
}
// Write the database to the hard drive.
func Write(database Database) error {
	data, err := json.Marshal(database)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(defaultDatabasePath, data, 0644)
}
// Store the transaction in the existing database.
func Store(transact Transaction) error {
database, err := Open()
if err != nil {
return err
}
database.Store(transact)
err = Write(database)
return err
}
// Get a transaction from an existing database.
func Get(ID int) (Transaction, error) {
database, err := Open()
if err != nil {
return Transaction{}, err
}
return database.Read(ID)
}
// Delete a transaction from an existing database.
func Delete(ID int) error {
database, err := Open()
if err != nil {
return err
}
err = database.Delete(ID)
if err != nil {
return err
}
err = Write(database)
return err
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
hello-echo/server.go | package main
import (
"fmt"
"net/http"
"os"
"time"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
"go.uber.org/zap"
)
func newSugar() (sugar *zap.SugaredLogger, logger *zap.Logger, err error) {
logger, err = zap.NewProduction()
if err != nil {
return
}
sugar = logger.Sugar()
return
}
func main() {
e := echo.New()
e.Use(middleware.Logger())
e.GET("/", func(c echo.Context) error {
sugar, logger, err := newSugar()
if err != nil {
panic(err)
}
defer logger.Sync() // flushes buffer, if any
sugar.Infow("failed to fetch URL",
// Structured context as loosely typed key-value pairs.
"url", "https://33",
"attempt", 3,
"backoff", time.Second,
)
return c.String(http.StatusOK, "Landing Page!")
})
e.GET("/hello", func(c echo.Context) error {
sugar, logger, err := newSugar()
if err != nil {
panic(err)
}
defer logger.Sync() // flushes buffer, if any
sugar.Infow("failed to fetch URL",
// Structured context as loosely typed key-value pairs.
"url", "https://33",
"attempt", 3,
"backoff", time.Second,
)
return c.String(http.StatusOK, "Hello, World!")
})
port := getPort()
e.Logger.Fatal(e.Start(fmt.Sprintf(":%v", port)))
}
func getPort() string {
port := os.Getenv("PORT")
if len(port) == 0 {
return "1323"
}
return port
}
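// Illustrative: run with a custom port via the environment, e.g.
// `PORT=8080 go run server.go`; without PORT the server listens on :1323.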
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
pkg/chartverifier/checks/charttesting_test.go | package checks
import (
"errors"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
"helm.sh/helm/v3/pkg/cli"
)
// absPathFromSourceFileLocation returns the absolute path of a file or directory under the current source file's
// location.
func absPathFromSourceFileLocation(name string) (string, error) {
_, filename, _, ok := runtime.Caller(1)
if !ok {
panic("couldn't get current path")
}
filename, err := filepath.Abs(filename)
if err != nil {
return "", fmt.Errorf("retrieving current source file's location: %w", err)
}
dirname := path.Dir(filename)
return filepath.Join(dirname, name), nil
}
func lookPath(programs ...string) error {
for _, p := range programs {
_, err := exec.LookPath(p)
if err != nil {
return fmt.Errorf("required program %q not found", p)
}
}
return nil
}
func TestChartTesting(t *testing.T) {
if os.Getenv("CHART_VERIFIER_ENABLE_CLUSTER_TESTING") == "" {
t.Skip("CHART_VERIFIER_ENABLE_CLUSTER_TESTING not set, skipping in cluster tests")
}
if err := lookPath("helm", "kubectl"); err != nil {
t.Skip(err.Error())
}
type testCase struct {
description string
opts CheckOptions
}
chartUri, err := absPathFromSourceFileLocation("psql-service-0.1.7")
if err != nil {
t.Error(err)
}
positiveTestCases := []testCase{
{
description: "providing a valid k8Project value should succeed",
opts: CheckOptions{
URI: chartUri,
Values: map[string]interface{}{
"k8Project": "default",
},
ViperConfig: viper.New(),
HelmEnvSettings: cli.New(),
},
},
}
for _, tc := range positiveTestCases {
t.Run(tc.description, func(t *testing.T) {
r, err := ChartTesting(&tc.opts)
require.NoError(t, err)
require.NotNil(t, r)
require.Equal(t, ChartTestingSuccess, r.Reason)
require.True(t, r.Ok)
})
}
negativeTestCases := []testCase{
{
description: "providing a bogus k8Project should fail",
opts: CheckOptions{
URI: chartUri,
Values: map[string]interface{}{
"k8Project": "bogus",
},
ViperConfig: viper.New(),
HelmEnvSettings: cli.New(),
},
},
{
// the chart being used in this test forces the rendered resources to have an empty namespace field, which
		// is invalid and can't be overridden using helm's namespace option.
description: "empty values should fail",
opts: CheckOptions{
URI: chartUri,
Values: map[string]interface{}{},
ViperConfig: viper.New(),
HelmEnvSettings: cli.New(),
},
},
}
for _, tc := range negativeTestCases {
t.Run(tc.description, func(t *testing.T) {
r, err := ChartTesting(&tc.opts)
require.NotNil(t, r)
require.False(t, r.Ok)
require.NoError(t, err)
require.Contains(t, r.Reason, "executing helm with args")
})
}
}
func getVersionError() (string, error) {
return "", errors.New("error")
}
func getVersionGood() (string, error) {
return "4.7.9", nil
}
type testAnnotationHolder struct {
OpenShiftVersion string
CertifiedOpenShiftVersionFlag string
}
func (holder *testAnnotationHolder) SetCertifiedOpenShiftVersion(version string) {
holder.OpenShiftVersion = version
}
func (holder *testAnnotationHolder) GetCertifiedOpenShiftVersionFlag() string {
return holder.CertifiedOpenShiftVersionFlag
}
func (holder *testAnnotationHolder) SetSupportedOpenShiftVersions(version string) {}
func TestVersionSetting(t *testing.T) {
type testCase struct {
description string
holder *testAnnotationHolder
versioner Versioner
version string
error string
}
testCases := []testCase{
{
description: "oc.Version returns 4.7.9",
holder: &testAnnotationHolder{},
versioner: getVersionGood,
version: "4.7.9",
},
{
description: "oc.Version returns error, flag set to 4.7.8",
holder: &testAnnotationHolder{CertifiedOpenShiftVersionFlag: "4.7.8"},
versioner: getVersionError,
version: "4.7.8",
},
{
description: "oc.Version returns semantic error, flag set to fourseveneight",
holder: &testAnnotationHolder{CertifiedOpenShiftVersionFlag: "fourseveneight"},
versioner: getVersionError,
error: "OpenShift version is not following SemVer spec. Invalid Semantic Version",
},
{
description: "oc.Version returns error, flag not set",
holder: &testAnnotationHolder{},
versioner: getVersionError,
error: "Missing OpenShift version. error. And the 'openshift-version' flag has not set.",
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
err := setOCVersion(tc.holder, tc.versioner)
if len(tc.error) > 0 {
require.Error(t, err)
require.Equal(t, tc.error, err.Error())
} else {
require.NoError(t, err)
require.Equal(t, tc.version, tc.holder.OpenShiftVersion)
}
})
}
}
| [
"\"CHART_VERIFIER_ENABLE_CLUSTER_TESTING\""
]
| []
| [
"CHART_VERIFIER_ENABLE_CLUSTER_TESTING"
]
| [] | ["CHART_VERIFIER_ENABLE_CLUSTER_TESTING"] | go | 1 | 0 | |
http/attestationdata_test.go | // Copyright © 2020, 2021 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package http_test
import (
"context"
"os"
"testing"
"time"
"github.com/attestantio/go-eth2-client/http"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/stretchr/testify/require"
)
func TestAttestationData(t *testing.T) {
tests := []struct {
name string
committeeIndex phase0.CommitteeIndex
slot int64 // -1 for current
}{
// {
// name: "Future",
// committeeIndex: 1,
// slot: 0x00ffffff,
// },
{
name: "Good",
committeeIndex: 0,
slot: -1,
},
}
service, err := http.New(context.Background(),
http.WithTimeout(timeout),
http.WithAddress(os.Getenv("HTTP_ADDRESS")),
)
require.NoError(t, err)
// Need to fetch current slot for attestation data.
genesis, err := service.Genesis(context.Background())
require.NoError(t, err)
slotDuration, err := service.SlotDuration(context.Background())
require.NoError(t, err)
for _, test := range tests {
var slot phase0.Slot
if test.slot == -1 {
slot = phase0.Slot(uint64(time.Since(genesis.GenesisTime).Seconds()) / uint64(slotDuration.Seconds()))
} else {
slot = phase0.Slot(uint64(test.slot))
}
t.Run(test.name, func(t *testing.T) {
attestationData, err := service.AttestationData(context.Background(), slot, test.committeeIndex)
require.NoError(t, err)
require.NotNil(t, attestationData)
})
}
}
| [
"\"HTTP_ADDRESS\""
]
| []
| [
"HTTP_ADDRESS"
]
| [] | ["HTTP_ADDRESS"] | go | 1 | 0 | |
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/merchant_center_link_service/client.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.enums.types import merchant_center_link_status
from google.ads.googleads.v8.resources.types import merchant_center_link
from google.ads.googleads.v8.services.types import merchant_center_link_service
from .transports.base import MerchantCenterLinkServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import MerchantCenterLinkServiceGrpcTransport
class MerchantCenterLinkServiceClientMeta(type):
"""Metaclass for the MerchantCenterLinkService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[MerchantCenterLinkServiceTransport]]
_transport_registry['grpc'] = MerchantCenterLinkServiceGrpcTransport
def get_transport_class(cls,
label: str = None,
) -> Type[MerchantCenterLinkServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class MerchantCenterLinkServiceClient(metaclass=MerchantCenterLinkServiceClientMeta):
"""This service allows management of links between Google Ads
and Google Merchant Center.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
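    # For example (illustrative):
    #   _get_default_mtls_endpoint("googleads.googleapis.com")
    #       -> "googleads.mtls.googleapis.com"
    #   _get_default_mtls_endpoint("googleads.sandbox.googleapis.com")
    #       -> "googleads.mtls.sandbox.googleapis.com"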
DEFAULT_ENDPOINT = 'googleads.googleapis.com'
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MerchantCenterLinkServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MerchantCenterLinkServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> MerchantCenterLinkServiceTransport:
"""Return the transport used by the client instance.
Returns:
MerchantCenterLinkServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def merchant_center_link_path(customer_id: str,merchant_center_id: str,) -> str:
"""Return a fully-qualified merchant_center_link string."""
return "customers/{customer_id}/merchantCenterLinks/{merchant_center_id}".format(customer_id=customer_id, merchant_center_id=merchant_center_id, )
@staticmethod
def parse_merchant_center_link_path(path: str) -> Dict[str,str]:
"""Parse a merchant_center_link path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)/merchantCenterLinks/(?P<merchant_center_id>.+?)$", path)
return m.groupdict() if m else {}
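    # For example (illustrative):
    #   merchant_center_link_path("123", "456")
    #       -> "customers/123/merchantCenterLinks/456"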
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(self, *,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, MerchantCenterLinkServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the merchant center link service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.MerchantCenterLinkServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, MerchantCenterLinkServiceTransport):
# transport is a MerchantCenterLinkServiceTransport instance.
if credentials:
raise ValueError('When providing a transport instance, '
'provide its credentials directly.')
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = MerchantCenterLinkServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def list_merchant_center_links(self,
request: Union[merchant_center_link_service.ListMerchantCenterLinksRequest, dict] = None,
*,
customer_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> merchant_center_link_service.ListMerchantCenterLinksResponse:
r"""Returns Merchant Center links available for this customer.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v8.services.types.ListMerchantCenterLinksRequest, dict]):
The request object. Request message for
[MerchantCenterLinkService.ListMerchantCenterLinks][google.ads.googleads.v8.services.MerchantCenterLinkService.ListMerchantCenterLinks].
customer_id (:class:`str`):
Required. The ID of the customer onto
which to apply the Merchant Center link
list operation.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.services.types.ListMerchantCenterLinksResponse:
Response message for
[MerchantCenterLinkService.ListMerchantCenterLinks][google.ads.googleads.v8.services.MerchantCenterLinkService.ListMerchantCenterLinks].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a merchant_center_link_service.ListMerchantCenterLinksRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, merchant_center_link_service.ListMerchantCenterLinksRequest):
request = merchant_center_link_service.ListMerchantCenterLinksRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_merchant_center_links]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('customer_id', request.customer_id),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_merchant_center_link(self,
request: Union[merchant_center_link_service.GetMerchantCenterLinkRequest, dict] = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> merchant_center_link.MerchantCenterLink:
r"""Returns the Merchant Center link in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v8.services.types.GetMerchantCenterLinkRequest, dict]):
The request object. Request message for
[MerchantCenterLinkService.GetMerchantCenterLink][google.ads.googleads.v8.services.MerchantCenterLinkService.GetMerchantCenterLink].
resource_name (:class:`str`):
Required. Resource name of the
Merchant Center link.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.MerchantCenterLink:
A data sharing connection, proposed
or in use, between a Google Ads Customer
and a Merchant Center account.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a merchant_center_link_service.GetMerchantCenterLinkRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, merchant_center_link_service.GetMerchantCenterLinkRequest):
request = merchant_center_link_service.GetMerchantCenterLinkRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_merchant_center_link]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('resource_name', request.resource_name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def mutate_merchant_center_link(self,
request: Union[merchant_center_link_service.MutateMerchantCenterLinkRequest, dict] = None,
*,
customer_id: str = None,
operation: merchant_center_link_service.MerchantCenterLinkOperation = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> merchant_center_link_service.MutateMerchantCenterLinkResponse:
r"""Updates status or removes a Merchant Center link.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `FieldMaskError <>`__
`HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__
`RequestError <>`__
Args:
request (Union[google.ads.googleads.v8.services.types.MutateMerchantCenterLinkRequest, dict]):
The request object. Request message for
[MerchantCenterLinkService.MutateMerchantCenterLink][google.ads.googleads.v8.services.MerchantCenterLinkService.MutateMerchantCenterLink].
customer_id (:class:`str`):
Required. The ID of the customer
being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operation (:class:`google.ads.googleads.v8.services.types.MerchantCenterLinkOperation`):
Required. The operation to perform on
the link
This corresponds to the ``operation`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.services.types.MutateMerchantCenterLinkResponse:
Response message for Merchant Center
link mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operation]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a merchant_center_link_service.MutateMerchantCenterLinkRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, merchant_center_link_service.MutateMerchantCenterLinkRequest):
request = merchant_center_link_service.MutateMerchantCenterLinkRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operation is not None:
request.operation = operation
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_merchant_center_link]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('customer_id', request.customer_id),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
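# Illustrative usage sketch (added for clarity; not generated client code).
# Credential setup is assumed to come from the standard google-ads
# configuration, and the response field names follow the v8 protos:
#
#   client = MerchantCenterLinkServiceClient()
#   response = client.list_merchant_center_links(customer_id="1234567890")
#   for link in response.merchant_center_links:
#       print(link.resource_name, link.status)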
__all__ = (
'MerchantCenterLinkServiceClient',
)
| [] | [] | ["GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE"] | [] | ["GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE"] | python | 2 | 0 |
source/parser.py | from bs4 import BeautifulSoup
import os
import csv
import time
import datetime
#returns a list of the file names in the given directory
def getAllFiles(dir):
files = []
listing = os.listdir(dir)
for fileName in listing:
files.append(fileName)
return files
#determine (and create, if needed) the output folder for the quarter
def getOutputFolder(folder, files):
#get quarter
filename = getCleanData(folder, files[0])[0][0]
quarter = filename[:filename.find('_')]
#assign output folders
outputFolder = "./Data/" + quarter + "/"
if (os.environ.get('cronFlag') == "True"):
outputFolder = "./DataCrontab/" + quarter + "/"
    #create the output folder if it does not already exist
if not os.path.exists(os.path.dirname(outputFolder)):
os.makedirs(os.path.dirname(outputFolder))
return outputFolder
#build the list of cleaned course rows parsed from one HTML file
def getCleanData(folder, file):
cleanedData = []
#create bs object
soup = BeautifulSoup(open(folder + file), "html.parser")
#find all table bodies and store them in list
rows = soup.find_all('tr', {'class':'CourseInfoRow'})
#iterate over list and create our raw results
data = []
for row in rows:
colData = []
cols = row.find_all('td')
for col in cols:
for string in col.stripped_strings:
colData.append(string)
data.append(colData)
#getting significant information
for course in data:
cleanedDataTemp = []
#append filename for information about Quarter and Year
cleanedDataTemp.append(str(file))
cleanedDataTemp.append(course[0])
cleanedDataTemp.append(course[course.index('College:') + 1])
cleanedDataTemp.append(course[course.index('Units:') + 1])
cleanedDataTemp.append(course[course.index('Grading:') + 1])
        #grab the last four entries, which contain the remaining schedule details
for index in range(len(course) - 4, len(course)):
cleanedDataTemp.append(course[index])
#add the cleaned course data to cleanedData
if cleanedDataTemp[len(cleanedDataTemp) - 2] == 'T B A':
cleanedDataTemp[len(cleanedDataTemp) - 2] = 'N/A'
if cleanedDataTemp[len(cleanedDataTemp) - 3] == 'T B A':
cleanedDataTemp[len(cleanedDataTemp) - 3] = 'N/A'
cleanedDataTemp[len(cleanedDataTemp) - 4] = 'N/A'
elif ('am' not in cleanedDataTemp[len(cleanedDataTemp) - 3] and 'pm' not in cleanedDataTemp[len(cleanedDataTemp) - 3]):
cleanedDataTemp = []
cleanedData.append(cleanedDataTemp)
print('parsed: ' + file)
return cleanedData
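# Illustrative shape of one cleaned row (values are hypothetical and depend on
# the scraped schedule HTML): the five leading fields are
# [filename, course, college, units, grading], followed by the last four
# columns kept from the source table row.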
#get start time
start = time.time()
#directory containing html files to parse ***EDIT THIS PATH IF USED ON A DIFFERENT MACHINE***
inputFolder = './Output/'
filesToParse = getAllFiles(inputFolder)
#format output directory
outputFolder = getOutputFolder(inputFolder, filesToParse)
#output to CSV file:
now = datetime.datetime.now()
with open(outputFolder + "data_" + str(now.month) + "_" + str(now.day) + "_" + str(now.hour) + ".csv", "w+", newline="") as f:
writer = csv.writer(f)
for file in filesToParse:
writer.writerows(getCleanData(inputFolder, file))
end = time.time()
#print elapsed time
print('Time to parse: ' + str(end - start))
#print out CSV file (testing purposes)
#os.system('column -s, -t < ./Data/data.csv | less -#2 -N -S')
#Print out raw results:
"""
for course in data:
i = 0
for x in course:
print(str(i) + ": ")
print(x)
i+=1
print('\n')
""" | []
| []
| [
"cronFlag"
]
| [] | ["cronFlag"] | python | 1 | 0 | |
peer-finder.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// A small utility program to lookup hostnames of endpoints in a service.
package main
import (
"context"
"flag"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"os/signal"
"regexp"
"strings"
"syscall"
"time"
"github.com/kballard/go-shellquote"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"
)
const (
pollPeriod = 1 * time.Second
)
type AddressType string
const (
AddressTypeDNS AddressType = "DNS"
// Uses spec.podIP as address for db pods.
AddressTypeIP AddressType = "IP"
// Uses first IPv4 address from spec.podIP, spec.podIPs fields as address for db pods.
AddressTypeIPv4 AddressType = "IPv4"
// Uses first IPv6 address from spec.podIP, spec.podIPs fields as address for db pods.
AddressTypeIPv6 AddressType = "IPv6"
)
var (
kc kubernetes.Interface
controller *Controller
log = klogr.New().WithName("peer-finder")
)
var (
masterURL = flag.String("master", "", "The address of the Kubernetes API server (overrides any value in kubeconfig)")
kubeconfigPath = flag.String("kubeconfig", "", "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
hostsFilePath = flag.String("hosts-file", "/etc/hosts", "Path to hosts file.")
onChange = flag.String("on-change", "", "Script to run on change, must accept a new line separated list of peers via stdin.")
onStart = flag.String("on-start", "", "Script to run on start, must accept a new line separated list of peers via stdin.")
addrType = flag.String("address-type", string(AddressTypeDNS), "Address type used to communicate with peers. Possible values: DNS, IP, IPv4, IPv6.")
svc = flag.String("service", "", "Governing service responsible for the DNS records of the domain this pod is in.")
namespace = flag.String("ns", "", "The namespace this pod is running in. If unspecified, the POD_NAMESPACE env var is used.")
domain = flag.String("domain", "", "The Cluster Domain which is used by the Cluster, if not set tries to determine it from /etc/resolv.conf file.")
selector = flag.String("selector", "", "The selector is used to select the pods whose ip will use to form peers")
)
func lookupDNS(svcName string) (sets.String, error) {
endpoints := sets.NewString()
_, srvRecords, err := net.LookupSRV("", "", svcName)
if err != nil {
return endpoints, err
}
for _, srvRecord := range srvRecords {
		// The SRV record targets end in a "." for the root domain
ep := fmt.Sprintf("%v", srvRecord.Target[:len(srvRecord.Target)-1])
endpoints.Insert(ep)
}
return endpoints, nil
}
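// Illustrative example (service and namespace names are hypothetical): for a
// headless service "mysvc" in namespace "ns", the SRV lookup returns targets
// such as "pod-0.mysvc.ns.svc.cluster.local." and the trailing dot is trimmed
// before the endpoint is stored.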
func lookupHostIPs(hostName string) (sets.String, error) {
ips := sets.NewString()
hostIPs, err := net.LookupIP(hostName)
if err != nil {
return nil, err
}
for _, hostIP := range hostIPs {
ips.Insert(hostIP.String())
}
return ips, nil
}
func shellOut(script string, peers, hostIPs sets.String, fqHostname string) error {
// add extra newline at the end to ensure end of line for bash read command
sendStdin := strings.Join(peers.List(), "\n") + "\n"
fields, err := shellquote.Split(script)
if err != nil {
return err
}
if len(fields) == 0 {
return fmt.Errorf("missing command: %s", script)
}
log.Info("exec", "command", fields[0], "stdin", sendStdin)
cmd := exec.Command(fields[0], fields[1:]...)
cmd.Stdin = strings.NewReader(sendStdin)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
info, err := retrieveHostInfo(fqHostname, hostIPs, peers)
if err != nil {
return err
}
envs := sets.NewString(os.Environ()...)
envs.Insert("HOST_ADDRESS=" + info.HostAddr) // fqdn, ipv4, ipv6
envs.Insert("HOST_ADDRESS_TYPE=" + string(info.HostAddrType)) // DNS, IPv4, IPv6
// WARNING: Potentially overwrites the POD_IP from container env before passing to script in case of IPv4 or IPv6 in a dual stack cluster
envs.Insert("POD_IP=" + info.PodIP) // used for whitelist
envs.Insert("POD_IP_TYPE=" + string(info.PodIPType)) // IPv4, IPv6
cmd.Env = envs.List()
err = cmd.Run()
if err != nil {
return fmt.Errorf("execution failed of script=%s. reason:%v", script, err)
}
return nil
}
type HostInfo struct {
// fqdn, ipv4, ipv6
HostAddr string
// DNS, IPv4, IPv6
HostAddrType AddressType
// used for whitelist
// WARNING: Potentially overwrites the POD_IP from container env before passing to script in case of IPv4 or IPv6 in a dual stack cluster
PodIP string
// IPv4 or IPv6
PodIPType AddressType
}
func retrieveHostInfo(fqHostname string, hostIPs, peers sets.String) (*HostInfo, error) {
var info HostInfo
var err error
switch AddressType(*addrType) {
case AddressTypeDNS:
info.HostAddr = fqHostname
info.HostAddrType = AddressTypeDNS
info.PodIP = os.Getenv("POD_IP") // set using Downward api
info.PodIPType, err = IPType(info.PodIP)
if err != nil {
return nil, err
}
case AddressTypeIP:
hostAddrs := peers.Intersection(hostIPs).List()
if len(hostAddrs) == 0 {
return nil, fmt.Errorf("none of the hostIPs %q found in peers %q", strings.Join(hostIPs.List(), ","), strings.Join(peers.List(), ","))
}
info.HostAddr = hostAddrs[0]
info.HostAddrType, err = IPType(info.HostAddr)
if err != nil {
return nil, err
}
info.PodIP = info.HostAddr
info.PodIPType = info.HostAddrType
case AddressTypeIPv4:
hostAddrs := peers.Intersection(hostIPs).List()
if len(hostAddrs) == 0 {
return nil, fmt.Errorf("none of the hostIPs %q found in peers %q", strings.Join(hostIPs.List(), ","), strings.Join(peers.List(), ","))
}
info.HostAddr = hostAddrs[0]
info.HostAddrType = AddressTypeIPv4
info.PodIP = info.HostAddr
info.PodIPType = info.HostAddrType
case AddressTypeIPv6:
hostAddrs := peers.Intersection(hostIPs).List()
if len(hostAddrs) == 0 {
return nil, fmt.Errorf("none of the hostIPs %q found in peers %q", strings.Join(hostIPs.List(), ","), strings.Join(peers.List(), ","))
}
info.HostAddr = hostAddrs[0]
info.HostAddrType = AddressTypeIPv6
info.PodIP = info.HostAddr
info.PodIPType = info.HostAddrType
}
return &info, nil
}
func IPType(s string) (AddressType, error) {
ip := net.ParseIP(s)
if ip == nil {
return "", fmt.Errorf("%s is not a valid IP", s)
}
if strings.ContainsRune(s, ':') {
return AddressTypeIPv6, nil
}
return AddressTypeIPv4, nil
}
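// Illustrative behavior of IPType (inputs are hypothetical):
//
//	IPType("10.0.0.7")  -> AddressTypeIPv4, nil
//	IPType("fd00::1")   -> AddressTypeIPv6, nil
//	IPType("not-an-ip") -> "", error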
func forwardSigterm() <-chan struct{} {
shutdownHandler := make(chan os.Signal, 1)
ctx, cancel := context.WithCancel(context.Background())
signal.Notify(shutdownHandler, syscall.SIGTERM)
go func() {
<-shutdownHandler
pgid, err := syscall.Getpgid(os.Getpid())
if err != nil {
log.Error(err, "failed to retrieve pgid for process", "pid", os.Getpid())
} else {
log.Info("sending SIGTERM", "pgid", pgid)
err = syscall.Kill(-pgid, syscall.SIGTERM)
if err != nil {
log.Error(err, "failed to send SIGTERM", "pgid", pgid)
}
}
cancel()
fmt.Println("waiting for all child process to complete for SIGTERM")
<-shutdownHandler
}()
return ctx.Done()
}
func main() {
klog.InitFlags(nil)
_ = flag.Set("v", "3")
flag.Parse()
stopCh := forwardSigterm()
// TODO: Exit if there's no on-change?
if err := run(stopCh); err != nil {
log.Error(err, "peer finder exiting")
}
klog.Flush()
log.Info("Block until Kubernetes sends SIGKILL")
select {}
}
func run(stopCh <-chan struct{}) error {
ns := *namespace
if ns == "" {
ns = os.Getenv("POD_NAMESPACE")
}
hostname, err := os.Hostname()
if err != nil {
return fmt.Errorf("failed to get hostname: %s", err)
}
var domainName string
// If domain is not provided, try to get it from resolv.conf
if *domain == "" {
resolvConfBytes, err := ioutil.ReadFile("/etc/resolv.conf")
resolvConf := string(resolvConfBytes)
if err != nil {
return fmt.Errorf("unable to read /etc/resolv.conf")
}
var re *regexp.Regexp
if ns == "" {
			// Looking for a domain that looks like *.svc.**
re, err = regexp.Compile(`\A(.*\n)*search\s{1,}(.*\s{1,})*(?P<goal>[a-zA-Z0-9-]{1,63}.svc.([a-zA-Z0-9-]{1,63}\.)*[a-zA-Z0-9]{2,63})`)
} else {
// Looking for a domain that looks like svc.**
re, err = regexp.Compile(`\A(.*\n)*search\s{1,}(.*\s{1,})*(?P<goal>svc.([a-zA-Z0-9-]{1,63}\.)*[a-zA-Z0-9]{2,63})`)
}
if err != nil {
return fmt.Errorf("failed to create regular expression: %v", err)
}
groupNames := re.SubexpNames()
result := re.FindStringSubmatch(resolvConf)
for k, v := range result {
if groupNames[k] == "goal" {
if ns == "" {
// Domain is complete if ns is empty
domainName = v
} else {
// Need to convert svc.** into ns.svc.**
domainName = ns + "." + v
}
break
}
}
log.Info("determined", "domain", domainName)
} else {
domainName = strings.Join([]string{ns, "svc", *domain}, ".")
}
if (*selector == "" && *svc == "") || domainName == "" || (*onChange == "" && *onStart == "") {
return fmt.Errorf("incomplete args, require -on-change and/or -on-start, -service and -ns or an env var for POD_NAMESPACE")
}
if *selector != "" {
config, err := clientcmd.BuildConfigFromFlags(*masterURL, *kubeconfigPath)
if err != nil {
return fmt.Errorf("could not get Kubernetes config: %s", err)
}
kc, err = kubernetes.NewForConfig(config)
if err != nil {
return err
}
RunHostAliasSyncer(kc, ns, *selector, *addrType, stopCh)
}
myName := strings.Join([]string{hostname, *svc, domainName}, ".")
hostIPs, err := lookupHostIPs(hostname)
if err != nil {
return fmt.Errorf("failed to get ips from host %v", err)
}
script := *onStart
if script == "" {
script = *onChange
log.Info(fmt.Sprintf("no on-start supplied, on-change %q will be applied on start.", script))
}
for peers := sets.NewString(); script != ""; time.Sleep(pollPeriod) {
var newPeers sets.String
if *selector != "" {
newPeers, err = controller.listPodsIP()
if err != nil {
return err
}
if newPeers.Equal(peers) || !newPeers.HasAny(hostIPs.List()...) {
log.Info("have not found myself in list yet.", "hostname", myName, "hosts in list", strings.Join(newPeers.List(), ", "))
continue
}
} else {
newPeers, err = lookupDNS(*svc)
if err != nil {
log.Info(err.Error())
continue
}
if newPeers.Equal(peers) || !newPeers.Has(myName) {
log.Info("have not found myself in list yet.", "hostname", myName, "hosts in list", strings.Join(newPeers.List(), ", "))
continue
}
}
log.Info("peer list updated", "was", peers.List(), "now", newPeers.List())
// add extra newline at the end to ensure end of line for bash read command
err = shellOut(script, newPeers, hostIPs, myName)
if err != nil {
return err
}
peers = newPeers
script = *onChange
}
return nil
}
| ["\"POD_IP\"", "\"POD_NAMESPACE\""] | [] | ["POD_NAMESPACE", "POD_IP"] | [] | ["POD_NAMESPACE", "POD_IP"] | go | 2 | 0 |
src/api_rest/Gui/Main/ClientGui.py | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from api_rest.Gui.Main.MainWindow import *
import json
import requests
import os.path
import pandas as pd
import sys
from matplotlib import pyplot as plt
import os
os.environ['no_proxy'] = '127.0.0.1,localhost'
__author__ = 'Santiago Peñate Vera'
# workaround required for this to work on Windows
proxyDict = {'no': 'pass',}
"""
This module implements the handler of the main GUI of GridCal.
"""
########################################################################################################################
# Main Window
########################################################################################################################
def get_list_model(lst, checks=False):
"""
Pass a list to a list model
"""
list_model = QStandardItemModel()
if lst is not None:
if not checks:
for val in lst:
# for the list model
item = QStandardItem(str(val))
item.setEditable(False)
list_model.appendRow(item)
else:
for val in lst:
# for the list model
item = QStandardItem(str(val))
item.setEditable(False)
item.setCheckable(True)
item.setCheckState(QtCore.Qt.Checked)
list_model.appendRow(item)
return list_model
class MainGUI(QMainWindow):
def __init__(self, parent=None, url='http://0.0.0.0:5000'):
"""
@param parent:
"""
# create main window
QWidget.__init__(self, parent)
self.ui = Ui_mainWindow()
self.ui.setupUi(self)
self.url = url
# Buttons
self.ui.refresh_items_pushButton.clicked.connect(self.update_list)
self.ui.send_pushButton.clicked.connect(self.send)
# call action
self.ui.url_lineEdit.setText(self.url)
try:
self.get_grid_name()
self.update()
self.update_voltages()
        except Exception:
self.msg('Could not connect to ' + self.url, 'Connection')
def msg(self, text, title="Warning"):
"""
Message box
:param text: Text to display
:param title: Name of the window
"""
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText(text)
# msg.setInformativeText("This is additional information")
msg.setWindowTitle(title)
# msg.setDetailedText("The details are as follows:")
msg.setStandardButtons(QMessageBox.Ok)
retval = msg.exec_()
def get_grid_name(self):
"""
Get the grid name
Returns:
"""
response = requests.get(self.url + '/grid_name', proxies=proxyDict)
if response.status_code == 200:
jData = json.loads(response.content.decode('UTF-8'))
print(jData)
self.ui.status_label.setText(str(jData))
else:
print('error', response)
def update_list(self):
"""
Update the values
Returns:
"""
# pick URL from GUI
self.url = self.ui.url_lineEdit.text().strip()
response = requests.get(self.url + '/loads_list', proxies=proxyDict)
if response.status_code == 200:
jData = json.loads(response.content.decode('UTF-8'))
lst = jData['loads']
mdl = get_list_model(lst)
self.ui.items_listView.setModel(mdl)
print(jData)
else:
print('error', response)
def send(self):
"""
Send data
Returns:
"""
if len(self.ui.items_listView.selectedIndexes()) > 0:
idx = self.ui.items_listView.selectedIndexes()[0].row()
mx = self.ui.max_val_doubleSpinBox.value()
val = self.ui.value_horizontalSlider.value() / 100.0
P = mx * val
Q = 0.8 * P
data = {'idx': idx, 'P': P, 'Q': Q}
response = requests.post(self.url + '/set_load', json=data, proxies=proxyDict)
if response.status_code == 200:
jData = json.loads(response.content.decode('UTF-8'))
self.ui.status_label.setText(str(jData))
print(jData)
self.update_voltages()
else:
print('error', response)
self.ui.status_label.setText('Response: ' + str(response))
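    # Illustrative request issued by send() above (numbers are hypothetical):
    #   POST http://127.0.0.1:5000/set_load  json={'idx': 3, 'P': 12.5, 'Q': 10.0}
    # The server is expected to answer with a JSON status payload.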
def update_voltages(self):
"""
Returns:
"""
response = requests.get(self.url + '/voltages', proxies=proxyDict)
if response.status_code == 200:
jData = json.loads(response.content.decode('UTF-8'))
voltages = jData['val']
print(voltages)
self.ui.status_label.setText(str(response.status_code))
# clear the plot display
self.ui.resultsPlot.clear()
# get the plot axis
ax = self.ui.resultsPlot.get_axis()
df = pd.DataFrame(data=voltages)
df.fillna(0, inplace=True)
df.plot(ax=ax, kind='bar')
ax.axhline(0.9, c='r', alpha=0.7)
ax.axhline(1.1, c='r', alpha=0.7)
self.ui.resultsPlot.redraw()
else:
print('error', response)
def run():
"""
Main function to run the GUI
:return:
"""
print('loading...')
app = QApplication(sys.argv)
# url = 'http://192.168.1.103:5000'
# url = 'http://192.168.197.22:5000'
url = 'http://127.0.0.1:5000'
window = MainGUI(url=url)
window.resize(1.61 * 700.0, 700.0) # golden ratio :)
window.show()
sys.exit(app.exec_())
if __name__ == "__main__":
run()
| [] | [] | ["no_proxy"] | [] | ["no_proxy"] | python | 1 | 0 |
misc_dev/Blender_Screenwriter_original.py | bl_info = {
"name": "Blender Screenwriter with Fountain Live Preview",
"author": "Tintwotin, Andrea Monzini, Fountain Module by Colton J. Provias & Manuel Senfft, Export Module by Martin Vilcans. Fountain Format by Nima Yousefi & John August",
"version": (0, 1),
"blender": (2, 81, 0),
"location": "Text Editor > Sidebar",
"description": "Adds functions for editing of Fountain file with live screenplay preview",
"warning": "",
"wiki_url": "",
"category": "Text Editor",
}
import bpy
import textwrap
import subprocess
import os
import sys
import fountain
from bpy.props import IntProperty, BoolProperty, PointerProperty, StringProperty, EnumProperty
from pathlib import Path
from bpy_extras.io_utils import ExportHelper
from bpy.types import Operator
def get_mergables(areas):
xs, ys = dict(), dict()
for a in areas:
xs[a.x] = a
ys[a.y] = a
for area in reversed(areas):
tx = area.x + area.width + 1
ty = area.y + area.height + 1
if tx in xs and xs[tx].y == area.y and xs[tx].height == area.height:
return area, xs[tx]
elif ty in ys and ys[ty].x == area.x and ys[ty].width == area.width:
return area, ys[ty]
return None, None
def teardown(context):
while len(context.screen.areas) > 1:
a, b = get_mergables(context.screen.areas)
if a and b:
bpy.ops.screen.area_join(cursor=(a.x, a.y)) #,max_x=b.x,max_y=b.y)
area = context.screen.areas[0]
region = area.regions[0]
blend_data = context.blend_data
bpy.ops.screen.screen_full_area(
dict(
screen=context.screen,
window=context.window,
region=region,
area=area,
blend_data=blend_data))
bpy.ops.screen.back_to_previous(
dict(
screen=context.screen,
window=context.window,
region=region,
area=area,
blend_data=blend_data))
def split_area(window,
screen,
region,
area,
xtype,
direction="VERTICAL",
factor=0.5,
mouse_x=-100,
mouse_y=-100):
beforeptrs = set(list((a.as_pointer() for a in screen.areas)))
bpy.ops.screen.area_split(
dict(region=region, area=area, screen=screen, window=window),
direction=direction,
factor=factor)
afterptrs = set(list((a.as_pointer() for a in screen.areas)))
newareaptr = list(afterptrs - beforeptrs)
newarea = area_from_ptr(newareaptr[0])
newarea.type = xtype
return newarea
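# Illustrative call of split_area (arguments are hypothetical): split the
# current area horizontally and turn the new one into a console:
#   new_area = split_area(window, screen, region, area, 'CONSOLE',
#                         direction='HORIZONTAL', factor=0.3)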
def area_from_ptr(ptr):
for screen in bpy.data.screens:
for area in screen.areas:
if area.as_pointer() == ptr:
return area
class SCREENWRITER_PT_panel(bpy.types.Panel):
"""Preview fountain script as formatted screenplay"""
bl_label = "Screenwriter"
bl_space_type = 'TEXT_EDITOR'
bl_region_type = 'UI'
bl_category = "Text"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
layout.operator("text.dual_view")
layout.operator("scene.preview_fountain")
repl = context.scene.text_replace
layout.prop(repl, "enabled")
layout.operator("text.scenes_to_strips")
class SCREENWRITER_OT_preview_fountain(bpy.types.Operator):
'''Updates the preview'''
bl_idname = "scene.preview_fountain"
bl_label = "Refresh"
@classmethod
def poll(cls, context):
space = bpy.context.space_data
filepath = bpy.context.area.spaces.active.text.filepath
if filepath.strip() == "": return False
return ((space.type == 'TEXT_EDITOR')
and Path(filepath).suffix == ".fountain")
def execute(self, context):
space = bpy.context.space_data
dir = os.path.dirname(bpy.data.filepath)
if not dir in sys.path:
sys.path.append(dir)
current_text = os.path.basename(bpy.context.space_data.text.filepath)
if current_text.strip() == "": return
fountain_script = bpy.context.area.spaces.active.text.as_string()
if fountain_script.strip() == "": return {"CANCELLED"}
F = fountain.Fountain(fountain_script)
filename = "Preview.txt"
if filename not in bpy.data.texts:
bpy.data.texts.new(filename) # New document in Text Editor
else:
bpy.data.texts[filename].clear() # Clear existing text
# Get number of header lines
contents = fountain_script.strip().replace('\r', '')
contents_has_metadata = ':' in contents.splitlines()[0]
contents_has_body = '\n\n' in contents
if contents_has_metadata and contents_has_body:
lines = fountain_script.split('\n\n')
lines = lines[0].splitlines()
current_line = bpy.data.texts[current_text].current_line_index - len(
lines) - 1
# elif contents_has_metadata and not contents_has_body:
# self._parse_head(contents.splitlines())
else:
current_line = bpy.data.texts[current_text].current_line_index
current_character = bpy.data.texts[current_text].current_character
jump_to_line = 0
margin = " " * 4
document_width = 60 + len(margin)
action_wrapper = textwrap.TextWrapper(width=document_width)
dialogue_wrapper = textwrap.TextWrapper(
width=37 + int(len(margin) / 2))
dialogue_indentation = 13 + int(len(margin) / 2)
cursor_indentation = margin
add_lines = 0
add_characters = current_character
cursor_indentation_actual = ""
text = bpy.context.area.spaces.active.text
current_line_length = len(text.current_line.body)
add_lines_actual = 0
add_characters_actual = 0
# This is the way to use title stuff
# for meta in iter(F.metadata.items()):
# if meta[0] == 'title':
# bpy.data.texts[filename].write((str(meta[1])).center(document_width)+chr(10))
add_lines = 0
for fc, f in enumerate(F.elements):
add_lines = -1
#add_lines = 0 #int(document_width/current_character)
add_characters = current_character
if f.element_type == 'Scene Heading':
if str(f.scene_number) != "": f.scene_number = f.scene_number+ " "
bpy.data.texts[filename].write(
margin + f.scene_number+ f.scene_abbreviation.upper() + " " + f.element_text.upper() +
chr(10))
cursor_indentation = margin
elif f.element_type == 'Action' and f.is_centered == False:
action = f.element_text
action_list = action_wrapper.wrap(text=action)
add_action_lines = 0
for action in action_list:
bpy.data.texts[filename].write(margin + action + chr(10))
cursor_indentation = margin
elif f.element_type == 'Action' and f.is_centered == True:
bpy.data.texts[filename].write(
margin + f.element_text.center(document_width) + chr(10))
cursor_indentation = margin + ("_" * (int(
(document_width / 2 - len(f.element_text) / 2)) - 2))
elif f.element_type == 'Character':
bpy.data.texts[filename].write(
margin + f.element_text.center(document_width).upper() +
chr(10)) # .upper()
cursor_indentation = margin + ("_" * ((f.element_text.center(
document_width)).find(f.element_text)))
elif f.element_type == 'Parenthetical':
bpy.data.texts[filename].write(
margin + f.element_text.center(document_width).lower() +
chr(10)) # .lower()
cursor_indentation = margin + ("_" * int(
(document_width / 2 - len(f.element_text) / 2)))
elif f.element_type == 'Dialogue':
dialogue = f.element_text
line_list = dialogue_wrapper.wrap(text=dialogue)
for dialogue in line_list:
bpy.data.texts[filename].write(margin + (
" " * dialogue_indentation + dialogue) + chr(10))
# if add_characters >= len(dialogue):
# add_characters = add_characters-len(dialogue)
# add_lines += 1
cursor_indentation = margin + (" " * dialogue_indentation
) # + (" "*add_characters)
elif f.element_type == 'Synopsis': # Ignored by Fountain formatting
bpy.data.texts[filename].write(chr(10))
elif f.element_type == 'Page Break':
bpy.data.texts[filename].write(
chr(10) + margin + ("_" * document_width) + chr(10))
elif f.element_type == 'Boneyard': # Ignored by Fountain formatting
bpy.data.texts[filename].write(chr(10))
elif f.element_type == 'Comment': # Ignored by Fountain formatting
bpy.data.texts[filename].write(chr(10))
elif f.element_type == 'Section Heading': # Ignored by Fountain formatting
bpy.data.texts[filename].write(chr(10))
elif f.element_type == 'Transition':
bpy.data.texts[filename].write(
margin + f.element_text.rjust(document_width).upper() + chr(10))
cursor_indentation = margin + ("_" * (
document_width - len(f.element_text)))
elif f.element_type == 'Empty Line':
bpy.data.texts[filename].write(chr(10))
if current_line >= f.original_line and f.original_line != 0: #current_line
jump_to_line = bpy.data.texts[filename].current_line_index
cursor_indentation_actual = cursor_indentation
add_lines_actual = add_lines
line = jump_to_line - 1
if line < 0: line = 0
bpy.data.texts[filename].current_line_index = line
cur = current_character + len(cursor_indentation_actual)
bpy.data.texts[filename].select_set(line, cur, line, cur)
return {"FINISHED"}
class TEXT_OT_dual_view(bpy.types.Operator):
'''Toggles screenplay preview'''
bl_idname = "text.dual_view"
bl_label = "Preview"
@classmethod
def poll(cls, context):
space = bpy.context.space_data
filepath = bpy.context.area.spaces.active.text.filepath
if filepath.strip() == "": return False
return ((space.type == 'TEXT_EDITOR')
and Path(filepath).suffix == ".fountain")
original_area = None
def execute(self, context):
main_scene = bpy.context.scene
count = 0
original_type = bpy.context.area.type
# # setting font (on Windows) not working
# try:
# for a in bpy.context.screen.areas:
# if a.type == 'PREFERENCES':
# bpy.context.area.type ="PREFERENCES"
# bpy.context.preferences.view.font_path_ui_mono("C:\\Windows\\Fonts\\Courier.ttf")
# break
# except RuntimeError as ex:
# error_report = "\n".join(ex.args)
# print("Caught error:", error_report)
# #pass
bpy.context.area.type = original_type
self.original_area = context.area
original = context.copy()
thisarea = context.area
otherarea = None
tgxvalue = thisarea.x + thisarea.width + 1
thistype = context.area.type
arealist = list(context.screen.areas)
filename = "Preview.txt"
if filename not in bpy.data.texts:
bpy.ops.scene.preview_fountain()
fountain_script = bpy.context.area.spaces.active.text.as_string()
if fountain_script.strip() == "":
msg = "Text-block can't be empty!"
self.report({'INFO'}, msg)
return {"CANCELLED"}
for area in context.screen.areas:
if area == thisarea:
continue
elif area.x == tgxvalue and area.y == thisarea.y:
otherarea = area
break
if otherarea: #leave trim-mode
# The 2.81 API doesn't have an option for automatic joining.
bpy.ops.screen.area_join(
'INVOKE_DEFAULT',
cursor=(otherarea.x, otherarea.y + int(otherarea.height / 2)))
# normal settings
bpy.ops.screen.screen_full_area()
bpy.ops.screen.screen_full_area()
override = context.copy()
area = self.original_area
override['area'] = area
override['space_data'] = area.spaces.active
return {"FINISHED"}
else: # enter dual-mode
areax = None
#split
window = context.window
region = context.region
screen = context.screen
main = context.area
main.type = "TEXT_EDITOR"
ctrlPanel = bpy.ops.screen.area_split(
direction="VERTICAL") #, factor=0.7)
#settings for preview 2.
bpy.ops.screen.screen_full_area()
bpy.ops.screen.screen_full_area()
override = original
area = self.original_area
override['area'] = area
override['space_data'] = area.spaces.active
override['space_data'].text = bpy.data.texts['Preview.txt']
override['space_data'].show_region_ui = False
override['space_data'].show_region_header = False
override['space_data'].show_region_footer = False
override['space_data'].show_line_numbers = False
override['space_data'].show_syntax_highlight = False
override['space_data'].show_word_wrap = False
for area in context.screen.areas:
if area not in arealist:
areax = area
break
if areax:
areax.type = thistype
return {"FINISHED"}
return {"CANCELLED"}
handler = None
def get_space(context):
for area in context.screen.areas:
if area.type == "TEXT_EDITOR":
return area.spaces.active
def text_handler(spc, context):
scene = bpy.context.scene
text = bpy.context.area.spaces.active.text
line = text.current_line.body
current_text = os.path.basename(bpy.context.space_data.text.filepath)
if current_text.strip() == "": return
current_character = bpy.data.texts[current_text].current_character
if not text:
return
if scene.last_line is None and scene.last_line_index != text.current_line_index:
scene.last_line = line
scene.last_line_index = text.current_line_index
if scene.last_character is None: # scene.last_character != current_character:
scene.last_character = current_character
if line != scene.last_line or len(line) != len(scene.last_line):
bpy.ops.scene.preview_fountain()
elif current_character != scene.last_character:
bpy.ops.scene.preview_fountain()
scene.last_line = line
scene.last_character = current_character
def redraw(context):
for window in context.window_manager.windows:
for area in window.screen.areas:
if area.type == 'TEXT_EDITOR':
area.tag_redraw()
def activate_handler(self, context):
global handler
spc = get_space(context)
if not spc:
return
enabled = context.scene.text_replace.enabled
if enabled:
handler = spc.draw_handler_add(text_handler, (
spc,
context,
), "WINDOW", "POST_PIXEL")
else:
if handler is not None:
spc.draw_handler_remove(handler, "WINDOW")
handler = None
class TextReplaceProperties(bpy.types.PropertyGroup):
enabled: BoolProperty(
name="Live Preview",
description="Enables live screenplay preview",
update=activate_handler,
default=False)
@classmethod
def poll(cls, context):
space = bpy.context.space_data
filepath = bpy.context.area.spaces.active.text.filepath
if filepath.strip() == "": return False
return ((space.type == 'TEXT_EDITOR')
and Path(filepath).suffix == ".fountain")
def execute(self, context):
return {"FINISHED"}
class SCREENWRITER_OT_export(Operator, ExportHelper):
"""Export Screenplay"""
bl_idname = "export.screenplay"
bl_label = "Export"
filename_ext = ""
filter_glob: StringProperty(
default="*.html;*.pdf;*.fdx",
options={'HIDDEN'},
maxlen=255,
)
# ("PDF", "pdf", "Exports pdf"), #not working currently
opt_exp: EnumProperty(
items=(("HTML", "Html", "Exports html"), ("PDF", "pdf", "Exports pdf"), ("FDX", "fdx", "Final Draft")),
name="Export Data Type",
description="Choose what format to export ",
default="HTML")
open_browser: BoolProperty(
name="Open in Browser",
description="Open exported html or pdf in browser",
default=True,
)
@classmethod
def poll(cls, context):
space = bpy.context.space_data
filepath = bpy.context.area.spaces.active.text.filepath
if filepath.strip() == "": return False
return ((space.type == 'TEXT_EDITOR')
and Path(filepath).suffix == ".fountain")
def execute(self, context):
return screenplay_export(context, self.filepath, self.opt_exp,
self.open_browser)
def screenwriter_menu_export(self, context):
self.layout.separator()
self.layout.operator(
SCREENWRITER_OT_export.bl_idname, text="Export Screenplay")
def screenplay_export(context, screenplay_filepath, opt_exp, open_browser):
import os
dir = os.path.dirname(bpy.data.filepath)
if not dir in sys.path:
sys.path.append(dir)
fountain_script = bpy.context.area.spaces.active.text.as_string()
if fountain_script.strip() == "": return {"CANCELLED"}
# screenplain
try:
import screenplain
except ImportError:
#Installing screenplain module (this is only required once)...
import urllib.request as urllib
import zipfile
import shutil
url = 'https://github.com/vilcans/screenplain/archive/0.8.0.zip'
home_url = bpy.utils.script_path_user() + "\\addons\\"
urllib.urlretrieve(url, home_url + 'screenplain-0.8.0.zip')
with zipfile.ZipFile(home_url + 'screenplain-0.8.0.zip', 'r') as z:
z.extractall(home_url)
target_dir = home_url
shutil.move(home_url + 'screenplain-0.8.0/screenplain', target_dir)
os.remove(home_url + 'screenplain-0.8.0.zip')
shutil.rmtree(home_url + 'screenplain-0.8.0')
import screenplain
import screenplain.parsers.fountain as fountain
from io import StringIO
import webbrowser
s = StringIO(fountain_script)
screenplay = fountain.parse(s)
output = StringIO()
if opt_exp == "HTML":
from screenplain.export.html import convert
convert(screenplay, output, bare=False)
if opt_exp == "FDX":
from screenplain.export.fdx import to_fdx
to_fdx(screenplay, output)
if opt_exp == "PDF":
from screenplain.export.pdf import to_pdf
to_pdf(screenplay, output)
sp_out = output.getvalue()
filename, extension = os.path.splitext(screenplay_filepath)
fileout_name = filename + "." + opt_exp.lower()
file = open(fileout_name, "w")
file.write(sp_out)
file.close()
if open_browser:
if opt_exp == "HTML" or opt_exp == "PDF":
webbrowser.open(fileout_name)
return {'FINISHED'}
class TEXT_OT_scenes_to_strips(bpy.types.Operator):
"""Convert screenplay data to scene and text strips"""
bl_idname = "text.scenes_to_strips"
bl_label = "Create Sequence"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
space = bpy.context.space_data
filepath = bpy.context.area.spaces.active.text.filepath
return ((space.type == 'TEXT_EDITOR') and
Path(filepath).suffix == ".fountain")
def execute(self, context):
fountain_script = bpy.context.area.spaces.active.text.as_string()
if fountain_script.strip() == "": return {"CANCELLED"}
F = fountain.Fountain(fountain_script)
if not bpy.context.scene.sequence_editor:
bpy.context.scene.sequence_editor_create()
addSceneChannel = 1
previous_time = 0
previous_line = 0
lines_pr_minute = 59
first_duration = 0
render = bpy.context.scene.render
fps = round((render.fps / render.fps_base), 3)
count = 0
f_collected = []
duration = 0
for fc, f in enumerate(F.elements):
if f.element_type == 'Scene Heading':
f_collected.append(f)
for fc, f in enumerate(f_collected):
if str(f.scene_number) != "": f.scene_number = f.scene_number+ " "
name = str(f.scene_number + f.element_text.title())
new_scene = bpy.data.scenes.new(name=name)
cam = bpy.data.cameras.new("Camera")
cam.lens = 35
cam_obj1 = bpy.data.objects.new("Camera", cam)
cam_obj1.location = (9.69, -10.85, 12.388)
cam_obj1.rotation_euler = (0.6799, 0, 0.8254)
new_scene.collection.objects.link(cam_obj1)
if fc == 0:
for ec, e in enumerate(f_collected):
if ec == fc + 1:
first_duration = int((((e.original_line)/lines_pr_minute)*60)*fps)
duration = first_duration
else:
for ec, e in enumerate(f_collected):
if ec == fc+1:
duration = int((((e.original_line - f.original_line)/lines_pr_minute)*60)*fps)
in_time = duration + previous_time
bpy.data.scenes[name].frame_start = 0
bpy.data.scenes[name].frame_end = duration
newScene=bpy.context.scene.sequence_editor.sequences.new_scene(f.element_text.title(), new_scene, addSceneChannel, previous_time)
bpy.context.scene.sequence_editor.sequences_all[newScene.name].scene_camera = bpy.data.objects[cam.name]
#bpy.context.scene.sequence_editor.sequences_all[newScene.name].animation_offset_start = 0
bpy.context.scene.sequence_editor.sequences_all[newScene.name].frame_final_end = in_time
bpy.context.scene.sequence_editor.sequences_all[newScene.name].frame_start = previous_time
previous_time = in_time
previous_line = f.original_line
bpy.ops.sequencer.set_range_to_strips()
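        # Worked example of the scene-duration arithmetic above (assuming
        # 24 fps and 59 lines per minute): a scene heading that sits 30
        # fountain lines before the next one lasts
        # int(((30 / 59) * 60) * 24) = 732 frames.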
characters_pr_minute = 900
for fc, f in enumerate(F.elements):
if f.element_type == 'Dialogue':
name = str(f.element_text)
duration = int(((len(f.original_content)/characters_pr_minute)*60)*fps)
in_time = int(((f.original_line/lines_pr_minute)*60)*fps)
text_strip = bpy.context.scene.sequence_editor.sequences.new_effect(
name=name,
type='TEXT',
channel=addSceneChannel+1,
frame_start=in_time,
frame_end=in_time + duration
)
text_strip.font_size = int(bpy.context.scene.render.resolution_y/18)
text_strip.text = str(name)
text_strip.use_shadow = True
text_strip.select = True
text_strip.wrap_width = 0.85
text_strip.location[1] = 0.10
text_strip.blend_type = 'ALPHA_OVER'
return {'FINISHED'}
def register():
bpy.utils.register_class(SCREENWRITER_PT_panel)
bpy.utils.register_class(SCREENWRITER_OT_preview_fountain)
bpy.utils.register_class(TEXT_OT_dual_view)
bpy.utils.register_class(SCREENWRITER_OT_export)
bpy.types.TEXT_MT_text.append(screenwriter_menu_export)
bpy.utils.register_class(TEXT_OT_scenes_to_strips)
bpy.types.Scene.last_character = IntProperty(default=0)
bpy.types.Scene.last_line = StringProperty(default="")
bpy.types.Scene.last_line_index = IntProperty(default=0)
bpy.utils.register_class(TextReplaceProperties)
bpy.types.Scene.text_replace = PointerProperty(type=TextReplaceProperties)
def unregister():
bpy.utils.unregister_class(SCREENWRITER_PT_panel)
bpy.utils.unregister_class(SCREENWRITER_OT_preview_fountain)
bpy.utils.unregister_class(TEXT_OT_dual_view)
bpy.utils.unregister_class(SCREENWRITER_OT_export)
bpy.types.TEXT_MT_text.remove(screenwriter_menu_export)
bpy.utils.unregister_class(TEXT_OT_scenes_to_strips)
del bpy.types.Scene.last_character
del bpy.types.Scene.last_line
del bpy.types.Scene.last_line_index
bpy.utils.unregister_class(TextReplaceProperties)
del bpy.types.Scene.text_replace
if __name__ == "__main__":
register()
| [] | [] | [] | [] | [] | python | null | null |
todolist_backend/todolist/wsgi.py | """
WSGI config for todolist project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "todolist.settings")
application = get_wsgi_application()
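# One common way to serve this application (illustrative; the choice of
# gunicorn and the working directory are assumptions):
#   gunicorn todolist.wsgi:application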
| [] | [] | [] | [] | [] | python | 0 | 0 |
homeworker/wsgi.py | """
WSGI config for homeworker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'homeworker.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 |
acceptance/service_command_test.go | package acceptance
import (
"os"
"os/exec"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gexec"
)
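// These acceptance tests read broker credentials from the environment.
// Illustrative invocation (values are placeholders):
//
//	BROKER_URL=https://broker.example.com BROKER_USERNAME=admin \
//	BROKER_PASSWORD=secret go test ./acceptance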
var _ = Describe("CLI service command", func() {
var (
args []string
session *Session
)
BeforeEach(func() {
args = []string{"service"}
})
JustBeforeEach(func() {
var err error
command := exec.Command(pathToCLI, args...)
session, err = Start(command, GinkgoWriter, GinkgoWriter)
Expect(err).NotTo(HaveOccurred())
})
When("--help is passed", func() {
BeforeEach(func() {
args = append(args, "--help")
})
It("displays help and exits 0", func() {
Eventually(session).Should(Exit(0))
Eventually(session).Should(Say("Usage:"))
Eventually(session).Should(Say(`sm \[OPTIONS\] service <list>`))
Eventually(session).Should(Say("\n"))
Eventually(session).Should(Say("The service command group lets you list the available services in the"))
Eventually(session).Should(Say("marketplace\\."))
})
})
Describe("list sub command", func() {
BeforeEach(func() {
args = append(args, "list")
})
When("--help is passed", func() {
BeforeEach(func() {
args = append(args, "--help")
})
It("displays help and exits 0", func() {
Eventually(session).Should(Exit(0))
Eventually(session).Should(Say("Usage:"))
Eventually(session).Should(Say(`sm \[OPTIONS\] service list`))
Eventually(session).Should(Say("\n"))
Eventually(session).Should(Say("List the services that are available in the marketplace\\."))
})
})
//TODO: -randomizeAllSpecs causes this test to flake due to shared k8s state
When("0 brokers are registered", func() {
It("displays 'No services found.' and exits 0", func() {
Eventually(session).Should(Exit(0))
Eventually(session).Should(Say("No services found\\."))
})
})
//TODO: -randomizeAllSpecs causes this test to flake due to shared k8s state
When("1 broker is registered", func() {
BeforeEach(func() {
brokerURL := os.Getenv("BROKER_URL")
brokerUsername := os.Getenv("BROKER_USERNAME")
brokerPassword := os.Getenv("BROKER_PASSWORD")
Expect(brokerURL).NotTo(Equal(""))
Expect(brokerUsername).NotTo(Equal(""))
Expect(brokerPassword).NotTo(Equal(""))
registerArgs := []string{"broker", "register",
"--name", "test-broker",
"--url", brokerURL,
"--username", brokerUsername,
"--password", brokerPassword}
command := exec.Command(pathToCLI, registerArgs...)
registerSession, err := Start(command, GinkgoWriter, GinkgoWriter)
Expect(err).NotTo(HaveOccurred())
Eventually(registerSession).Should(Exit(0))
//TODO: Temporarily sleep until #164240938 is done.
time.Sleep(3 * time.Second)
})
AfterEach(func() {
deleteBrokers("test-broker")
})
It("displays services and plans for the broker", func() {
Eventually(session).Should(Exit(0))
Eventually(session).Should(Say("SERVICE\\s+PLANS\\s+BROKER\\s+DESCRIPTION"))
Eventually(session).Should(Say("overview-service\\s+simple, complex\\s+test-broker\\s+Provides an overview of any service instances and bindings that have been created by a platform"))
})
})
})
})
| ["\"BROKER_URL\"", "\"BROKER_USERNAME\"", "\"BROKER_PASSWORD\""] | [] | ["BROKER_USERNAME", "BROKER_URL", "BROKER_PASSWORD"] | [] | ["BROKER_USERNAME", "BROKER_URL", "BROKER_PASSWORD"] | go | 3 | 0 |
main.go | /**
* Copyright 2019 Naoki.H
*
* golang HTTP Server
* for deployment k8s on GCP
*
*/
// [Start all]
package main
import (
"fmt"
"log"
"net/http"
"os"
)
func main() {
	// use PORT environment variable, default to 8080
port := "8080"
if fromEnv := os.Getenv("PORT"); fromEnv != "" {
port = fromEnv
}
// register hello function to handle all requests
server := http.NewServeMux()
server.HandleFunc("/", hello)
// start the web server on port and accept requests
log.Printf("Server listening on port %s", port)
err := http.ListenAndServe(":"+port, server)
log.Fatal(err)
}
// hello responds to the request with a plain-text "Hello, world!" message.
func hello(w http.ResponseWriter, r *http.Request) {
log.Printf("Serving request: %s", r.URL.Path)
host, _ := os.Hostname()
fmt.Fprintf(w, "Hello, world!\n")
fmt.Fprintf(w, "Version: 1.0.0\n")
fmt.Fprintf(w, "Hostname: %s\n", host)
}
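// Illustrative request against a locally running server (default port):
//
//	$ curl http://localhost:8080/
//	Hello, world!
//	Version: 1.0.0
//	Hostname: <hostname>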
| ["\"PORT\""] | [] | ["PORT"] | [] | ["PORT"] | go | 1 | 0 |
cmd/commands/version/banner.go | package version
import (
"io"
"io/ioutil"
"os"
"runtime"
"text/template"
"time"
beeLogger "github.com/yimishiji/bee/logger"
)
// RuntimeInfo holds information about the current runtime.
type RuntimeInfo struct {
GoVersion string
GOOS string
GOARCH string
NumCPU int
GOPATH string
GOROOT string
Compiler string
BeeVersion string
BeegoVersion string
}
// InitBanner loads the banner and prints it to output
// All errors are ignored, the application will not
// print the banner in case of error.
func InitBanner(out io.Writer, in io.Reader) {
if in == nil {
beeLogger.Log.Fatal("The input is nil")
}
banner, err := ioutil.ReadAll(in)
if err != nil {
beeLogger.Log.Fatalf("Error while trying to read the banner: %s", err)
}
show(out, string(banner))
}
func show(out io.Writer, content string) {
t, err := template.New("banner").
Funcs(template.FuncMap{"Now": Now}).
Parse(content)
if err != nil {
beeLogger.Log.Fatalf("Cannot parse the banner template: %s", err)
}
err = t.Execute(out, RuntimeInfo{
GetGoVersion(),
runtime.GOOS,
runtime.GOARCH,
runtime.NumCPU(),
os.Getenv("GOPATH"),
runtime.GOROOT(),
runtime.Compiler,
version,
GetBeegoVersion(),
})
if err != nil {
beeLogger.Log.Error(err.Error())
}
}
// Now returns the current local time in the specified layout
func Now(layout string) string {
return time.Now().Format(layout)
}
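// Illustrative banner template consumed by show (the real banner file ships
// with the tool; this snippet only demonstrates the available fields):
//
//	bee v{{ .BeeVersion }} on {{ .GOOS }}/{{ .GOARCH }}, {{ .GoVersion }}, built {{ Now "2006-01-02" }}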
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 |
tests/tests_test.go | package tests_test
import (
"log"
"math/rand"
"os"
"path/filepath"
"time"
"gorm.io/driver/mysql"
"gorm.io/driver/postgres"
"gorm.io/driver/sqlite"
"gorm.io/driver/sqlserver"
"gorm.io/gorm"
"gorm.io/gorm/logger"
. "gorm.io/gorm/utils/tests"
)
var DB *gorm.DB
func init() {
var err error
if DB, err = OpenTestConnection(); err != nil {
log.Printf("failed to connect database, got error %v", err)
os.Exit(1)
} else {
sqlDB, err := DB.DB()
if err != nil {
log.Printf("failed to connect database, got error %v", err)
os.Exit(1)
}
err = sqlDB.Ping()
if err != nil {
log.Printf("failed to ping sqlDB, got error %v", err)
os.Exit(1)
}
RunMigrations()
if DB.Dialector.Name() == "sqlite" {
DB.Exec("PRAGMA foreign_keys = ON")
}
}
}
func OpenTestConnection() (db *gorm.DB, err error) {
dbDSN := os.Getenv("GORM_DSN")
switch os.Getenv("GORM_DIALECT") {
case "mysql":
log.Println("testing mysql...")
if dbDSN == "" {
dbDSN = "gorm:gorm@tcp(localhost:9910)/gorm?charset=utf8&parseTime=True&loc=Local"
}
db, err = gorm.Open(mysql.Open(dbDSN), &gorm.Config{})
case "postgres":
log.Println("testing postgres...")
if dbDSN == "" {
dbDSN = "user=gorm password=gorm dbname=gorm host=localhost port=9920 sslmode=disable TimeZone=Asia/Shanghai"
}
db, err = gorm.Open(postgres.New(postgres.Config{
DSN: dbDSN,
PreferSimpleProtocol: true,
}), &gorm.Config{})
case "sqlserver":
// go install github.com/microsoft/go-sqlcmd/cmd/sqlcmd@latest
// SQLCMDPASSWORD=LoremIpsum86 sqlcmd -U sa -S localhost:9930
// CREATE DATABASE gorm;
// GO
// CREATE LOGIN gorm WITH PASSWORD = 'LoremIpsum86';
// CREATE USER gorm FROM LOGIN gorm;
// ALTER SERVER ROLE sysadmin ADD MEMBER [gorm];
// GO
log.Println("testing sqlserver...")
if dbDSN == "" {
dbDSN = "sqlserver://gorm:LoremIpsum86@localhost:9930?database=gorm"
}
db, err = gorm.Open(sqlserver.Open(dbDSN), &gorm.Config{})
default:
log.Println("testing sqlite3...")
db, err = gorm.Open(sqlite.Open(filepath.Join(os.TempDir(), "gorm.db")), &gorm.Config{})
}
if err != nil {
return
}
if debug := os.Getenv("DEBUG"); debug == "true" {
db.Logger = db.Logger.LogMode(logger.Info)
} else if debug == "false" {
db.Logger = db.Logger.LogMode(logger.Silent)
}
return
}
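// Illustrative invocation (the DSN defaults above apply when GORM_DSN is
// unset; adjust to your local containers):
//
//	GORM_DIALECT=postgres DEBUG=true go test ./tests/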
func RunMigrations() {
var err error
allModels := []interface{}{&User{}, &Account{}, &Pet{}, &Company{}, &Toy{}, &Language{}, &Coupon{}, &CouponProduct{}, &Order{}, &Parent{}, &Child{}}
rand.Seed(time.Now().UnixNano())
rand.Shuffle(len(allModels), func(i, j int) { allModels[i], allModels[j] = allModels[j], allModels[i] })
DB.Migrator().DropTable("user_friends", "user_speaks")
if err = DB.Migrator().DropTable(allModels...); err != nil {
log.Printf("Failed to drop table, got error %v\n", err)
os.Exit(1)
}
if err = DB.AutoMigrate(allModels...); err != nil {
log.Printf("Failed to auto migrate, but got error %v\n", err)
os.Exit(1)
}
for _, m := range allModels {
if !DB.Migrator().HasTable(m) {
log.Printf("Failed to create table for %#v\n", m)
os.Exit(1)
}
}
}
| ["\"GORM_DSN\"", "\"GORM_DIALECT\"", "\"DEBUG\""] | [] | ["GORM_DIALECT", "GORM_DSN", "DEBUG"] | [] | ["GORM_DIALECT", "GORM_DSN", "DEBUG"] | go | 3 | 0 |
tests/examples/minlplib/smallinvSNPr5b050-055.py | # MINLP written by GAMS Convert at 04/21/18 13:54:16
#
# Equation counts
# Total E G L N X C B
# 4 0 2 2 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 101 1 0 100 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 401 301 100 0
from pyomo.environ import *
model = m = ConcreteModel()
m.i1 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i2 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i3 = Var(within=Integers,bounds=(0,1E20),initialize=0)
# Integer decision variables i4 .. i100 (i1 .. i3 are declared earlier
# in the file), each nonnegative with the generator's stand-in for
# "no upper bound" (1e20) and initialized at 0. A loop replaces the
# repeated one-line declarations; setattr keeps the original component
# names, so m.i4 ... m.i100 behave exactly as before and the generated
# constraint expression below is unaffected.
for _k in range(4, 101):
    setattr(m, f"i{_k}", Var(within=Integers, bounds=(0, 1e20), initialize=0))
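# x101 is a free real variable that carries the objective value; the
# model minimizes it directly. In generated models of this kind the
# long constraint that follows typically ties x101 to the quadratic
# expression, but that linkage sits further down in the generated
# expression, so treat this reading as a convention, not a guarantee.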
m.x101 = Var(within=Reals, bounds=(None, None), initialize=0)
m.obj = Objective(expr=m.x101, sense=minimize)
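# c1 couples all 100 integer variables through a dense quadratic form:
# a squared term for every i_k plus a cross term for every pair
# (i_j, i_k), each with a fixed machine-generated coefficient. The
# structure resembles a covariance (risk) matrix, though that is an
# interpretation; the generator emits only the raw coefficients.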
m.c1 = Constraint(expr=0.00841507*m.i1**2 + 0.0222536*m.i2**2 + 0.0056479*m.i3**2 + 0.00333322*m.i4**2 + 0.00490963*m.i5
**2 + 0.0221034*m.i6**2 + 0.00509899*m.i7**2 + 0.049464*m.i8**2 + 0.0171508*m.i9**2 + 0.0064643*
m.i10**2 + 0.0218437*m.i11**2 + 0.00346366*m.i12**2 + 0.0458502*m.i13**2 + 0.0747061*m.i14**2 +
0.0196511*m.i15**2 + 0.014222*m.i16**2 + 0.0147535*m.i17**2 + 0.00398615*m.i18**2 + 0.00644484*
m.i19**2 + 0.0322232*m.i20**2 + 0.00887889*m.i21**2 + 0.0434025*m.i22**2 + 0.00981376*m.i23**2 +
0.0133193*m.i24**2 + 0.00471036*m.i25**2 + 0.00359843*m.i26**2 + 0.0112312*m.i27**2 + 0.00476479*
m.i28**2 + 0.00356255*m.i29**2 + 0.0730121*m.i30**2 + 0.00785721*m.i31**2 + 0.0243787*m.i32**2 +
0.0171188*m.i33**2 + 0.00439547*m.i34**2 + 0.00502594*m.i35**2 + 0.0580619*m.i36**2 + 0.0135984*
m.i37**2 + 0.00254137*m.i38**2 + 0.0153341*m.i39**2 + 0.109758*m.i40**2 + 0.0346065*m.i41**2 +
0.0127589*m.i42**2 + 0.011147*m.i43**2 + 0.0156318*m.i44**2 + 0.00556588*m.i45**2 + 0.00302864*
m.i46**2 + 0.0214898*m.i47**2 + 0.00499587*m.i48**2 + 0.00864393*m.i49**2 + 0.0228248*m.i50**2 +
0.0077726*m.i51**2 + 0.00992767*m.i52**2 + 0.0184506*m.i53**2 + 0.0113481*m.i54**2 + 0.0067583*
m.i55**2 + 0.0150416*m.i56**2 + 0.00324193*m.i57**2 + 0.00478196*m.i58**2 + 0.0132471*m.i59**2 +
0.00273446*m.i60**2 + 0.0282459*m.i61**2 + 0.0230221*m.i62**2 + 0.0240972*m.i63**2 + 0.00829946*
m.i64**2 + 0.00688665*m.i65**2 + 0.00858803*m.i66**2 + 0.00778038*m.i67**2 + 0.0082583*m.i68**2
+ 0.022885*m.i69**2 + 0.00568332*m.i70**2 + 0.0234021*m.i71**2 + 0.00924249*m.i72**2 +
0.00669675*m.i73**2 + 0.0109501*m.i74**2 + 0.00663385*m.i75**2 + 0.00328058*m.i76**2 + 0.0112814*
m.i77**2 + 0.00341076*m.i78**2 + 0.0400653*m.i79**2 + 0.00876827*m.i80**2 + 0.0138276*m.i81**2 +
0.00246987*m.i82**2 + 0.0406516*m.i83**2 + 0.00947194*m.i84**2 + 0.00647449*m.i85**2 + 0.0107715*
m.i86**2 + 0.00803069*m.i87**2 + 0.106502*m.i88**2 + 0.00815263*m.i89**2 + 0.0171707*m.i90**2 +
0.0163522*m.i91**2 + 0.00911726*m.i92**2 + 0.00287317*m.i93**2 + 0.00360309*m.i94**2 + 0.00699161
*m.i95**2 + 0.0340959*m.i96**2 + 0.00958446*m.i97**2 + 0.0147951*m.i98**2 + 0.0177595*m.i99**2 +
0.0208523*m.i100**2 + 0.00692522*m.i1*m.i2 + 0.00066464*m.i1*m.i3 + 0.00388744*m.i1*m.i4 +
0.001108218*m.i1*m.i5 + 0.0046712*m.i1*m.i6 + 0.00771824*m.i1*m.i7 + 0.0020653*m.i1*m.i8 +
0.001524626*m.i1*m.i9 + 0.00484724*m.i1*m.i10 + 0.00733242*m.i1*m.i11 + 0.00556218*m.i1*m.i12 +
0.0052571*m.i1*m.i13 + 0.0218926*m.i1*m.i14 + 0.01352862*m.i1*m.i15 + 0.00549784*m.i1*m.i16 +
0.00235342*m.i1*m.i17 + 0.00448206*m.i1*m.i18 + 0.0072148*m.i1*m.i19 + 0.00958894*m.i1*m.i20 +
0.00376328*m.i1*m.i21 + 0.0117501*m.i1*m.i22 + 0.00575998*m.i1*m.i23 - 0.000109147*m.i1*m.i24 +
0.000604944*m.i1*m.i25 + 0.00473296*m.i1*m.i26 + 0.000356572*m.i1*m.i27 - 0.001552262*m.i1*m.i28
+ 0.00119092*m.i1*m.i29 + 0.01373684*m.i1*m.i30 + 0.0059113*m.i1*m.i31 + 0.00623524*m.i1*m.i32
+ 0.00801204*m.i1*m.i33 + 0.00108736*m.i1*m.i34 + 0.001491474*m.i1*m.i35 + 0.01080356*m.i1*m.i36
+ 0.00559202*m.i1*m.i37 + 7.8057e-6*m.i1*m.i38 + 0.00831004*m.i1*m.i39 + 0.001096208*m.i1*m.i40
+ 0.001136658*m.i1*m.i41 + 0.0073715*m.i1*m.i42 + 0.000726938*m.i1*m.i43 + 0.00621872*m.i1*m.i44
+ 0.00646596*m.i1*m.i45 + 0.00441466*m.i1*m.i46 + 0.001262528*m.i1*m.i47 + 0.00567366*m.i1*m.i48
+ 0.00690472*m.i1*m.i49 + 0.01140754*m.i1*m.i50 + 0.00275514*m.i1*m.i51 + 0.00633434*m.i1*m.i52
+ 0.00842252*m.i1*m.i53 + 0.00674544*m.i1*m.i54 + 0.00577156*m.i1*m.i55 + 0.000723972*m.i1*m.i56
+ 0.00617654*m.i1*m.i57 + 0.00426758*m.i1*m.i58 + 0.00581362*m.i1*m.i59 + 0.00305964*m.i1*m.i60
+ 0.00915838*m.i1*m.i61 + 0.00408204*m.i1*m.i62 + 0.00526036*m.i1*m.i63 + 0.00641708*m.i1*m.i64
+ 0.001311362*m.i1*m.i65 + 0.00589896*m.i1*m.i66 + 0.001450664*m.i1*m.i67 + 0.0054669*m.i1*m.i68
+ 0.00759698*m.i1*m.i69 + 0.0069591*m.i1*m.i70 + 0.0023689*m.i1*m.i71 + 0.0026146*m.i1*m.i72 +
0.00520422*m.i1*m.i73 + 0.00959956*m.i1*m.i74 + 0.00799166*m.i1*m.i75 + 0.00256248*m.i1*m.i76 +
0.01210352*m.i1*m.i77 + 0.00469514*m.i1*m.i78 + 0.00329676*m.i1*m.i79 + 0.0068214*m.i1*m.i80 +
0.00190637*m.i1*m.i81 + 0.00256972*m.i1*m.i82 - 0.00577696*m.i1*m.i83 + 0.00245394*m.i1*m.i84 +
0.00585966*m.i1*m.i85 + 0.00330078*m.i1*m.i86 + 0.00362852*m.i1*m.i87 + 0.0064137*m.i1*m.i88 +
0.00375038*m.i1*m.i89 + 0.00666048*m.i1*m.i90 + 0.00942176*m.i1*m.i91 + 0.00379828*m.i1*m.i92 +
0.00246526*m.i1*m.i93 + 0.0029997*m.i1*m.i94 + 0.00592606*m.i1*m.i95 + 0.0136565*m.i1*m.i96 +
0.00562112*m.i1*m.i97 + 0.0031101*m.i1*m.i98 + 0.00328418*m.i1*m.i99 + 0.00992138*m.i1*m.i100 +
0.01159836*m.i2*m.i3 + 0.00432612*m.i2*m.i4 + 0.01055774*m.i2*m.i5 + 0.0235592*m.i2*m.i6 +
0.0053913*m.i2*m.i7 + 0.01748966*m.i2*m.i8 + 0.01322526*m.i2*m.i9 + 0.01103896*m.i2*m.i10 +
0.001420928*m.i2*m.i11 + 0.00303766*m.i2*m.i12 + 0.0325414*m.i2*m.i13 + 0.0528886*m.i2*m.i14 +
0.0344486*m.i2*m.i15 + 0.01889664*m.i2*m.i16 + 0.01085498*m.i2*m.i17 + 0.01133696*m.i2*m.i18 +
0.0105108*m.i2*m.i19 + 0.041965*m.i2*m.i20 + 0.01908526*m.i2*m.i21 + 0.0438608*m.i2*m.i22 +
0.01760436*m.i2*m.i23 + 0.0177692*m.i2*m.i24 + 0.01401386*m.i2*m.i25 + 0.01130076*m.i2*m.i26 +
0.0201926*m.i2*m.i27 + 0.00893526*m.i2*m.i28 + 0.01013464*m.i2*m.i29 + 0.0522552*m.i2*m.i30 +
0.00674062*m.i2*m.i31 + 0.0386894*m.i2*m.i32 + 0.01840562*m.i2*m.i33 + 0.0079061*m.i2*m.i34 +
0.01050574*m.i2*m.i35 + 0.038882*m.i2*m.i36 + 0.0209782*m.i2*m.i37 + 0.00569346*m.i2*m.i38 +
0.0259324*m.i2*m.i39 + 0.0472088*m.i2*m.i40 + 0.0282636*m.i2*m.i41 + 0.0225892*m.i2*m.i42 +
0.01104052*m.i2*m.i43 + 0.0218496*m.i2*m.i44 + 0.00682534*m.i2*m.i45 + 0.01022898*m.i2*m.i46 +
0.0273094*m.i2*m.i47 + 0.01045064*m.i2*m.i48 + 0.01767338*m.i2*m.i49 + 0.0311902*m.i2*m.i50 +
0.0126455*m.i2*m.i51 + 0.0206168*m.i2*m.i52 + 0.0261894*m.i2*m.i53 + 0.024527*m.i2*m.i54 +
0.01734138*m.i2*m.i55 + 0.01224052*m.i2*m.i56 + 0.01152072*m.i2*m.i57 + 0.01028864*m.i2*m.i58 +
0.01883544*m.i2*m.i59 + 0.00908648*m.i2*m.i60 + 0.0449708*m.i2*m.i61 + 0.0363664*m.i2*m.i62 +
0.01577062*m.i2*m.i63 + 0.01266282*m.i2*m.i64 + 0.01385216*m.i2*m.i65 + 0.00440902*m.i2*m.i66 +
0.01711764*m.i2*m.i67 + 0.0110787*m.i2*m.i68 + 0.0341778*m.i2*m.i69 + 0.0156542*m.i2*m.i70 +
0.01891112*m.i2*m.i71 + 0.0216326*m.i2*m.i72 + 0.01534328*m.i2*m.i73 + 0.01661334*m.i2*m.i74 +
0.01534594*m.i2*m.i75 + 0.01116732*m.i2*m.i76 + 0.01402982*m.i2*m.i77 + 0.00963242*m.i2*m.i78 +
0.0200668*m.i2*m.i79 + 0.01379116*m.i2*m.i80 + 0.01910046*m.i2*m.i81 + 0.0077605*m.i2*m.i82 -
0.000954558*m.i2*m.i83 + 0.01255918*m.i2*m.i84 + 0.0126639*m.i2*m.i85 + 0.0201936*m.i2*m.i86 +
0.017931*m.i2*m.i87 + 0.0389418*m.i2*m.i88 + 0.00845916*m.i2*m.i89 + 0.0267914*m.i2*m.i90 +
0.0193905*m.i2*m.i91 + 0.01261014*m.i2*m.i92 + 0.0069012*m.i2*m.i93 + 0.00876014*m.i2*m.i94 +
0.01829908*m.i2*m.i95 + 0.0373396*m.i2*m.i96 + 0.0211262*m.i2*m.i97 + 0.01549032*m.i2*m.i98 +
0.0247114*m.i2*m.i99 + 0.0324248*m.i2*m.i100 - 0.000720538*m.i3*m.i4 + 0.00453322*m.i3*m.i5 +
0.00638226*m.i3*m.i6 + 0.000938158*m.i3*m.i7 + 0.0035154*m.i3*m.i8 + 0.00681962*m.i3*m.i9 +
0.006345*m.i3*m.i10 + 0.00232904*m.i3*m.i11 - 0.00054599*m.i3*m.i12 + 0.01850556*m.i3*m.i13 +
0.01892336*m.i3*m.i14 + 0.00820906*m.i3*m.i15 + 0.00848796*m.i3*m.i16 + 0.0100743*m.i3*m.i17 +
0.00327798*m.i3*m.i18 + 0.000498452*m.i3*m.i19 + 0.01775572*m.i3*m.i20 + 0.00919688*m.i3*m.i21 +
0.01282772*m.i3*m.i22 + 0.00853066*m.i3*m.i23 + 0.00506148*m.i3*m.i24 + 0.004557*m.i3*m.i25 +
0.001737768*m.i3*m.i26 + 0.00560326*m.i3*m.i27 + 0.00374962*m.i3*m.i28 + 0.000427408*m.i3*m.i29
+ 0.01831098*m.i3*m.i30 + 0.00791496*m.i3*m.i31 + 0.01306*m.i3*m.i32 + 0.0143109*m.i3*m.i33 +
0.00324578*m.i3*m.i34 + 0.00289704*m.i3*m.i35 + 0.01899172*m.i3*m.i36 + 0.00855898*m.i3*m.i37 +
0.000764782*m.i3*m.i38 + 0.01045622*m.i3*m.i39 + 0.0241684*m.i3*m.i40 + 0.01022702*m.i3*m.i41 +
0.0096569*m.i3*m.i42 + 0.00605256*m.i3*m.i43 + 0.0087656*m.i3*m.i44 + 0.00231868*m.i3*m.i45 +
0.003075*m.i3*m.i46 + 0.00904418*m.i3*m.i47 + 0.00346386*m.i3*m.i48 + 0.00970054*m.i3*m.i49 +
0.0107517*m.i3*m.i50 + 0.00833706*m.i3*m.i51 + 0.00601022*m.i3*m.i52 + 0.00885472*m.i3*m.i53 +
0.0087269*m.i3*m.i54 + 0.00799796*m.i3*m.i55 + 0.0077742*m.i3*m.i56 + 0.00233028*m.i3*m.i57 +
0.00392772*m.i3*m.i58 + 0.00960436*m.i3*m.i59 + 0.000506858*m.i3*m.i60 + 0.01485036*m.i3*m.i61 +
0.01172454*m.i3*m.i62 + 0.00763564*m.i3*m.i63 + 0.00510368*m.i3*m.i64 + 0.00739458*m.i3*m.i65 +
0.00321864*m.i3*m.i66 + 0.00506992*m.i3*m.i67 + 0.001582392*m.i3*m.i68 + 0.0133327*m.i3*m.i69 +
0.00346984*m.i3*m.i70 + 0.00591914*m.i3*m.i71 + 0.0050918*m.i3*m.i72 + 0.00762942*m.i3*m.i73 +
0.0072567*m.i3*m.i74 + 0.0028432*m.i3*m.i75 + 0.00258746*m.i3*m.i76 + 0.00665946*m.i3*m.i77 +
0.001559716*m.i3*m.i78 + 0.0114221*m.i3*m.i79 + 0.00359546*m.i3*m.i80 + 0.00675946*m.i3*m.i81 +
0.001328782*m.i3*m.i82 + 0.00450512*m.i3*m.i83 + 0.00859628*m.i3*m.i84 + 0.00541618*m.i3*m.i85 +
0.01126372*m.i3*m.i86 + 0.00604642*m.i3*m.i87 + 0.01802074*m.i3*m.i88 + 0.0056414*m.i3*m.i89 +
0.00952436*m.i3*m.i90 + 0.00568388*m.i3*m.i91 + 0.0086732*m.i3*m.i92 + 0.001482822*m.i3*m.i93 +
0.0026677*m.i3*m.i94 + 0.00675394*m.i3*m.i95 + 0.01169216*m.i3*m.i96 + 0.0076724*m.i3*m.i97 +
0.00761804*m.i3*m.i98 + 0.01192344*m.i3*m.i99 + 0.01326866*m.i3*m.i100 + 0.00169903*m.i4*m.i5 +
0.00300136*m.i4*m.i6 + 0.00385392*m.i4*m.i7 + 0.00382362*m.i4*m.i8 + 0.00575034*m.i4*m.i9 +
0.00125203*m.i4*m.i10 + 0.000828078*m.i4*m.i11 + 0.00404896*m.i4*m.i12 - 0.001180878*m.i4*m.i13
+ 0.00956206*m.i4*m.i14 + 0.00571904*m.i4*m.i15 + 0.0047927*m.i4*m.i16 + 0.001736122*m.i4*m.i17
+ 0.001900434*m.i4*m.i18 + 0.00498296*m.i4*m.i19 + 0.0055112*m.i4*m.i20 + 0.00199047*m.i4*m.i21
+ 0.00302926*m.i4*m.i22 + 0.001107052*m.i4*m.i23 + 0.0032099*m.i4*m.i24 + 0.00202704*m.i4*m.i25
+ 0.0049441*m.i4*m.i26 + 0.00296714*m.i4*m.i27 + 0.001430786*m.i4*m.i28 + 0.00335542*m.i4*m.i29
+ 0.0072271*m.i4*m.i30 + 0.001983328*m.i4*m.i31 + 0.00263338*m.i4*m.i32 + 0.0034098*m.i4*m.i33
+ 0.001978102*m.i4*m.i34 + 0.00248436*m.i4*m.i35 + 0.001037234*m.i4*m.i36 + 0.001931824*m.i4*
m.i37 + 0.00154955*m.i4*m.i38 + 0.00293776*m.i4*m.i39 - 0.01282698*m.i4*m.i40 + 0.001937926*m.i4*
m.i41 + 0.0052959*m.i4*m.i42 + 0.001856036*m.i4*m.i43 + 0.000740384*m.i4*m.i44 + 0.00372246*m.i4*
m.i45 + 0.00362974*m.i4*m.i46 + 0.001687258*m.i4*m.i47 + 0.00297792*m.i4*m.i48 + 0.0024381*m.i4*
m.i49 + 0.00581304*m.i4*m.i50 + 0.000775592*m.i4*m.i51 + 0.00512872*m.i4*m.i52 + 0.00302932*m.i4*
m.i53 + 0.00451004*m.i4*m.i54 + 0.00355054*m.i4*m.i55 + 0.000365898*m.i4*m.i56 + 0.00396452*m.i4*
m.i57 + 0.00218522*m.i4*m.i58 + 0.001602712*m.i4*m.i59 + 0.00378946*m.i4*m.i60 + 0.00528342*m.i4*
m.i61 + 0.00345546*m.i4*m.i62 + 0.0072364*m.i4*m.i63 + 0.00460504*m.i4*m.i64 + 0.00362066*m.i4*
m.i65 + 0.00176825*m.i4*m.i66 + 0.00326082*m.i4*m.i67 + 0.00494324*m.i4*m.i68 + 0.00478058*m.i4*
m.i69 + 0.0047424*m.i4*m.i70 + 0.00406804*m.i4*m.i71 + 0.00356438*m.i4*m.i72 + 0.0039191*m.i4*
m.i73 + 0.00506266*m.i4*m.i74 + 0.005213*m.i4*m.i75 + 0.00334114*m.i4*m.i76 + 0.00410168*m.i4*
m.i77 + 0.00325268*m.i4*m.i78 + 0.000621396*m.i4*m.i79 + 0.00679868*m.i4*m.i80 + 0.001665408*m.i4
*m.i81 + 0.00231708*m.i4*m.i82 - 0.0025243*m.i4*m.i83 + 0.00277762*m.i4*m.i84 + 0.0040202*m.i4*
m.i85 + 0.001500566*m.i4*m.i86 + 0.001680814*m.i4*m.i87 + 0.00640404*m.i4*m.i88 + 0.00397656*m.i4
*m.i89 + 0.000508164*m.i4*m.i90 + 0.00565534*m.i4*m.i91 + 0.0031999*m.i4*m.i92 + 0.0007233*m.i4*
m.i93 + 0.001347788*m.i4*m.i94 + 0.00386662*m.i4*m.i95 + 0.0056032*m.i4*m.i96 + 0.00392786*m.i4*
m.i97 + 0.0032706*m.i4*m.i98 + 0.000716722*m.i4*m.i99 + 0.00200998*m.i4*m.i100 + 0.00725878*m.i5*
m.i6 + 0.000634496*m.i5*m.i7 + 0.0112129*m.i5*m.i8 + 0.006535*m.i5*m.i9 + 0.0076756*m.i5*m.i10 -
0.00455426*m.i5*m.i11 + 0.001111236*m.i5*m.i12 + 0.01473142*m.i5*m.i13 + 0.01556352*m.i5*m.i14 +
0.00889148*m.i5*m.i15 + 0.00833956*m.i5*m.i16 + 0.01155304*m.i5*m.i17 + 0.0044319*m.i5*m.i18 +
0.0061696*m.i5*m.i19 + 0.01660846*m.i5*m.i20 + 0.00921042*m.i5*m.i21 + 0.01240074*m.i5*m.i22 +
0.00930536*m.i5*m.i23 + 0.00636938*m.i5*m.i24 + 0.00582298*m.i5*m.i25 + 0.00314834*m.i5*m.i26 +
0.00569034*m.i5*m.i27 + 0.00513186*m.i5*m.i28 + 0.00443806*m.i5*m.i29 + 0.01398194*m.i5*m.i30 +
0.00649478*m.i5*m.i31 + 0.01579432*m.i5*m.i32 + 0.00734872*m.i5*m.i33 + 0.0056108*m.i5*m.i34 +
0.00623672*m.i5*m.i35 + 0.01544598*m.i5*m.i36 + 0.01144796*m.i5*m.i37 + 0.0024117*m.i5*m.i38 +
0.00970728*m.i5*m.i39 + 0.0182302*m.i5*m.i40 + 0.00790876*m.i5*m.i41 + 0.00731488*m.i5*m.i42 +
0.00543454*m.i5*m.i43 + 0.00647722*m.i5*m.i44 + 0.0035064*m.i5*m.i45 + 0.00307696*m.i5*m.i46 +
0.00716814*m.i5*m.i47 + 0.001828662*m.i5*m.i48 + 0.00846664*m.i5*m.i49 + 0.01292148*m.i5*m.i50 +
0.0081737*m.i5*m.i51 + 0.00647086*m.i5*m.i52 + 0.00609644*m.i5*m.i53 + 0.00842446*m.i5*m.i54 +
0.00619594*m.i5*m.i55 + 0.01114364*m.i5*m.i56 + 0.00464056*m.i5*m.i57 + 0.00294786*m.i5*m.i58 +
0.01085566*m.i5*m.i59 + 0.00324938*m.i5*m.i60 + 0.01321296*m.i5*m.i61 + 0.00956118*m.i5*m.i62 +
0.00799502*m.i5*m.i63 + 0.00255928*m.i5*m.i64 + 0.00635808*m.i5*m.i65 + 0.00425494*m.i5*m.i66 +
0.00743456*m.i5*m.i67 + 0.003997*m.i5*m.i68 + 0.01327542*m.i5*m.i69 + 0.00624764*m.i5*m.i70 +
0.00544782*m.i5*m.i71 + 0.00583882*m.i5*m.i72 + 0.00712322*m.i5*m.i73 + 0.00675538*m.i5*m.i74 +
0.00471928*m.i5*m.i75 + 0.00331686*m.i5*m.i76 + 0.0064726*m.i5*m.i77 + 0.0043073*m.i5*m.i78 +
0.01376458*m.i5*m.i79 + 0.00590054*m.i5*m.i80 + 0.00544478*m.i5*m.i81 + 0.00433406*m.i5*m.i82 +
0.0018936*m.i5*m.i83 + 0.00732892*m.i5*m.i84 + 0.00654804*m.i5*m.i85 + 0.00769986*m.i5*m.i86 +
0.00924248*m.i5*m.i87 + 0.01858866*m.i5*m.i88 + 0.00588762*m.i5*m.i89 + 0.00671372*m.i5*m.i90 +
0.00513832*m.i5*m.i91 + 0.00597632*m.i5*m.i92 + 0.0033572*m.i5*m.i93 + 0.00718978*m.i5*m.i94 +
0.00692006*m.i5*m.i95 + 0.0082357*m.i5*m.i96 + 0.00798976*m.i5*m.i97 + 0.00578018*m.i5*m.i98 +
0.00997244*m.i5*m.i99 + 0.00861536*m.i5*m.i100 + 0.00682146*m.i6*m.i7 + 0.00318158*m.i6*m.i8 +
0.01402384*m.i6*m.i9 + 0.01146794*m.i6*m.i10 + 0.00514562*m.i6*m.i11 + 0.001749894*m.i6*m.i12 +
0.0349226*m.i6*m.i13 + 0.0204032*m.i6*m.i14 + 0.0257432*m.i6*m.i15 + 0.01758104*m.i6*m.i16 +
0.01908054*m.i6*m.i17 + 0.00928378*m.i6*m.i18 + 0.00320468*m.i6*m.i19 + 0.0315536*m.i6*m.i20 +
0.01792788*m.i6*m.i21 + 0.0231518*m.i6*m.i22 + 0.01485588*m.i6*m.i23 + 0.01959078*m.i6*m.i24 +
0.01015748*m.i6*m.i25 + 0.00771848*m.i6*m.i26 + 0.0203708*m.i6*m.i27 + 0.00861336*m.i6*m.i28 +
0.00733064*m.i6*m.i29 + 0.0211284*m.i6*m.i30 + 0.01136376*m.i6*m.i31 + 0.0298052*m.i6*m.i32 +
0.01763386*m.i6*m.i33 + 0.01196962*m.i6*m.i34 + 0.00970124*m.i6*m.i35 + 0.0426536*m.i6*m.i36 +
0.0162704*m.i6*m.i37 + 0.00511032*m.i6*m.i38 + 0.0211034*m.i6*m.i39 + 0.0536216*m.i6*m.i40 +
0.0314338*m.i6*m.i41 + 0.0212846*m.i6*m.i42 + 0.01544516*m.i6*m.i43 + 0.0203852*m.i6*m.i44 +
0.00711214*m.i6*m.i45 + 0.01012528*m.i6*m.i46 + 0.0378006*m.i6*m.i47 + 0.00769828*m.i6*m.i48 +
0.01043538*m.i6*m.i49 + 0.0235092*m.i6*m.i50 + 0.00574084*m.i6*m.i51 + 0.01540822*m.i6*m.i52 +
0.01066192*m.i6*m.i53 + 0.01947344*m.i6*m.i54 + 0.01212224*m.i6*m.i55 + 0.01841288*m.i6*m.i56 +
0.00863178*m.i6*m.i57 + 0.0123986*m.i6*m.i58 + 0.01033934*m.i6*m.i59 + 0.00473636*m.i6*m.i60 +
0.0271978*m.i6*m.i61 + 0.0244978*m.i6*m.i62 + 0.0206042*m.i6*m.i63 + 0.0123061*m.i6*m.i64 +
0.00969592*m.i6*m.i65 + 0.0105285*m.i6*m.i66 + 0.01296694*m.i6*m.i67 + 0.00467684*m.i6*m.i68 +
0.0206522*m.i6*m.i69 + 0.01181216*m.i6*m.i70 + 0.034569*m.i6*m.i71 + 0.01713412*m.i6*m.i72 +
0.00997084*m.i6*m.i73 + 0.00934556*m.i6*m.i74 + 0.00446476*m.i6*m.i75 + 0.00591468*m.i6*m.i76 +
0.00902732*m.i6*m.i77 + 0.00684842*m.i6*m.i78 + 0.000346556*m.i6*m.i79 + 0.01344964*m.i6*m.i80 +
0.028585*m.i6*m.i81 + 0.00365848*m.i6*m.i82 + 0.0233826*m.i6*m.i83 + 0.01097966*m.i6*m.i84 +
0.01159854*m.i6*m.i85 + 0.0132315*m.i6*m.i86 + 0.00973116*m.i6*m.i87 + 0.01749474*m.i6*m.i88 +
0.00153948*m.i6*m.i89 + 0.01386412*m.i6*m.i90 + 0.01199914*m.i6*m.i91 + 0.0141917*m.i6*m.i92 +
0.001321806*m.i6*m.i93 + 0.00438272*m.i6*m.i94 + 0.01131596*m.i6*m.i95 + 0.01535776*m.i6*m.i96 +
0.01709068*m.i6*m.i97 + 0.024088*m.i6*m.i98 + 0.0176488*m.i6*m.i99 + 0.0244376*m.i6*m.i100 +
0.00488516*m.i7*m.i8 + 0.00626372*m.i7*m.i9 + 0.001990118*m.i7*m.i10 + 0.00360408*m.i7*m.i11 +
0.0044488*m.i7*m.i12 + 0.00345036*m.i7*m.i13 + 0.01022598*m.i7*m.i14 + 0.00914736*m.i7*m.i15 +
0.00744612*m.i7*m.i16 + 0.0041386*m.i7*m.i17 + 0.00439536*m.i7*m.i18 + 0.00478826*m.i7*m.i19 +
0.00946126*m.i7*m.i20 + 0.00383118*m.i7*m.i21 + 0.00577738*m.i7*m.i22 + 0.0023517*m.i7*m.i23 +
0.0050588*m.i7*m.i24 + 0.0021953*m.i7*m.i25 + 0.00304582*m.i7*m.i26 + 0.0025687*m.i7*m.i27 +
0.001019412*m.i7*m.i28 + 0.001803492*m.i7*m.i29 + 0.00840076*m.i7*m.i30 + 0.00405006*m.i7*m.i31
+ 0.00330894*m.i7*m.i32 + 0.00379124*m.i7*m.i33 + 0.00297878*m.i7*m.i34 + 0.00257924*m.i7*m.i35
+ 0.00710268*m.i7*m.i36 + 0.00290856*m.i7*m.i37 + 0.00084645*m.i7*m.i38 + 0.00616224*m.i7*m.i39
+ 0.00012188*m.i7*m.i40 + 0.00931498*m.i7*m.i41 + 0.00783*m.i7*m.i42 + 0.00769852*m.i7*m.i43 +
0.00783756*m.i7*m.i44 + 0.0049081*m.i7*m.i45 + 0.00379762*m.i7*m.i46 + 0.00691856*m.i7*m.i47 +
0.00516014*m.i7*m.i48 + 0.00525658*m.i7*m.i49 + 0.00529626*m.i7*m.i50 + 0.00103022*m.i7*m.i51 +
0.00545452*m.i7*m.i52 + 0.00609146*m.i7*m.i53 + 0.0066465*m.i7*m.i54 + 0.0057959*m.i7*m.i55 +
0.00384568*m.i7*m.i56 + 0.00518642*m.i7*m.i57 + 0.0049888*m.i7*m.i58 + 0.00240984*m.i7*m.i59 +
0.001870666*m.i7*m.i60 + 0.00856542*m.i7*m.i61 + 0.00433228*m.i7*m.i62 + 0.00926318*m.i7*m.i63 +
0.00802564*m.i7*m.i64 + 0.002679*m.i7*m.i65 + 0.00656044*m.i7*m.i66 + 0.00189873*m.i7*m.i67 +
0.00559974*m.i7*m.i68 + 0.0059088*m.i7*m.i69 + 0.00502274*m.i7*m.i70 + 0.00714092*m.i7*m.i71 +
0.00451814*m.i7*m.i72 + 0.0055096*m.i7*m.i73 + 0.0054579*m.i7*m.i74 + 0.00428152*m.i7*m.i75 +
0.00201372*m.i7*m.i76 + 0.00763776*m.i7*m.i77 + 0.001767634*m.i7*m.i78 - 0.00404984*m.i7*m.i79 +
0.00693072*m.i7*m.i80 + 0.00453578*m.i7*m.i81 + 0.001431356*m.i7*m.i82 + 0.001000832*m.i7*m.i83
+ 0.00363592*m.i7*m.i84 + 0.00399748*m.i7*m.i85 + 0.00244412*m.i7*m.i86 - 0.00038172*m.i7*m.i87
+ 0.00670104*m.i7*m.i88 + 0.00351634*m.i7*m.i89 + 0.000192176*m.i7*m.i90 + 0.00766242*m.i7*m.i91
+ 0.00431432*m.i7*m.i92 + 0.00099522*m.i7*m.i93 + 0.00215394*m.i7*m.i94 + 0.00467712*m.i7*m.i95
+ 0.00551306*m.i7*m.i96 + 0.00524514*m.i7*m.i97 + 0.00715168*m.i7*m.i98 + 0.00269474*m.i7*m.i99
+ 0.006577*m.i7*m.i100 + 0.01497394*m.i8*m.i9 + 0.0108969*m.i8*m.i10 + 0.00659842*m.i8*m.i11 +
0.00635336*m.i8*m.i12 + 0.0313098*m.i8*m.i13 + 0.0387588*m.i8*m.i14 + 0.01963812*m.i8*m.i15 +
0.00587206*m.i8*m.i16 + 0.0158028*m.i8*m.i17 + 0.00433344*m.i8*m.i18 + 0.01027216*m.i8*m.i19 +
0.0310764*m.i8*m.i20 + 0.01480666*m.i8*m.i21 + 0.0292324*m.i8*m.i22 + 0.01097454*m.i8*m.i23 +
0.01637932*m.i8*m.i24 + 0.0081932*m.i8*m.i25 + 0.00625414*m.i8*m.i26 + 0.01206926*m.i8*m.i27 +
0.00960586*m.i8*m.i28 + 0.00767454*m.i8*m.i29 + 0.0389634*m.i8*m.i30 + 0.01047056*m.i8*m.i31 +
0.0243166*m.i8*m.i32 + 0.01490526*m.i8*m.i33 + 0.0048023*m.i8*m.i34 + 0.00582726*m.i8*m.i35 +
0.0310084*m.i8*m.i36 + 0.01520046*m.i8*m.i37 + 0.00435652*m.i8*m.i38 + 0.01820518*m.i8*m.i39 +
0.028962*m.i8*m.i40 + 0.0236162*m.i8*m.i41 + 0.0089807*m.i8*m.i42 + 0.01679084*m.i8*m.i43 +
0.01575264*m.i8*m.i44 - 0.00596962*m.i8*m.i45 + 0.0045504*m.i8*m.i46 + 0.0135935*m.i8*m.i47 +
0.00528224*m.i8*m.i48 + 0.01215584*m.i8*m.i49 + 0.01116408*m.i8*m.i50 + 0.00976906*m.i8*m.i51 +
0.01011206*m.i8*m.i52 + 0.0224104*m.i8*m.i53 + 0.01007602*m.i8*m.i54 + 0.01583128*m.i8*m.i55 +
0.00761084*m.i8*m.i56 + 0.00804396*m.i8*m.i57 + 0.01038608*m.i8*m.i58 + 0.01602498*m.i8*m.i59 +
0.00380248*m.i8*m.i60 + 0.0227414*m.i8*m.i61 + 0.0208778*m.i8*m.i62 + 0.01278874*m.i8*m.i63 +
0.00882622*m.i8*m.i64 + 0.01253422*m.i8*m.i65 + 0.00938202*m.i8*m.i66 + 0.0132364*m.i8*m.i67 +
0.00341364*m.i8*m.i68 + 0.0217686*m.i8*m.i69 + 0.01082106*m.i8*m.i70 + 0.0109575*m.i8*m.i71 +
0.01032418*m.i8*m.i72 + 0.01203924*m.i8*m.i73 + 0.01820078*m.i8*m.i74 + 0.00454846*m.i8*m.i75 +
0.00699592*m.i8*m.i76 + 0.017175*m.i8*m.i77 + 0.00418326*m.i8*m.i78 + 0.003044*m.i8*m.i79 +
0.00913958*m.i8*m.i80 + 0.01058642*m.i8*m.i81 + 0.00609436*m.i8*m.i82 + 0.00939194*m.i8*m.i83 +
0.01860882*m.i8*m.i84 + 0.00544766*m.i8*m.i85 + 0.00672898*m.i8*m.i86 + 0.00847128*m.i8*m.i87 +
0.0399532*m.i8*m.i88 + 0.00230258*m.i8*m.i89 + 0.00647968*m.i8*m.i90 + 0.00663734*m.i8*m.i91 +
0.00723392*m.i8*m.i92 + 0.0028363*m.i8*m.i93 + 0.01094692*m.i8*m.i94 + 0.01122622*m.i8*m.i95 +
0.01922686*m.i8*m.i96 + 0.0178042*m.i8*m.i97 + 0.00987488*m.i8*m.i98 + 0.0201768*m.i8*m.i99 +
0.00916962*m.i8*m.i100 + 0.00380196*m.i9*m.i10 + 0.000241806*m.i9*m.i11 + 0.00422182*m.i9*m.i12
+ 0.01745366*m.i9*m.i13 + 0.01560378*m.i9*m.i14 + 0.01797116*m.i9*m.i15 + 0.0104377*m.i9*m.i16
+ 0.01789532*m.i9*m.i17 + 0.0058031*m.i9*m.i18 + 0.00524852*m.i9*m.i19 + 0.0217664*m.i9*m.i20 +
0.0137801*m.i9*m.i21 + 0.00556924*m.i9*m.i22 + 0.00707894*m.i9*m.i23 + 0.00383446*m.i9*m.i24 +
0.00797136*m.i9*m.i25 + 0.00671112*m.i9*m.i26 + 0.00962638*m.i9*m.i27 + 0.00548282*m.i9*m.i28 +
0.00537842*m.i9*m.i29 + 0.01125578*m.i9*m.i30 + 0.01033708*m.i9*m.i31 + 0.01741482*m.i9*m.i32 +
0.01282666*m.i9*m.i33 + 0.00490948*m.i9*m.i34 + 0.00344028*m.i9*m.i35 + 0.01643714*m.i9*m.i36 +
0.00871578*m.i9*m.i37 + 0.002884*m.i9*m.i38 + 0.01596496*m.i9*m.i39 + 0.0171071*m.i9*m.i40 +
0.0282184*m.i9*m.i41 + 0.0157083*m.i9*m.i42 + 0.01908622*m.i9*m.i43 + 0.01887462*m.i9*m.i44 +
0.00621506*m.i9*m.i45 + 0.00706654*m.i9*m.i46 + 0.01685764*m.i9*m.i47 + 0.0046064*m.i9*m.i48 +
0.01393082*m.i9*m.i49 + 0.01366172*m.i9*m.i50 + 0.00974224*m.i9*m.i51 + 0.01117786*m.i9*m.i52 +
0.0105042*m.i9*m.i53 + 0.01603942*m.i9*m.i54 + 0.01154502*m.i9*m.i55 + 0.0187017*m.i9*m.i56 +
0.0063051*m.i9*m.i57 + 0.01180982*m.i9*m.i58 + 0.01148738*m.i9*m.i59 + 0.0045111*m.i9*m.i60 +
0.01782442*m.i9*m.i61 + 0.01261594*m.i9*m.i62 + 0.0275116*m.i9*m.i63 + 0.01370986*m.i9*m.i64 +
0.01301448*m.i9*m.i65 + 0.00909146*m.i9*m.i66 + 0.00880956*m.i9*m.i67 + 0.00542126*m.i9*m.i68 +
0.0173699*m.i9*m.i69 + 0.0063573*m.i9*m.i70 + 0.01464082*m.i9*m.i71 + 0.01030184*m.i9*m.i72 +
0.01342364*m.i9*m.i73 + 0.01050302*m.i9*m.i74 + 0.00580926*m.i9*m.i75 + 0.00669824*m.i9*m.i76 +
0.0154461*m.i9*m.i77 + 0.00331996*m.i9*m.i78 - 0.00117976*m.i9*m.i79 + 0.0134427*m.i9*m.i80 +
0.01200946*m.i9*m.i81 + 0.00261992*m.i9*m.i82 + 0.01802554*m.i9*m.i83 + 0.01281546*m.i9*m.i84 +
0.00817562*m.i9*m.i85 + 0.01353278*m.i9*m.i86 + 0.0065419*m.i9*m.i87 + 0.0287756*m.i9*m.i88 +
0.00438656*m.i9*m.i89 + 0.006514*m.i9*m.i90 + 0.00948704*m.i9*m.i91 + 0.01460712*m.i9*m.i92 +
0.00442406*m.i9*m.i93 + 0.00525338*m.i9*m.i94 + 0.01080594*m.i9*m.i95 + 0.007284*m.i9*m.i96 +
0.01145784*m.i9*m.i97 + 0.01167366*m.i9*m.i98 + 0.01306896*m.i9*m.i99 + 0.01230056*m.i9*m.i100 +
0.00390108*m.i10*m.i11 + 0.00306506*m.i10*m.i12 + 0.0266658*m.i10*m.i13 + 0.027667*m.i10*m.i14 +
0.01278752*m.i10*m.i15 + 0.01031474*m.i10*m.i16 + 0.01126594*m.i10*m.i17 + 0.00489102*m.i10*m.i18
+ 0.00513038*m.i10*m.i19 + 0.01899656*m.i10*m.i20 + 0.01116072*m.i10*m.i21 + 0.0218888*m.i10*
m.i22 + 0.01101148*m.i10*m.i23 + 0.00938786*m.i10*m.i24 + 0.00495956*m.i10*m.i25 + 0.00409492*
m.i10*m.i26 + 0.00774196*m.i10*m.i27 + 0.00563678*m.i10*m.i28 + 0.00452506*m.i10*m.i29 +
0.0234496*m.i10*m.i30 + 0.00879878*m.i10*m.i31 + 0.01816086*m.i10*m.i32 + 0.01204676*m.i10*m.i33
+ 0.00474448*m.i10*m.i34 + 0.00478426*m.i10*m.i35 + 0.0297012*m.i10*m.i36 + 0.0151832*m.i10*
m.i37 + 0.00256504*m.i10*m.i38 + 0.01482468*m.i10*m.i39 + 0.0351312*m.i10*m.i40 + 0.00722204*
m.i10*m.i41 + 0.00911442*m.i10*m.i42 + 0.00459148*m.i10*m.i43 + 0.00643892*m.i10*m.i44 +
0.00232242*m.i10*m.i45 + 0.00525016*m.i10*m.i46 + 0.00918898*m.i10*m.i47 + 0.00604914*m.i10*m.i48
+ 0.00855226*m.i10*m.i49 + 0.01758968*m.i10*m.i50 + 0.00905476*m.i10*m.i51 + 0.0076611*m.i10*
m.i52 + 0.01159398*m.i10*m.i53 + 0.00933998*m.i10*m.i54 + 0.00932956*m.i10*m.i55 + 0.0077777*
m.i10*m.i56 + 0.00585234*m.i10*m.i57 + 0.00494612*m.i10*m.i58 + 0.01267098*m.i10*m.i59 +
0.0025072*m.i10*m.i60 + 0.01652258*m.i10*m.i61 + 0.0113132*m.i10*m.i62 + 0.00647572*m.i10*m.i63
+ 0.00509638*m.i10*m.i64 + 0.00796924*m.i10*m.i65 + 0.00671784*m.i10*m.i66 + 0.00876736*m.i10*
m.i67 + 0.00330284*m.i10*m.i68 + 0.0143256*m.i10*m.i69 + 0.00658518*m.i10*m.i70 + 0.00751304*
m.i10*m.i71 + 0.00447272*m.i10*m.i72 + 0.00707326*m.i10*m.i73 + 0.01022514*m.i10*m.i74 +
0.00629098*m.i10*m.i75 + 0.00437386*m.i10*m.i76 + 0.0069722*m.i10*m.i77 + 0.00631338*m.i10*m.i78
+ 0.01475202*m.i10*m.i79 + 0.00722624*m.i10*m.i80 + 0.00973154*m.i10*m.i81 + 0.00371556*m.i10*
m.i82 + 0.00253096*m.i10*m.i83 + 0.008833*m.i10*m.i84 + 0.00871744*m.i10*m.i85 + 0.0101816*m.i10*
m.i86 + 0.01000738*m.i10*m.i87 + 0.01974334*m.i10*m.i88 + 0.00587674*m.i10*m.i89 + 0.0124516*
m.i10*m.i90 + 0.00915752*m.i10*m.i91 + 0.00913708*m.i10*m.i92 + 0.00200378*m.i10*m.i93 +
0.00536928*m.i10*m.i94 + 0.00823672*m.i10*m.i95 + 0.01736144*m.i10*m.i96 + 0.01105742*m.i10*m.i97
+ 0.01023842*m.i10*m.i98 + 0.01685104*m.i10*m.i99 + 0.01457986*m.i10*m.i100 + 0.000833086*m.i11*
m.i12 + 0.00999478*m.i11*m.i13 + 0.01344484*m.i11*m.i14 + 0.0031808*m.i11*m.i15 + 0.01117228*
m.i11*m.i16 + 0.000697152*m.i11*m.i17 + 0.000585828*m.i11*m.i18 + 0.00585952*m.i11*m.i19 +
0.00859976*m.i11*m.i20 + 0.00502902*m.i11*m.i21 + 0.00447154*m.i11*m.i22 + 0.001969568*m.i11*
m.i23 + 0.0049358*m.i11*m.i24 - 0.00029705*m.i11*m.i25 + 0.0008833*m.i11*m.i26 + 0.00788936*m.i11
*m.i27 + 0.00223564*m.i11*m.i28 - 0.001370818*m.i11*m.i29 + 0.0148367*m.i11*m.i30 + 0.01084338*
m.i11*m.i31 + 0.000606756*m.i11*m.i32 + 0.00591896*m.i11*m.i33 - 0.00408456*m.i11*m.i34 -
0.002724*m.i11*m.i35 + 0.01495302*m.i11*m.i36 + 0.0001528802*m.i11*m.i37 + 0.000200858*m.i11*
m.i38 + 0.00843216*m.i11*m.i39 + 0.01341476*m.i11*m.i40 + 0.01160686*m.i11*m.i41 + 0.00464728*
m.i11*m.i42 + 0.00803576*m.i11*m.i43 + 0.00270742*m.i11*m.i44 - 0.00352162*m.i11*m.i45 +
0.000947796*m.i11*m.i46 + 0.00388898*m.i11*m.i47 + 0.00557236*m.i11*m.i48 + 0.00208008*m.i11*
m.i49 + 0.000931698*m.i11*m.i50 + 0.000654446*m.i11*m.i51 + 0.00650504*m.i11*m.i52 + 0.000501194*
m.i11*m.i53 + 0.00681518*m.i11*m.i54 + 0.00601122*m.i11*m.i55 - 0.00507122*m.i11*m.i56 +
0.000483176*m.i11*m.i57 + 0.00482018*m.i11*m.i58 + 0.0064067*m.i11*m.i59 - 0.000166498*m.i11*
m.i60 + 0.00575774*m.i11*m.i61 + 0.00725456*m.i11*m.i62 + 0.00219412*m.i11*m.i63 + 0.0084673*
m.i11*m.i64 + 0.000333436*m.i11*m.i65 + 0.00655332*m.i11*m.i66 - 0.00257168*m.i11*m.i67 +
0.01199786*m.i11*m.i68 + 0.0059299*m.i11*m.i69 + 0.001843394*m.i11*m.i70 + 0.01060724*m.i11*m.i71
+ 0.00647206*m.i11*m.i72 + 0.00231676*m.i11*m.i73 + 0.00580344*m.i11*m.i74 + 0.00620538*m.i11*
m.i75 - 0.000334258*m.i11*m.i76 + 0.00656424*m.i11*m.i77 - 0.001286316*m.i11*m.i78 + 0.00546106*
m.i11*m.i79 - 0.000202642*m.i11*m.i80 + 0.00426114*m.i11*m.i81 - 0.00204892*m.i11*m.i82 +
0.01117602*m.i11*m.i83 + 0.01034244*m.i11*m.i84 + 0.00449542*m.i11*m.i85 + 0.00797378*m.i11*m.i86
- 0.000792844*m.i11*m.i87 + 0.01939124*m.i11*m.i88 + 0.00432784*m.i11*m.i89 + 0.00204578*m.i11*
m.i90 + 0.021152*m.i11*m.i91 + 0.00283286*m.i11*m.i92 - 0.00407532*m.i11*m.i93 - 0.001198622*
m.i11*m.i94 + 0.0056114*m.i11*m.i95 + 0.00560696*m.i11*m.i96 + 0.00867776*m.i11*m.i97 +
0.01208222*m.i11*m.i98 + 0.00209588*m.i11*m.i99 + 0.0061276*m.i11*m.i100 + 0.00580036*m.i12*m.i13
+ 0.01674486*m.i12*m.i14 + 0.00758412*m.i12*m.i15 + 0.0061097*m.i12*m.i16 + 0.00406024*m.i12*
m.i17 + 0.00246134*m.i12*m.i18 + 0.00422294*m.i12*m.i19 + 0.00359302*m.i12*m.i20 + 0.0027503*
m.i12*m.i21 + 0.01042736*m.i12*m.i22 + 0.001094158*m.i12*m.i23 + 0.00410122*m.i12*m.i24 +
0.0025257*m.i12*m.i25 + 0.00319626*m.i12*m.i26 + 0.00241386*m.i12*m.i27 + 0.001365712*m.i12*m.i28
+ 0.00285332*m.i12*m.i29 + 0.01617908*m.i12*m.i30 + 0.00231724*m.i12*m.i31 + 0.00343892*m.i12*
m.i32 + 0.00256516*m.i12*m.i33 + 0.001014308*m.i12*m.i34 + 0.001643396*m.i12*m.i35 + 0.00879946*
m.i12*m.i36 + 0.00422942*m.i12*m.i37 + 0.001108756*m.i12*m.i38 + 0.0068803*m.i12*m.i39 -
0.00375268*m.i12*m.i40 + 0.0029422*m.i12*m.i41 + 0.00429146*m.i12*m.i42 + 0.00277958*m.i12*m.i43
+ 0.00284814*m.i12*m.i44 + 0.001633544*m.i12*m.i45 + 0.00422296*m.i12*m.i46 + 0.000606884*m.i12*
m.i47 + 0.0041981*m.i12*m.i48 + 0.00378962*m.i12*m.i49 + 0.00842602*m.i12*m.i50 + 0.002132*m.i12*
m.i51 + 0.00482062*m.i12*m.i52 + 0.00806126*m.i12*m.i53 + 0.00387284*m.i12*m.i54 + 0.0039366*
m.i12*m.i55 + 0.000612768*m.i12*m.i56 + 0.0044852*m.i12*m.i57 + 0.00284844*m.i12*m.i58 +
0.00336708*m.i12*m.i59 + 0.0030099*m.i12*m.i60 + 0.00693418*m.i12*m.i61 + 0.0046908*m.i12*m.i62
+ 0.00538386*m.i12*m.i63 + 0.00560854*m.i12*m.i64 + 0.00360994*m.i12*m.i65 + 0.00317544*m.i12*
m.i66 + 0.00443286*m.i12*m.i67 + 0.00420074*m.i12*m.i68 + 0.00506986*m.i12*m.i69 + 0.00415464*
m.i12*m.i70 + 0.00220046*m.i12*m.i71 + 0.00230386*m.i12*m.i72 + 0.00311708*m.i12*m.i73 +
0.00731294*m.i12*m.i74 + 0.0048156*m.i12*m.i75 + 0.00332812*m.i12*m.i76 + 0.00439802*m.i12*m.i77
+ 0.00371872*m.i12*m.i78 + 0.00601328*m.i12*m.i79 + 0.00749754*m.i12*m.i80 + 0.00280082*m.i12*
m.i81 + 0.00202854*m.i12*m.i82 + 0.001389608*m.i12*m.i83 + 0.00387764*m.i12*m.i84 + 0.00354982*
m.i12*m.i85 + 0.00265444*m.i12*m.i86 + 0.0022211*m.i12*m.i87 + 0.00666916*m.i12*m.i88 +
0.00412408*m.i12*m.i89 + 0.00421336*m.i12*m.i90 + 0.00306034*m.i12*m.i91 + 0.00210254*m.i12*m.i92
+ 0.001819242*m.i12*m.i93 + 0.0007903*m.i12*m.i94 + 0.00409078*m.i12*m.i95 + 0.00988156*m.i12*
m.i96 + 0.00522182*m.i12*m.i97 + 0.00482098*m.i12*m.i98 + 0.0042136*m.i12*m.i99 + 0.00408986*
m.i12*m.i100 + 0.0674968*m.i13*m.i14 + 0.0344974*m.i13*m.i15 + 0.0330226*m.i13*m.i16 + 0.0319354*
m.i13*m.i17 + 0.01218366*m.i13*m.i18 + 0.00519196*m.i13*m.i19 + 0.044536*m.i13*m.i20 + 0.0277772*
m.i13*m.i21 + 0.0622606*m.i13*m.i22 + 0.0259408*m.i13*m.i23 + 0.0302608*m.i13*m.i24 + 0.0163455*
m.i13*m.i25 + 0.0077583*m.i13*m.i26 + 0.0227636*m.i13*m.i27 + 0.01173702*m.i13*m.i28 + 0.00769116
*m.i13*m.i29 + 0.0709126*m.i13*m.i30 + 0.01974624*m.i13*m.i31 + 0.0471936*m.i13*m.i32 + 0.0320402
*m.i13*m.i33 + 0.0107856*m.i13*m.i34 + 0.00663924*m.i13*m.i35 + 0.0963608*m.i13*m.i36 + 0.0383208
*m.i13*m.i37 + 0.00629602*m.i13*m.i38 + 0.0436584*m.i13*m.i39 + 0.113305*m.i13*m.i40 + 0.030603*
m.i13*m.i41 + 0.0334486*m.i13*m.i42 + 0.0221094*m.i13*m.i43 + 0.0261022*m.i13*m.i44 + 0.00384036*
m.i13*m.i45 + 0.01393368*m.i13*m.i46 + 0.0390862*m.i13*m.i47 + 0.01408516*m.i13*m.i48 + 0.0200136
*m.i13*m.i49 + 0.0473844*m.i13*m.i50 + 0.0233922*m.i13*m.i51 + 0.0267544*m.i13*m.i52 + 0.0382128*
m.i13*m.i53 + 0.026998*m.i13*m.i54 + 0.0232812*m.i13*m.i55 + 0.0210468*m.i13*m.i56 + 0.01155576*
m.i13*m.i57 + 0.01460704*m.i13*m.i58 + 0.0315638*m.i13*m.i59 + 0.00606798*m.i13*m.i60 + 0.048913*
m.i13*m.i61 + 0.0422528*m.i13*m.i62 + 0.0227364*m.i13*m.i63 + 0.0218176*m.i13*m.i64 + 0.020181*
m.i13*m.i65 + 0.0171918*m.i13*m.i66 + 0.0231896*m.i13*m.i67 + 0.00653966*m.i13*m.i68 + 0.0386908*
m.i13*m.i69 + 0.01310368*m.i13*m.i70 + 0.0233574*m.i13*m.i71 + 0.01370986*m.i13*m.i72 +
0.01644046*m.i13*m.i73 + 0.0239108*m.i13*m.i74 + 0.01209114*m.i13*m.i75 + 0.00733894*m.i13*m.i76
+ 0.01831752*m.i13*m.i77 + 0.01361596*m.i13*m.i78 + 0.0349392*m.i13*m.i79 + 0.01738086*m.i13*
m.i80 + 0.0327952*m.i13*m.i81 + 0.00370036*m.i13*m.i82 + 0.0275306*m.i13*m.i83 + 0.0237408*m.i13*
m.i84 + 0.023854*m.i13*m.i85 + 0.0298082*m.i13*m.i86 + 0.01954408*m.i13*m.i87 + 0.0427146*m.i13*
m.i88 + 0.00800344*m.i13*m.i89 + 0.0379614*m.i13*m.i90 + 0.0237386*m.i13*m.i91 + 0.0280402*m.i13*
m.i92 + 0.00539152*m.i13*m.i93 + 0.00878456*m.i13*m.i94 + 0.0258544*m.i13*m.i95 + 0.0525716*m.i13
*m.i96 + 0.0324866*m.i13*m.i97 + 0.03178*m.i13*m.i98 + 0.0440898*m.i13*m.i99 + 0.0425102*m.i13*
m.i100 + 0.0526828*m.i14*m.i15 + 0.037439*m.i14*m.i16 + 0.0256328*m.i14*m.i17 + 0.0100326*m.i14*
m.i18 + 0.02287*m.i14*m.i19 + 0.05764*m.i14*m.i20 + 0.0305304*m.i14*m.i21 + 0.0790588*m.i14*m.i22
+ 0.0273134*m.i14*m.i23 + 0.0226144*m.i14*m.i24 + 0.01919436*m.i14*m.i25 + 0.01634394*m.i14*
m.i26 + 0.0200216*m.i14*m.i27 + 0.01187024*m.i14*m.i28 + 0.0175096*m.i14*m.i29 + 0.1303416*m.i14*
m.i30 + 0.01783484*m.i14*m.i31 + 0.0483706*m.i14*m.i32 + 0.0389666*m.i14*m.i33 + 0.00488422*m.i14
*m.i34 + 0.01045608*m.i14*m.i35 + 0.0811654*m.i14*m.i36 + 0.0367626*m.i14*m.i37 + 0.00522434*
m.i14*m.i38 + 0.05055*m.i14*m.i39 + 0.0849278*m.i14*m.i40 + 0.0341058*m.i14*m.i41 + 0.029549*
m.i14*m.i42 + 0.0119177*m.i14*m.i43 + 0.034956*m.i14*m.i44 + 0.0084943*m.i14*m.i45 + 0.01853266*
m.i14*m.i46 + 0.01893124*m.i14*m.i47 + 0.0205662*m.i14*m.i48 + 0.0326974*m.i14*m.i49 + 0.0610942*
m.i14*m.i50 + 0.0265816*m.i14*m.i51 + 0.0345152*m.i14*m.i52 + 0.0602904*m.i14*m.i53 + 0.0299894*
m.i14*m.i54 + 0.029724*m.i14*m.i55 + 0.00991024*m.i14*m.i56 + 0.0212834*m.i14*m.i57 + 0.01611994*
m.i14*m.i58 + 0.0349608*m.i14*m.i59 + 0.01544524*m.i14*m.i60 + 0.0660828*m.i14*m.i61 + 0.0517844*
m.i14*m.i62 + 0.0288716*m.i14*m.i63 + 0.02065*m.i14*m.i64 + 0.0285834*m.i14*m.i65 + 0.01348302*
m.i14*m.i66 + 0.0306592*m.i14*m.i67 + 0.01828946*m.i14*m.i68 + 0.0537368*m.i14*m.i69 + 0.0271944*
m.i14*m.i70 + 0.01793364*m.i14*m.i71 + 0.0206146*m.i14*m.i72 + 0.0281438*m.i14*m.i73 + 0.038653*
m.i14*m.i74 + 0.0322466*m.i14*m.i75 + 0.0212534*m.i14*m.i76 + 0.0336072*m.i14*m.i77 + 0.01910646*
m.i14*m.i78 + 0.0653414*m.i14*m.i79 + 0.0269972*m.i14*m.i80 + 0.0273492*m.i14*m.i81 + 0.01038358*
m.i14*m.i82 + 0.00619204*m.i14*m.i83 + 0.0273406*m.i14*m.i84 + 0.0211516*m.i14*m.i85 + 0.0382364*
m.i14*m.i86 + 0.0345294*m.i14*m.i87 + 0.1230516*m.i14*m.i88 + 0.032645*m.i14*m.i89 + 0.0494242*
m.i14*m.i90 + 0.030464*m.i14*m.i91 + 0.0229316*m.i14*m.i92 + 0.01328606*m.i14*m.i93 + 0.01219994*
m.i14*m.i94 + 0.0308436*m.i14*m.i95 + 0.0853596*m.i14*m.i96 + 0.0354032*m.i14*m.i97 + 0.0262134*
m.i14*m.i98 + 0.0473304*m.i14*m.i99 + 0.037143*m.i14*m.i100 + 0.01723066*m.i15*m.i16 + 0.0144032*
m.i15*m.i17 + 0.01011568*m.i15*m.i18 + 0.01071386*m.i15*m.i19 + 0.0363128*m.i15*m.i20 + 0.0200062
*m.i15*m.i21 + 0.0429276*m.i15*m.i22 + 0.01550086*m.i15*m.i23 + 0.01336936*m.i15*m.i24 +
0.01153424*m.i15*m.i25 + 0.01291552*m.i15*m.i26 + 0.01571376*m.i15*m.i27 + 0.0057752*m.i15*m.i28
+ 0.01132328*m.i15*m.i29 + 0.04615*m.i15*m.i30 + 0.0095472*m.i15*m.i31 + 0.0348208*m.i15*m.i32
+ 0.01999334*m.i15*m.i33 + 0.00687142*m.i15*m.i34 + 0.00887602*m.i15*m.i35 + 0.0412134*m.i15*
m.i36 + 0.0222294*m.i15*m.i37 + 0.0044452*m.i15*m.i38 + 0.0275012*m.i15*m.i39 + 0.0449902*m.i15*
m.i40 + 0.0316194*m.i15*m.i41 + 0.021335*m.i15*m.i42 + 0.01203424*m.i15*m.i43 + 0.0250958*m.i15*
m.i44 + 0.00747774*m.i15*m.i45 + 0.01208838*m.i15*m.i46 + 0.0258298*m.i15*m.i47 + 0.01217868*
m.i15*m.i48 + 0.0181139*m.i15*m.i49 + 0.0324096*m.i15*m.i50 + 0.01156602*m.i15*m.i51 + 0.01869794
*m.i15*m.i52 + 0.0276488*m.i15*m.i53 + 0.0230496*m.i15*m.i54 + 0.0171536*m.i15*m.i55 + 0.01527606
*m.i15*m.i56 + 0.01288824*m.i15*m.i57 + 0.014014*m.i15*m.i58 + 0.01657292*m.i15*m.i59 + 0.0080112
*m.i15*m.i60 + 0.0380938*m.i15*m.i61 + 0.0298954*m.i15*m.i62 + 0.0218266*m.i15*m.i63 + 0.01580514
*m.i15*m.i64 + 0.01327226*m.i15*m.i65 + 0.01171988*m.i15*m.i66 + 0.01749552*m.i15*m.i67 +
0.00958228*m.i15*m.i68 + 0.02991*m.i15*m.i69 + 0.01687722*m.i15*m.i70 + 0.0214718*m.i15*m.i71 +
0.0177952*m.i15*m.i72 + 0.01429134*m.i15*m.i73 + 0.01835742*m.i15*m.i74 + 0.014413*m.i15*m.i75 +
0.01215492*m.i15*m.i76 + 0.01888264*m.i15*m.i77 + 0.01135654*m.i15*m.i78 + 0.01419354*m.i15*m.i79
+ 0.01589948*m.i15*m.i80 + 0.01996746*m.i15*m.i81 + 0.00616376*m.i15*m.i82 + 0.00905236*m.i15*
m.i83 + 0.01329424*m.i15*m.i84 + 0.01265054*m.i15*m.i85 + 0.01743812*m.i15*m.i86 + 0.01662354*
m.i15*m.i87 + 0.0326642*m.i15*m.i88 + 0.00648876*m.i15*m.i89 + 0.0255582*m.i15*m.i90 + 0.01710528
*m.i15*m.i91 + 0.01530604*m.i15*m.i92 + 0.00729364*m.i15*m.i93 + 0.00786908*m.i15*m.i94 +
0.0169034*m.i15*m.i95 + 0.034265*m.i15*m.i96 + 0.0206426*m.i15*m.i97 + 0.01574576*m.i15*m.i98 +
0.0251768*m.i15*m.i99 + 0.0302234*m.i15*m.i100 + 0.0180502*m.i16*m.i17 + 0.00797572*m.i16*m.i18
+ 0.00993386*m.i16*m.i19 + 0.0236072*m.i16*m.i20 + 0.01425014*m.i16*m.i21 + 0.0269392*m.i16*
m.i22 + 0.01322908*m.i16*m.i23 + 0.01719786*m.i16*m.i24 + 0.00995474*m.i16*m.i25 + 0.00544834*
m.i16*m.i26 + 0.01319632*m.i16*m.i27 + 0.00695148*m.i16*m.i28 + 0.00568042*m.i16*m.i29 + 0.045082
*m.i16*m.i30 + 0.01190474*m.i16*m.i31 + 0.01955462*m.i16*m.i32 + 0.0138212*m.i16*m.i33 +
0.00642106*m.i16*m.i34 + 0.00665524*m.i16*m.i35 + 0.0380492*m.i16*m.i36 + 0.01602708*m.i16*m.i37
+ 0.00369958*m.i16*m.i38 + 0.0220792*m.i16*m.i39 + 0.0304262*m.i16*m.i40 + 0.01843444*m.i16*
m.i41 + 0.021247*m.i16*m.i42 + 0.01518988*m.i16*m.i43 + 0.01406774*m.i16*m.i44 + 0.0051723*m.i16*
m.i45 + 0.0080675*m.i16*m.i46 + 0.0176419*m.i16*m.i47 + 0.0090298*m.i16*m.i48 + 0.0126196*m.i16*
m.i49 + 0.025967*m.i16*m.i50 + 0.01140228*m.i16*m.i51 + 0.01900414*m.i16*m.i52 + 0.01781402*m.i16
*m.i53 + 0.0194748*m.i16*m.i54 + 0.01211848*m.i16*m.i55 + 0.01166912*m.i16*m.i56 + 0.00870972*
m.i16*m.i57 + 0.00719416*m.i16*m.i58 + 0.01574372*m.i16*m.i59 + 0.00725944*m.i16*m.i60 +
0.0294988*m.i16*m.i61 + 0.0260914*m.i16*m.i62 + 0.01974094*m.i16*m.i63 + 0.01434116*m.i16*m.i64
+ 0.00954816*m.i16*m.i65 + 0.0087947*m.i16*m.i66 + 0.01216302*m.i16*m.i67 + 0.01307338*m.i16*
m.i68 + 0.023669*m.i16*m.i69 + 0.01061826*m.i16*m.i70 + 0.01531198*m.i16*m.i71 + 0.01282252*m.i16
*m.i72 + 0.01136194*m.i16*m.i73 + 0.01289612*m.i16*m.i74 + 0.0111961*m.i16*m.i75 + 0.00467394*
m.i16*m.i76 + 0.0120207*m.i16*m.i77 + 0.00634502*m.i16*m.i78 + 0.0272842*m.i16*m.i79 + 0.01354848
*m.i16*m.i80 + 0.01491878*m.i16*m.i81 + 0.00372788*m.i16*m.i82 + 0.01347184*m.i16*m.i83 +
0.01367452*m.i16*m.i84 + 0.01430584*m.i16*m.i85 + 0.01662228*m.i16*m.i86 + 0.01019354*m.i16*m.i87
+ 0.031864*m.i16*m.i88 + 0.01389622*m.i16*m.i89 + 0.01404588*m.i16*m.i90 + 0.01898344*m.i16*
m.i91 + 0.01310136*m.i16*m.i92 + 0.00293122*m.i16*m.i93 + 0.00548746*m.i16*m.i94 + 0.01674526*
m.i16*m.i95 + 0.0263504*m.i16*m.i96 + 0.0187966*m.i16*m.i97 + 0.0198675*m.i16*m.i98 + 0.0160833*
m.i16*m.i99 + 0.01885334*m.i16*m.i100 + 0.00599666*m.i17*m.i18 + 0.0047675*m.i17*m.i19 +
0.0265872*m.i17*m.i20 + 0.01628802*m.i17*m.i21 + 0.01871884*m.i17*m.i22 + 0.01233104*m.i17*m.i23
+ 0.01365522*m.i17*m.i24 + 0.00989432*m.i17*m.i25 + 0.00330258*m.i17*m.i26 + 0.0116841*m.i17*
m.i27 + 0.0079471*m.i17*m.i28 + 0.0045994*m.i17*m.i29 + 0.0254766*m.i17*m.i30 + 0.01659406*m.i17*
m.i31 + 0.0220846*m.i17*m.i32 + 0.01861566*m.i17*m.i33 + 0.00948066*m.i17*m.i34 + 0.0090429*m.i17
*m.i35 + 0.0337978*m.i17*m.i36 + 0.01595384*m.i17*m.i37 + 0.00235078*m.i17*m.i38 + 0.0201494*
m.i17*m.i39 + 0.0342284*m.i17*m.i40 + 0.0277738*m.i17*m.i41 + 0.01731318*m.i17*m.i42 + 0.01753214
*m.i17*m.i43 + 0.01978996*m.i17*m.i44 + 0.00369934*m.i17*m.i45 + 0.00718436*m.i17*m.i46 +
0.01949342*m.i17*m.i47 + 0.00499956*m.i17*m.i48 + 0.01707236*m.i17*m.i49 + 0.0203004*m.i17*m.i50
+ 0.01279548*m.i17*m.i51 + 0.011643*m.i17*m.i52 + 0.01115602*m.i17*m.i53 + 0.01587576*m.i17*
m.i54 + 0.010193*m.i17*m.i55 + 0.0217498*m.i17*m.i56 + 0.0064957*m.i17*m.i57 + 0.00989022*m.i17*
m.i58 + 0.01554654*m.i17*m.i59 + 0.00382894*m.i17*m.i60 + 0.01868378*m.i17*m.i61 + 0.01822302*
m.i17*m.i62 + 0.0270002*m.i17*m.i63 + 0.01054316*m.i17*m.i64 + 0.01114578*m.i17*m.i65 + 0.010706*
m.i17*m.i66 + 0.01057722*m.i17*m.i67 + 0.00541042*m.i17*m.i68 + 0.022045*m.i17*m.i69 + 0.00933892
*m.i17*m.i70 + 0.0217256*m.i17*m.i71 + 0.010527*m.i17*m.i72 + 0.01245986*m.i17*m.i73 + 0.01462496
*m.i17*m.i74 + 0.00471612*m.i17*m.i75 + 0.00385082*m.i17*m.i76 + 0.0150046*m.i17*m.i77 +
0.00469912*m.i17*m.i78 + 0.01570408*m.i17*m.i79 + 0.01238884*m.i17*m.i80 + 0.0167981*m.i17*m.i81
+ 0.00275656*m.i17*m.i82 + 0.0264668*m.i17*m.i83 + 0.01754616*m.i17*m.i84 + 0.0104241*m.i17*
m.i85 + 0.0155118*m.i17*m.i86 + 0.00992204*m.i17*m.i87 + 0.0334656*m.i17*m.i88 + 0.0100102*m.i17*
m.i89 + 0.00830234*m.i17*m.i90 + 0.00830522*m.i17*m.i91 + 0.01347376*m.i17*m.i92 + 0.00371114*
m.i17*m.i93 + 0.00721878*m.i17*m.i94 + 0.01197232*m.i17*m.i95 + 0.01097582*m.i17*m.i96 +
0.0153446*m.i17*m.i97 + 0.01911512*m.i17*m.i98 + 0.0158341*m.i17*m.i99 + 0.01647016*m.i17*m.i100
+ 0.0038501*m.i18*m.i19 + 0.01438424*m.i18*m.i20 + 0.00575166*m.i18*m.i21 + 0.01286738*m.i18*
m.i22 + 0.0072269*m.i18*m.i23 + 0.00577628*m.i18*m.i24 + 0.00353166*m.i18*m.i25 + 0.00406754*
m.i18*m.i26 + 0.00586712*m.i18*m.i27 + 0.00246394*m.i18*m.i28 + 0.00208424*m.i18*m.i29 +
0.00868042*m.i18*m.i30 + 0.00488392*m.i18*m.i31 + 0.01139774*m.i18*m.i32 + 0.00652178*m.i18*m.i33
+ 0.00514824*m.i18*m.i34 + 0.00420068*m.i18*m.i35 + 0.01314078*m.i18*m.i36 + 0.00738678*m.i18*
m.i37 + 0.00212172*m.i18*m.i38 + 0.00767338*m.i18*m.i39 + 0.01491396*m.i18*m.i40 + 0.00689198*
m.i18*m.i41 + 0.00941516*m.i18*m.i42 + 0.00703674*m.i18*m.i43 + 0.00623926*m.i18*m.i44 +
0.0042213*m.i18*m.i45 + 0.00377366*m.i18*m.i46 + 0.01005392*m.i18*m.i47 + 0.00385304*m.i18*m.i48
+ 0.0061538*m.i18*m.i49 + 0.00828744*m.i18*m.i50 + 0.00452496*m.i18*m.i51 + 0.00647618*m.i18*
m.i52 + 0.00595912*m.i18*m.i53 + 0.00909974*m.i18*m.i54 + 0.00683082*m.i18*m.i55 + 0.00696058*
m.i18*m.i56 + 0.00489492*m.i18*m.i57 + 0.00399036*m.i18*m.i58 + 0.0071619*m.i18*m.i59 +
0.00282566*m.i18*m.i60 + 0.01253118*m.i18*m.i61 + 0.01017836*m.i18*m.i62 + 0.0054806*m.i18*m.i63
+ 0.00679494*m.i18*m.i64 + 0.00492774*m.i18*m.i65 + 0.00294036*m.i18*m.i66 + 0.00302154*m.i18*
m.i67 + 0.00492864*m.i18*m.i68 + 0.01002278*m.i18*m.i69 + 0.00498708*m.i18*m.i70 + 0.00467346*
m.i18*m.i71 + 0.00622154*m.i18*m.i72 + 0.0060522*m.i18*m.i73 + 0.00606086*m.i18*m.i74 +
0.00435108*m.i18*m.i75 + 0.00246578*m.i18*m.i76 + 0.00518572*m.i18*m.i77 + 0.00318624*m.i18*m.i78
+ 0.00460288*m.i18*m.i79 + 0.007017*m.i18*m.i80 + 0.00647242*m.i18*m.i81 + 0.00407958*m.i18*
m.i82 - 0.000888864*m.i18*m.i83 + 0.00537106*m.i18*m.i84 + 0.00634694*m.i18*m.i85 + 0.00514234*
m.i18*m.i86 + 0.00350408*m.i18*m.i87 - 0.00202898*m.i18*m.i88 + 0.001751682*m.i18*m.i89 +
0.0065019*m.i18*m.i90 + 0.007451*m.i18*m.i91 + 0.0035437*m.i18*m.i92 + 0.001995674*m.i18*m.i93 +
0.00436006*m.i18*m.i94 + 0.00715274*m.i18*m.i95 + 0.00776482*m.i18*m.i96 + 0.00710082*m.i18*m.i97
+ 0.00609606*m.i18*m.i98 + 0.00652362*m.i18*m.i99 + 0.01247386*m.i18*m.i100 + 0.01204848*m.i19*
m.i20 + 0.00628788*m.i19*m.i21 + 0.00938206*m.i19*m.i22 + 0.00540152*m.i19*m.i23 + 0.00366816*
m.i19*m.i24 + 0.00424804*m.i19*m.i25 + 0.00443146*m.i19*m.i26 + 0.00550836*m.i19*m.i27 +
0.00441186*m.i19*m.i28 + 0.00464964*m.i19*m.i29 + 0.0215394*m.i19*m.i30 + 0.00534434*m.i19*m.i31
+ 0.01089826*m.i19*m.i32 + 0.00384858*m.i19*m.i33 + 0.00271286*m.i19*m.i34 + 0.00459438*m.i19*
m.i35 + 0.00753494*m.i19*m.i36 + 0.00675858*m.i19*m.i37 + 0.00330138*m.i19*m.i38 + 0.01012594*
m.i19*m.i39 + 0.00097236*m.i19*m.i40 + 0.00697634*m.i19*m.i41 + 0.0055734*m.i19*m.i42 +
0.00439042*m.i19*m.i43 + 0.00466626*m.i19*m.i44 + 0.0056599*m.i19*m.i45 + 0.00343664*m.i19*m.i46
+ 0.00191227*m.i19*m.i47 + 0.00409474*m.i19*m.i48 + 0.00728426*m.i19*m.i49 + 0.0118005*m.i19*
m.i50 + 0.00439032*m.i19*m.i51 + 0.00819602*m.i19*m.i52 + 0.00683532*m.i19*m.i53 + 0.00927236*
m.i19*m.i54 + 0.00638082*m.i19*m.i55 + 0.0049778*m.i19*m.i56 + 0.0064092*m.i19*m.i57 + 0.00332368
*m.i19*m.i58 + 0.00797006*m.i19*m.i59 + 0.00515114*m.i19*m.i60 + 0.0140857*m.i19*m.i61 +
0.00824548*m.i19*m.i62 + 0.00645382*m.i19*m.i63 + 0.00492056*m.i19*m.i64 + 0.0040063*m.i19*m.i65
+ 0.00621702*m.i19*m.i66 + 0.00486474*m.i19*m.i67 + 0.01089728*m.i19*m.i68 + 0.01064856*m.i19*
m.i69 + 0.00763898*m.i19*m.i70 + 0.00304924*m.i19*m.i71 + 0.00746516*m.i19*m.i72 + 0.0073895*
m.i19*m.i73 + 0.008372*m.i19*m.i74 + 0.0096269*m.i19*m.i75 + 0.00403824*m.i19*m.i76 + 0.00896868*
m.i19*m.i77 + 0.00369816*m.i19*m.i78 + 0.01338638*m.i19*m.i79 + 0.00702566*m.i19*m.i80 +
0.00204776*m.i19*m.i81 + 0.0040369*m.i19*m.i82 - 0.00617474*m.i19*m.i83 + 0.00664876*m.i19*m.i84
+ 0.00640014*m.i19*m.i85 + 0.00537574*m.i19*m.i86 + 0.00744762*m.i19*m.i87 + 0.0288232*m.i19*
m.i88 + 0.0089059*m.i19*m.i89 + 0.00438344*m.i19*m.i90 + 0.01192674*m.i19*m.i91 + 0.00326376*
m.i19*m.i92 + 0.00330764*m.i19*m.i93 + 0.00649262*m.i19*m.i94 + 0.0076392*m.i19*m.i95 +
0.01075072*m.i19*m.i96 + 0.00749846*m.i19*m.i97 + 0.00563188*m.i19*m.i98 + 0.00430788*m.i19*m.i99
+ 0.00505074*m.i19*m.i100 + 0.026993*m.i20*m.i21 + 0.0407142*m.i20*m.i22 + 0.0262048*m.i20*m.i23
+ 0.0233804*m.i20*m.i24 + 0.01566388*m.i20*m.i25 + 0.01254316*m.i20*m.i26 + 0.0230746*m.i20*
m.i27 + 0.01228074*m.i20*m.i28 + 0.01141404*m.i20*m.i29 + 0.046979*m.i20*m.i30 + 0.01956928*m.i20
*m.i31 + 0.0444886*m.i20*m.i32 + 0.0345924*m.i20*m.i33 + 0.01450852*m.i20*m.i34 + 0.01607032*
m.i20*m.i35 + 0.0534276*m.i20*m.i36 + 0.027915*m.i20*m.i37 + 0.00446976*m.i20*m.i38 + 0.0310128*
m.i20*m.i39 + 0.0617194*m.i20*m.i40 + 0.0418284*m.i20*m.i41 + 0.0284554*m.i20*m.i42 + 0.0202322*
m.i20*m.i43 + 0.0309222*m.i20*m.i44 + 0.00850138*m.i20*m.i45 + 0.01226594*m.i20*m.i46 + 0.0355744
*m.i20*m.i47 + 0.01044628*m.i20*m.i48 + 0.0261968*m.i20*m.i49 + 0.0353182*m.i20*m.i50 +
0.01768812*m.i20*m.i51 + 0.0227266*m.i20*m.i52 + 0.0229416*m.i20*m.i53 + 0.0285392*m.i20*m.i54 +
0.024215*m.i20*m.i55 + 0.0227*m.i20*m.i56 + 0.01349126*m.i20*m.i57 + 0.01576804*m.i20*m.i58 +
0.0251472*m.i20*m.i59 + 0.00678918*m.i20*m.i60 + 0.0460104*m.i20*m.i61 + 0.0362612*m.i20*m.i62 +
0.0246576*m.i20*m.i63 + 0.01897386*m.i20*m.i64 + 0.021042*m.i20*m.i65 + 0.01449872*m.i20*m.i66 +
0.01901978*m.i20*m.i67 + 0.01289314*m.i20*m.i68 + 0.04318*m.i20*m.i69 + 0.0192612*m.i20*m.i70 +
0.0319956*m.i20*m.i71 + 0.0241418*m.i20*m.i72 + 0.0231068*m.i20*m.i73 + 0.0232748*m.i20*m.i74 +
0.01394672*m.i20*m.i75 + 0.01233534*m.i20*m.i76 + 0.0250086*m.i20*m.i77 + 0.01003866*m.i20*m.i78
+ 0.01782134*m.i20*m.i79 + 0.0175231*m.i20*m.i80 + 0.0266842*m.i20*m.i81 + 0.00899148*m.i20*
m.i82 + 0.01916166*m.i20*m.i83 + 0.0237898*m.i20*m.i84 + 0.01674726*m.i20*m.i85 + 0.0243836*m.i20
*m.i86 + 0.0205712*m.i20*m.i87 + 0.0526016*m.i20*m.i88 + 0.01299922*m.i20*m.i89 + 0.0223216*m.i20
*m.i90 + 0.0221722*m.i20*m.i91 + 0.0200512*m.i20*m.i92 + 0.00605128*m.i20*m.i93 + 0.01422172*
m.i20*m.i94 + 0.0209666*m.i20*m.i95 + 0.0316224*m.i20*m.i96 + 0.0278754*m.i20*m.i97 + 0.0266692*
m.i20*m.i98 + 0.032317*m.i20*m.i99 + 0.0372718*m.i20*m.i100 + 0.0225584*m.i21*m.i22 + 0.01330824*
m.i21*m.i23 + 0.01120138*m.i21*m.i24 + 0.00988644*m.i21*m.i25 + 0.0053562*m.i21*m.i26 +
0.01171726*m.i21*m.i27 + 0.0075308*m.i21*m.i28 + 0.0062293*m.i21*m.i29 + 0.028151*m.i21*m.i30 +
0.01116532*m.i21*m.i31 + 0.024731*m.i21*m.i32 + 0.01403094*m.i21*m.i33 + 0.0053378*m.i21*m.i34 +
0.0062169*m.i21*m.i35 + 0.0322338*m.i21*m.i36 + 0.0173092*m.i21*m.i37 + 0.00310282*m.i21*m.i38 +
0.01943686*m.i21*m.i39 + 0.0397312*m.i21*m.i40 + 0.0227668*m.i21*m.i41 + 0.01402322*m.i21*m.i42
+ 0.01184862*m.i21*m.i43 + 0.01574106*m.i21*m.i44 + 0.00351088*m.i21*m.i45 + 0.00692094*m.i21*
m.i46 + 0.01710158*m.i21*m.i47 + 0.00581758*m.i21*m.i48 + 0.013985*m.i21*m.i49 + 0.0205976*m.i21*
m.i50 + 0.01286968*m.i21*m.i51 + 0.01222018*m.i21*m.i52 + 0.01492284*m.i21*m.i53 + 0.01502328*
m.i21*m.i54 + 0.01279528*m.i21*m.i55 + 0.01443928*m.i21*m.i56 + 0.00711002*m.i21*m.i57 +
0.00897148*m.i21*m.i58 + 0.0175601*m.i21*m.i59 + 0.00366562*m.i21*m.i60 + 0.0240206*m.i21*m.i61
+ 0.01871124*m.i21*m.i62 + 0.01471548*m.i21*m.i63 + 0.00910326*m.i21*m.i64 + 0.01121548*m.i21*
m.i65 + 0.0093615*m.i21*m.i66 + 0.0129081*m.i21*m.i67 + 0.0055548*m.i21*m.i68 + 0.0214638*m.i21*
m.i69 + 0.00932128*m.i21*m.i70 + 0.01654162*m.i21*m.i71 + 0.01150414*m.i21*m.i72 + 0.01130758*
m.i21*m.i73 + 0.01195864*m.i21*m.i74 + 0.00685764*m.i21*m.i75 + 0.00673976*m.i21*m.i76 +
0.01092518*m.i21*m.i77 + 0.00610126*m.i21*m.i78 + 0.0166491*m.i21*m.i79 + 0.00973956*m.i21*m.i80
+ 0.01360816*m.i21*m.i81 + 0.00413938*m.i21*m.i82 + 0.01295166*m.i21*m.i83 + 0.01359658*m.i21*
m.i84 + 0.0100056*m.i21*m.i85 + 0.01591198*m.i21*m.i86 + 0.01302584*m.i21*m.i87 + 0.0321888*m.i21
*m.i88 + 0.0069057*m.i21*m.i89 + 0.01467542*m.i21*m.i90 + 0.0104985*m.i21*m.i91 + 0.01203108*
m.i21*m.i92 + 0.00438602*m.i21*m.i93 + 0.0064228*m.i21*m.i94 + 0.0109577*m.i21*m.i95 + 0.01683074
*m.i21*m.i96 + 0.01510662*m.i21*m.i97 + 0.013665*m.i21*m.i98 + 0.01994166*m.i21*m.i99 + 0.0184821
*m.i21*m.i100 + 0.01713984*m.i22*m.i23 + 0.0290628*m.i22*m.i24 + 0.01659484*m.i22*m.i25 +
0.01330504*m.i22*m.i26 + 0.0220338*m.i22*m.i27 + 0.0096401*m.i22*m.i28 + 0.01336178*m.i22*m.i29
+ 0.0794522*m.i22*m.i30 + 0.00912184*m.i22*m.i31 + 0.0466568*m.i22*m.i32 + 0.0203942*m.i22*m.i33
+ 0.00695226*m.i22*m.i34 + 0.0125215*m.i22*m.i35 + 0.0728992*m.i22*m.i36 + 0.0354588*m.i22*m.i37
+ 0.00691112*m.i22*m.i38 + 0.037201*m.i22*m.i39 + 0.0756082*m.i22*m.i40 + 0.0292772*m.i22*m.i41
+ 0.0266054*m.i22*m.i42 + 0.01269282*m.i22*m.i43 + 0.0230306*m.i22*m.i44 + 0.000804368*m.i22*
m.i45 + 0.01545384*m.i22*m.i46 + 0.0296748*m.i22*m.i47 + 0.0193381*m.i22*m.i48 + 0.0200644*m.i22*
m.i49 + 0.0450946*m.i22*m.i50 + 0.01567104*m.i22*m.i51 + 0.0202574*m.i22*m.i52 + 0.0456018*m.i22*
m.i53 + 0.024727*m.i22*m.i54 + 0.01871804*m.i22*m.i55 + 0.01574656*m.i22*m.i56 + 0.01426746*m.i22
*m.i57 + 0.0112117*m.i22*m.i58 + 0.0237092*m.i22*m.i59 + 0.01100176*m.i22*m.i60 + 0.0484136*m.i22
*m.i61 + 0.0477626*m.i22*m.i62 + 0.01715072*m.i22*m.i63 + 0.01569402*m.i22*m.i64 + 0.0163363*
m.i22*m.i65 + 0.00819194*m.i22*m.i66 + 0.0250362*m.i22*m.i67 + 0.01191736*m.i22*m.i68 + 0.0445474
*m.i22*m.i69 + 0.0208408*m.i22*m.i70 + 0.0196514*m.i22*m.i71 + 0.01993902*m.i22*m.i72 +
0.01317816*m.i22*m.i73 + 0.0290184*m.i22*m.i74 + 0.022028*m.i22*m.i75 + 0.01241074*m.i22*m.i76 +
0.01467528*m.i22*m.i77 + 0.0179883*m.i22*m.i78 + 0.040464*m.i22*m.i79 + 0.01646476*m.i22*m.i80 +
0.0251454*m.i22*m.i81 + 0.00665554*m.i22*m.i82 - 0.00094782*m.i22*m.i83 + 0.01809638*m.i22*m.i84
+ 0.01658492*m.i22*m.i85 + 0.0242392*m.i22*m.i86 + 0.0215874*m.i22*m.i87 + 0.0229098*m.i22*m.i88
+ 0.01114584*m.i22*m.i89 + 0.046945*m.i22*m.i90 + 0.0230318*m.i22*m.i91 + 0.01381346*m.i22*m.i92
+ 0.0100301*m.i22*m.i93 + 0.00837496*m.i22*m.i94 + 0.0250054*m.i22*m.i95 + 0.0620424*m.i22*m.i96
+ 0.0302296*m.i22*m.i97 + 0.0248336*m.i22*m.i98 + 0.0372288*m.i22*m.i99 + 0.0441042*m.i22*m.i100
+ 0.00618108*m.i23*m.i24 + 0.00567144*m.i23*m.i25 + 0.0048866*m.i23*m.i26 + 0.00839514*m.i23*
m.i27 + 0.00487436*m.i23*m.i28 + 0.004356*m.i23*m.i29 + 0.024299*m.i23*m.i30 + 0.00996842*m.i23*
m.i31 + 0.0204928*m.i23*m.i32 + 0.01726232*m.i23*m.i33 + 0.00564344*m.i23*m.i34 + 0.00506272*
m.i23*m.i35 + 0.027322*m.i23*m.i36 + 0.01648718*m.i23*m.i37 + 0.001813512*m.i23*m.i38 + 0.0143408
*m.i23*m.i39 + 0.0410642*m.i23*m.i40 + 0.00822668*m.i23*m.i41 + 0.01397884*m.i23*m.i42 +
0.00751294*m.i23*m.i43 + 0.01081252*m.i23*m.i44 + 0.00375058*m.i23*m.i45 + 0.00488444*m.i23*m.i46
+ 0.01210078*m.i23*m.i47 + 0.0050334*m.i23*m.i48 + 0.01042672*m.i23*m.i49 + 0.01834872*m.i23*
m.i50 + 0.0122672*m.i23*m.i51 + 0.01291522*m.i23*m.i52 + 0.01243908*m.i23*m.i53 + 0.01372984*
m.i23*m.i54 + 0.0114482*m.i23*m.i55 + 0.0105593*m.i23*m.i56 + 0.00644542*m.i23*m.i57 + 0.00648944
*m.i23*m.i58 + 0.01543002*m.i23*m.i59 + 0.0037869*m.i23*m.i60 + 0.0214726*m.i23*m.i61 +
0.01495998*m.i23*m.i62 + 0.00692592*m.i23*m.i63 + 0.00648514*m.i23*m.i64 + 0.00794602*m.i23*m.i65
+ 0.00558232*m.i23*m.i66 + 0.0093087*m.i23*m.i67 + 0.000819996*m.i23*m.i68 + 0.01512186*m.i23*
m.i69 + 0.0070338*m.i23*m.i70 + 0.00840292*m.i23*m.i71 + 0.00668858*m.i23*m.i72 + 0.00956292*
m.i23*m.i73 + 0.00972254*m.i23*m.i74 + 0.00409738*m.i23*m.i75 + 0.00544566*m.i23*m.i76 +
0.01207296*m.i23*m.i77 + 0.00561846*m.i23*m.i78 + 0.01639358*m.i23*m.i79 + 0.00769632*m.i23*m.i80
+ 0.01062502*m.i23*m.i81 + 0.0060578*m.i23*m.i82 + 0.00866906*m.i23*m.i83 + 0.00707332*m.i23*
m.i84 + 0.01006612*m.i23*m.i85 + 0.01147664*m.i23*m.i86 + 0.0127172*m.i23*m.i87 + 0.01718458*
m.i23*m.i88 + 0.00499896*m.i23*m.i89 + 0.01300446*m.i23*m.i90 + 0.00824348*m.i23*m.i91 +
0.01100222*m.i23*m.i92 + 0.00359882*m.i23*m.i93 + 0.00760194*m.i23*m.i94 + 0.01026304*m.i23*m.i95
+ 0.01748628*m.i23*m.i96 + 0.01222018*m.i23*m.i97 + 0.00656104*m.i23*m.i98 + 0.01929844*m.i23*
m.i99 + 0.01526792*m.i23*m.i100 + 0.01061256*m.i24*m.i25 + 0.00390748*m.i24*m.i26 + 0.0176534*
m.i24*m.i27 + 0.00973526*m.i24*m.i28 + 0.00580416*m.i24*m.i29 + 0.0308904*m.i24*m.i30 +
0.00564094*m.i24*m.i31 + 0.0202996*m.i24*m.i32 + 0.00846578*m.i24*m.i33 + 0.00878324*m.i24*m.i34
+ 0.0092725*m.i24*m.i35 + 0.0386418*m.i24*m.i36 + 0.01405906*m.i24*m.i37 + 0.0050169*m.i24*m.i38
+ 0.01753958*m.i24*m.i39 + 0.0277342*m.i24*m.i40 + 0.0200538*m.i24*m.i41 + 0.0160148*m.i24*m.i42
+ 0.01157484*m.i24*m.i43 + 0.0097945*m.i24*m.i44 + 0.00047637*m.i24*m.i45 + 0.0074696*m.i24*
m.i46 + 0.0232922*m.i24*m.i47 + 0.0064693*m.i24*m.i48 + 0.0076863*m.i24*m.i49 + 0.01970906*m.i24*
m.i50 + 0.00539232*m.i24*m.i51 + 0.01285448*m.i24*m.i52 + 0.0120141*m.i24*m.i53 + 0.0124346*m.i24
*m.i54 + 0.00898946*m.i24*m.i55 + 0.00726448*m.i24*m.i56 + 0.0065436*m.i24*m.i57 + 0.00501008*
m.i24*m.i58 + 0.01101314*m.i24*m.i59 + 0.00470396*m.i24*m.i60 + 0.0237074*m.i24*m.i61 + 0.0228986
*m.i24*m.i62 + 0.01228188*m.i24*m.i63 + 0.01100376*m.i24*m.i64 + 0.00915078*m.i24*m.i65 +
0.0069269*m.i24*m.i66 + 0.01206108*m.i24*m.i67 + 0.00908652*m.i24*m.i68 + 0.0217466*m.i24*m.i69
+ 0.00887002*m.i24*m.i70 + 0.022452*m.i24*m.i71 + 0.0139555*m.i24*m.i72 + 0.00715706*m.i24*m.i73
+ 0.01096546*m.i24*m.i74 + 0.00744888*m.i24*m.i75 + 0.0028668*m.i24*m.i76 + 0.0036177*m.i24*
m.i77 + 0.00580328*m.i24*m.i78 + 0.0086669*m.i24*m.i79 + 0.00929752*m.i24*m.i80 + 0.01854944*
m.i24*m.i81 + 0.0023229*m.i24*m.i82 + 0.01207648*m.i24*m.i83 + 0.01205652*m.i24*m.i84 + 0.0096674
*m.i24*m.i85 + 0.0108503*m.i24*m.i86 + 0.00597266*m.i24*m.i87 + 0.0190243*m.i24*m.i88 +
0.00640978*m.i24*m.i89 + 0.01034642*m.i24*m.i90 + 0.01193214*m.i24*m.i91 + 0.00822214*m.i24*m.i92
+ 0.00070224*m.i24*m.i93 + 0.00307244*m.i24*m.i94 + 0.01092084*m.i24*m.i95 + 0.0203774*m.i24*
m.i96 + 0.01743418*m.i24*m.i97 + 0.0232524*m.i24*m.i98 + 0.01437366*m.i24*m.i99 + 0.01998814*
m.i24*m.i100 + 0.00270846*m.i25*m.i26 + 0.00878244*m.i25*m.i27 + 0.00564506*m.i25*m.i28 +
0.00404084*m.i25*m.i29 + 0.0227806*m.i25*m.i30 + 0.00477484*m.i25*m.i31 + 0.016725*m.i25*m.i32 +
0.00496432*m.i25*m.i33 + 0.00361518*m.i25*m.i34 + 0.00462338*m.i25*m.i35 + 0.0204146*m.i25*m.i36
+ 0.01087624*m.i25*m.i37 + 0.00256388*m.i25*m.i38 + 0.01236456*m.i25*m.i39 + 0.01769162*m.i25*
m.i40 + 0.01576792*m.i25*m.i41 + 0.00928236*m.i25*m.i42 + 0.00793946*m.i25*m.i43 + 0.00966756*
m.i25*m.i44 + 0.00248138*m.i25*m.i45 + 0.00485932*m.i25*m.i46 + 0.0122764*m.i25*m.i47 + 0.0023089
*m.i25*m.i48 + 0.00859364*m.i25*m.i49 + 0.01421118*m.i25*m.i50 + 0.00733214*m.i25*m.i51 +
0.00816206*m.i25*m.i52 + 0.00960248*m.i25*m.i53 + 0.00866518*m.i25*m.i54 + 0.00692386*m.i25*m.i55
+ 0.00882586*m.i25*m.i56 + 0.00434948*m.i25*m.i57 + 0.0041589*m.i25*m.i58 + 0.01055232*m.i25*
m.i59 + 0.00330494*m.i25*m.i60 + 0.01561392*m.i25*m.i61 + 0.0126551*m.i25*m.i62 + 0.00815092*
m.i25*m.i63 + 0.00612506*m.i25*m.i64 + 0.0070869*m.i25*m.i65 + 0.00424002*m.i25*m.i66 +
0.00879504*m.i25*m.i67 + 0.0058829*m.i25*m.i68 + 0.01439048*m.i25*m.i69 + 0.00610238*m.i25*m.i70
+ 0.01131906*m.i25*m.i71 + 0.00889538*m.i25*m.i72 + 0.00612414*m.i25*m.i73 + 0.00846104*m.i25*
m.i74 + 0.0057198*m.i25*m.i75 + 0.00393476*m.i25*m.i76 + 0.00432972*m.i25*m.i77 + 0.00446968*
m.i25*m.i78 + 0.0141591*m.i25*m.i79 + 0.00681524*m.i25*m.i80 + 0.00839778*m.i25*m.i81 +
0.00242412*m.i25*m.i82 + 0.0061299*m.i25*m.i83 + 0.00821362*m.i25*m.i84 + 0.0059951*m.i25*m.i85
+ 0.01036166*m.i25*m.i86 + 0.0075501*m.i25*m.i87 + 0.0208316*m.i25*m.i88 + 0.00461656*m.i25*
m.i89 + 0.01024232*m.i25*m.i90 + 0.00541446*m.i25*m.i91 + 0.0058998*m.i25*m.i92 + 0.00419408*
m.i25*m.i93 + 0.0034525*m.i25*m.i94 + 0.00742618*m.i25*m.i95 + 0.01117296*m.i25*m.i96 +
0.00976304*m.i25*m.i97 + 0.01005714*m.i25*m.i98 + 0.00997578*m.i25*m.i99 + 0.01119052*m.i25*
m.i100 + 0.0054348*m.i26*m.i27 + 0.00158545*m.i26*m.i28 + 0.00507804*m.i26*m.i29 + 0.01115184*
m.i26*m.i30 + 0.00280118*m.i26*m.i31 + 0.0103351*m.i26*m.i32 + 0.00796856*m.i26*m.i33 +
0.00322344*m.i26*m.i34 + 0.00410686*m.i26*m.i35 + 0.00922294*m.i26*m.i36 + 0.00708292*m.i26*m.i37
+ 0.00218796*m.i26*m.i38 + 0.00667316*m.i26*m.i39 + 0.00604564*m.i26*m.i40 + 0.00774532*m.i26*
m.i41 + 0.00814596*m.i26*m.i42 + 0.0026451*m.i26*m.i43 + 0.00582206*m.i26*m.i44 + 0.00332382*
m.i26*m.i45 + 0.00451686*m.i26*m.i46 + 0.00733916*m.i26*m.i47 + 0.00476946*m.i26*m.i48 +
0.00485772*m.i26*m.i49 + 0.0100103*m.i26*m.i50 + 0.00280844*m.i26*m.i51 + 0.00687248*m.i26*m.i52
+ 0.00732458*m.i26*m.i53 + 0.00815206*m.i26*m.i54 + 0.00612236*m.i26*m.i55 + 0.00307146*m.i26*
m.i56 + 0.0049056*m.i26*m.i57 + 0.00412472*m.i26*m.i58 + 0.0040935*m.i26*m.i59 + 0.0040596*m.i26*
m.i60 + 0.01138906*m.i26*m.i61 + 0.00976836*m.i26*m.i62 + 0.0087752*m.i26*m.i63 + 0.00574374*
m.i26*m.i64 + 0.00539202*m.i26*m.i65 + 0.0020772*m.i26*m.i66 + 0.00535872*m.i26*m.i67 + 0.0041987
*m.i26*m.i68 + 0.00941624*m.i26*m.i69 + 0.00708368*m.i26*m.i70 + 0.00623148*m.i26*m.i71 +
0.0059506*m.i26*m.i72 + 0.00509138*m.i26*m.i73 + 0.00640786*m.i26*m.i74 + 0.00599214*m.i26*m.i75
+ 0.00535234*m.i26*m.i76 + 0.0061449*m.i26*m.i77 + 0.0049639*m.i26*m.i78 + 0.00212662*m.i26*
m.i79 + 0.00709762*m.i26*m.i80 + 0.00556936*m.i26*m.i81 + 0.0033022*m.i26*m.i82 - 0.0001706112*
m.i26*m.i83 + 0.0042184*m.i26*m.i84 + 0.00533878*m.i26*m.i85 + 0.00407216*m.i26*m.i86 + 0.0050287
*m.i26*m.i87 + 0.00492458*m.i26*m.i88 + 0.00236614*m.i26*m.i89 + 0.0069424*m.i26*m.i90 +
0.00767098*m.i26*m.i91 + 0.00534286*m.i26*m.i92 + 0.001624812*m.i26*m.i93 + 0.00309366*m.i26*
m.i94 + 0.00617648*m.i26*m.i95 + 0.01108742*m.i26*m.i96 + 0.0068572*m.i26*m.i97 + 0.00411952*
m.i26*m.i98 + 0.00653102*m.i26*m.i99 + 0.00944332*m.i26*m.i100 + 0.01236278*m.i27*m.i28 +
0.00615174*m.i27*m.i29 + 0.0284656*m.i27*m.i30 + 0.00531366*m.i27*m.i31 + 0.0227234*m.i27*m.i32
+ 0.01239532*m.i27*m.i33 + 0.00873604*m.i27*m.i34 + 0.01006162*m.i27*m.i35 + 0.0244272*m.i27*
m.i36 + 0.01206064*m.i27*m.i37 + 0.00764146*m.i27*m.i38 + 0.01638042*m.i27*m.i39 + 0.0281728*
m.i27*m.i40 + 0.0236864*m.i27*m.i41 + 0.01394576*m.i27*m.i42 + 0.01151236*m.i27*m.i43 +
0.00967762*m.i27*m.i44 + 0.0001345884*m.i27*m.i45 + 0.00656542*m.i27*m.i46 + 0.0226088*m.i27*
m.i47 + 0.00665866*m.i27*m.i48 + 0.00867994*m.i27*m.i49 + 0.01519986*m.i27*m.i50 + 0.00516678*
m.i27*m.i51 + 0.01290734*m.i27*m.i52 + 0.00750112*m.i27*m.i53 + 0.015481*m.i27*m.i54 + 0.00918208
*m.i27*m.i55 + 0.01133662*m.i27*m.i56 + 0.00655584*m.i27*m.i57 + 0.00645326*m.i27*m.i58 +
0.01022706*m.i27*m.i59 + 0.00655942*m.i27*m.i60 + 0.0230718*m.i27*m.i61 + 0.0200196*m.i27*m.i62
+ 0.01214952*m.i27*m.i63 + 0.00996324*m.i27*m.i64 + 0.00982212*m.i27*m.i65 + 0.00606814*m.i27*
m.i66 + 0.00854006*m.i27*m.i67 + 0.00819936*m.i27*m.i68 + 0.01608286*m.i27*m.i69 + 0.00821942*
m.i27*m.i70 + 0.0230626*m.i27*m.i71 + 0.01648106*m.i27*m.i72 + 0.00833058*m.i27*m.i73 + 0.0119455
*m.i27*m.i74 + 0.0073591*m.i27*m.i75 + 0.00553444*m.i27*m.i76 + 0.00629646*m.i27*m.i77 +
0.00406434*m.i27*m.i78 + 0.00760068*m.i27*m.i79 + 0.00662478*m.i27*m.i80 + 0.0198678*m.i27*m.i81
+ 0.0044671*m.i27*m.i82 + 0.01205228*m.i27*m.i83 + 0.0106948*m.i27*m.i84 + 0.00763694*m.i27*
m.i85 + 0.01122432*m.i27*m.i86 + 0.00899094*m.i27*m.i87 + 0.0237458*m.i27*m.i88 + 0.00548044*
m.i27*m.i89 + 0.01135562*m.i27*m.i90 + 0.01131762*m.i27*m.i91 + 0.00767916*m.i27*m.i92 +
0.00281062*m.i27*m.i93 + 0.00450634*m.i27*m.i94 + 0.01029564*m.i27*m.i95 + 0.01573164*m.i27*m.i96
+ 0.01494338*m.i27*m.i97 + 0.01900252*m.i27*m.i98 + 0.01470772*m.i27*m.i99 + 0.01866828*m.i27*
m.i100 + 0.00362518*m.i28*m.i29 + 0.01640256*m.i28*m.i30 + 0.00349192*m.i28*m.i31 + 0.0129237*
m.i28*m.i32 + 0.00538584*m.i28*m.i33 + 0.00533474*m.i28*m.i34 + 0.00643216*m.i28*m.i35 +
0.01292206*m.i28*m.i36 + 0.00798078*m.i28*m.i37 + 0.0054977*m.i28*m.i38 + 0.00885966*m.i28*m.i39
+ 0.016828*m.i28*m.i40 + 0.01167374*m.i28*m.i41 + 0.00549216*m.i28*m.i42 + 0.00692364*m.i28*
m.i43 + 0.00370672*m.i28*m.i44 + 0.000284348*m.i28*m.i45 + 0.00277668*m.i28*m.i46 + 0.00936392*
m.i28*m.i47 + 0.00267238*m.i28*m.i48 + 0.00522892*m.i28*m.i49 + 0.00779258*m.i28*m.i50 +
0.0043462*m.i28*m.i51 + 0.00591302*m.i28*m.i52 + 0.00320368*m.i28*m.i53 + 0.00698682*m.i28*m.i54
+ 0.00560018*m.i28*m.i55 + 0.0075828*m.i28*m.i56 + 0.00361162*m.i28*m.i57 + 0.00229658*m.i28*
m.i58 + 0.00780328*m.i28*m.i59 + 0.0033416*m.i28*m.i60 + 0.01168298*m.i28*m.i61 + 0.0082366*m.i28
*m.i62 + 0.00465746*m.i28*m.i63 + 0.00328332*m.i28*m.i64 + 0.00685966*m.i28*m.i65 + 0.00386632*
m.i28*m.i66 + 0.0053142*m.i28*m.i67 + 0.00432904*m.i28*m.i68 + 0.00791276*m.i28*m.i69 + 0.0040137
*m.i28*m.i70 + 0.01081358*m.i28*m.i71 + 0.00841874*m.i28*m.i72 + 0.00534694*m.i28*m.i73 +
0.00677544*m.i28*m.i74 + 0.00391198*m.i28*m.i75 + 0.00308942*m.i28*m.i76 + 0.00250778*m.i28*m.i77
+ 0.00189916*m.i28*m.i78 + 0.00856184*m.i28*m.i79 + 0.00337182*m.i28*m.i80 + 0.00959416*m.i28*
m.i81 + 0.00329038*m.i28*m.i82 + 0.00388664*m.i28*m.i83 + 0.00685968*m.i28*m.i84 + 0.00406002*
m.i28*m.i85 + 0.00658126*m.i28*m.i86 + 0.00646838*m.i28*m.i87 + 0.0218548*m.i28*m.i88 +
0.00541992*m.i28*m.i89 + 0.00503116*m.i28*m.i90 + 0.00418236*m.i28*m.i91 + 0.0040874*m.i28*m.i92
+ 0.0022624*m.i28*m.i93 + 0.00392254*m.i28*m.i94 + 0.00482686*m.i28*m.i95 + 0.00726382*m.i28*
m.i96 + 0.00767472*m.i28*m.i97 + 0.01066418*m.i28*m.i98 + 0.00883358*m.i28*m.i99 + 0.0070211*
m.i28*m.i100 + 0.0147917*m.i29*m.i30 + 0.001068816*m.i29*m.i31 + 0.0105712*m.i29*m.i32 +
0.00407766*m.i29*m.i33 + 0.00300076*m.i29*m.i34 + 0.00524794*m.i29*m.i35 + 0.01016322*m.i29*m.i36
+ 0.00841674*m.i29*m.i37 + 0.00258632*m.i29*m.i38 + 0.00698836*m.i29*m.i39 + 0.01223674*m.i29*
m.i40 + 0.01128912*m.i29*m.i41 + 0.00481604*m.i29*m.i42 + 0.00316394*m.i29*m.i43 + 0.00690116*
m.i29*m.i44 + 0.00082418*m.i29*m.i45 + 0.00343988*m.i29*m.i46 + 0.00660586*m.i29*m.i47 +
0.00315994*m.i29*m.i48 + 0.004109*m.i29*m.i49 + 0.01072766*m.i29*m.i50 + 0.00295018*m.i29*m.i51
+ 0.00574084*m.i29*m.i52 + 0.00735384*m.i29*m.i53 + 0.00646518*m.i29*m.i54 + 0.00437712*m.i29*
m.i55 + 0.0050201*m.i29*m.i56 + 0.00428602*m.i29*m.i57 + 0.00339284*m.i29*m.i58 + 0.00395186*
m.i29*m.i59 + 0.00369852*m.i29*m.i60 + 0.01069104*m.i29*m.i61 + 0.00877524*m.i29*m.i62 +
0.00780122*m.i29*m.i63 + 0.00319846*m.i29*m.i64 + 0.00522668*m.i29*m.i65 + 0.00318906*m.i29*m.i66
+ 0.00765554*m.i29*m.i67 + 0.00353436*m.i29*m.i68 + 0.0090668*m.i29*m.i69 + 0.0062235*m.i29*
m.i70 + 0.00879038*m.i29*m.i71 + 0.00661754*m.i29*m.i72 + 0.00355728*m.i29*m.i73 + 0.0041974*
m.i29*m.i74 + 0.00530048*m.i29*m.i75 + 0.00543652*m.i29*m.i76 + 0.00436164*m.i29*m.i77 +
0.00450742*m.i29*m.i78 + 0.00725294*m.i29*m.i79 + 0.00491692*m.i29*m.i80 + 0.00689594*m.i29*m.i81
+ 0.00288614*m.i29*m.i82 + 0.005327*m.i29*m.i83 + 0.00356482*m.i29*m.i84 + 0.00320232*m.i29*
m.i85 + 0.00401206*m.i29*m.i86 + 0.00746968*m.i29*m.i87 + 0.01484586*m.i29*m.i88 + 0.00405332*
m.i29*m.i89 + 0.00646554*m.i29*m.i90 + 0.00398186*m.i29*m.i91 + 0.0045419*m.i29*m.i92 +
0.00249602*m.i29*m.i93 + 0.00344506*m.i29*m.i94 + 0.0046313*m.i29*m.i95 + 0.01012898*m.i29*m.i96
+ 0.00666118*m.i29*m.i97 + 0.00510452*m.i29*m.i98 + 0.00865974*m.i29*m.i99 + 0.00556162*m.i29*
m.i100 + 0.01432038*m.i30*m.i31 + 0.048762*m.i30*m.i32 + 0.03246*m.i30*m.i33 + 0.00510162*m.i30*
m.i34 + 0.00990812*m.i30*m.i35 + 0.0782504*m.i30*m.i36 + 0.0336068*m.i30*m.i37 + 0.00740496*m.i30
*m.i38 + 0.0520556*m.i30*m.i39 + 0.0689666*m.i30*m.i40 + 0.0338084*m.i30*m.i41 + 0.0303886*m.i30*
m.i42 + 0.01530392*m.i30*m.i43 + 0.0286584*m.i30*m.i44 + 0.001838718*m.i30*m.i45 + 0.01735792*
m.i30*m.i46 + 0.0257124*m.i30*m.i47 + 0.01952576*m.i30*m.i48 + 0.0285968*m.i30*m.i49 + 0.0597966*
m.i30*m.i50 + 0.0235442*m.i30*m.i51 + 0.0356002*m.i30*m.i52 + 0.056815*m.i30*m.i53 + 0.031993*
m.i30*m.i54 + 0.0256864*m.i30*m.i55 + 0.012682*m.i30*m.i56 + 0.01927838*m.i30*m.i57 + 0.0132181*
m.i30*m.i58 + 0.0308396*m.i30*m.i59 + 0.01646776*m.i30*m.i60 + 0.0691402*m.i30*m.i61 + 0.0539688*
m.i30*m.i62 + 0.0253122*m.i30*m.i63 + 0.0217306*m.i30*m.i64 + 0.0238236*m.i30*m.i65 + 0.01199066*
m.i30*m.i66 + 0.0301278*m.i30*m.i67 + 0.0209952*m.i30*m.i68 + 0.0484514*m.i30*m.i69 + 0.0226726*
m.i30*m.i70 + 0.02153*m.i30*m.i71 + 0.023498*m.i30*m.i72 + 0.0217474*m.i30*m.i73 + 0.0363548*
m.i30*m.i74 + 0.0290864*m.i30*m.i75 + 0.01738014*m.i30*m.i76 + 0.0248066*m.i30*m.i77 + 0.01560782
*m.i30*m.i78 + 0.0735134*m.i30*m.i79 + 0.0216582*m.i30*m.i80 + 0.030706*m.i30*m.i81 + 0.00888388*
m.i30*m.i82 + 0.00819988*m.i30*m.i83 + 0.02421*m.i30*m.i84 + 0.01903928*m.i30*m.i85 + 0.0384208*
m.i30*m.i86 + 0.0308632*m.i30*m.i87 + 0.112101*m.i30*m.i88 + 0.0313082*m.i30*m.i89 + 0.0480838*
m.i30*m.i90 + 0.0265036*m.i30*m.i91 + 0.0219052*m.i30*m.i92 + 0.01243318*m.i30*m.i93 + 0.00866336
*m.i30*m.i94 + 0.0318698*m.i30*m.i95 + 0.0809696*m.i30*m.i96 + 0.0362056*m.i30*m.i97 + 0.0307602*
m.i30*m.i98 + 0.0452826*m.i30*m.i99 + 0.0359652*m.i30*m.i100 + 0.01352968*m.i31*m.i32 +
0.01461656*m.i31*m.i33 + 0.00410226*m.i31*m.i34 + 0.00308616*m.i31*m.i35 + 0.0221942*m.i31*m.i36
+ 0.0095014*m.i31*m.i37 + 0.0001894118*m.i31*m.i38 + 0.01328104*m.i31*m.i39 + 0.0207254*m.i31*
m.i40 + 0.01363894*m.i31*m.i41 + 0.01129202*m.i31*m.i42 + 0.0108266*m.i31*m.i43 + 0.01097008*
m.i31*m.i44 + 0.00461712*m.i31*m.i45 + 0.00463752*m.i31*m.i46 + 0.00929264*m.i31*m.i47 +
0.00473752*m.i31*m.i48 + 0.0114599*m.i31*m.i49 + 0.0117742*m.i31*m.i50 + 0.0088573*m.i31*m.i51 +
0.0075837*m.i31*m.i52 + 0.00658756*m.i31*m.i53 + 0.0113218*m.i31*m.i54 + 0.00930362*m.i31*m.i55
+ 0.01063604*m.i31*m.i56 + 0.00432704*m.i31*m.i57 + 0.00804616*m.i31*m.i58 + 0.01180986*m.i31*
m.i59 + 0.0009047*m.i31*m.i60 + 0.01200762*m.i31*m.i61 + 0.00940268*m.i31*m.i62 + 0.01417994*
m.i31*m.i63 + 0.0076164*m.i31*m.i64 + 0.00575322*m.i31*m.i65 + 0.00834872*m.i31*m.i66 +
0.00454676*m.i31*m.i67 + 0.00544346*m.i31*m.i68 + 0.0132866*m.i31*m.i69 + 0.00553084*m.i31*m.i70
+ 0.01147094*m.i31*m.i71 + 0.00577578*m.i31*m.i72 + 0.00887008*m.i31*m.i73 + 0.01059428*m.i31*
m.i74 + 0.0040723*m.i31*m.i75 + 0.00207936*m.i31*m.i76 + 0.01175316*m.i31*m.i77 + 0.00278464*
m.i31*m.i78 + 0.00880162*m.i31*m.i79 + 0.0087823*m.i31*m.i80 + 0.00669872*m.i31*m.i81 +
0.001695732*m.i31*m.i82 + 0.01128974*m.i31*m.i83 + 0.0131319*m.i31*m.i84 + 0.00861518*m.i31*m.i85
+ 0.01080682*m.i31*m.i86 + 0.00523332*m.i31*m.i87 + 0.0207656*m.i31*m.i88 + 0.00591302*m.i31*
m.i89 + 0.00439716*m.i31*m.i90 + 0.0115743*m.i31*m.i91 + 0.00995262*m.i31*m.i92 + 0.000428388*
m.i31*m.i93 + 0.00464012*m.i31*m.i94 + 0.00813868*m.i31*m.i95 + 0.00570582*m.i31*m.i96 +
0.00954936*m.i31*m.i97 + 0.01038358*m.i31*m.i98 + 0.00920842*m.i31*m.i99 + 0.01146966*m.i31*
m.i100 + 0.0209668*m.i32*m.i33 + 0.0108011*m.i32*m.i34 + 0.01248282*m.i32*m.i35 + 0.0530038*m.i32
*m.i36 + 0.0301486*m.i32*m.i37 + 0.00760388*m.i32*m.i38 + 0.0317898*m.i32*m.i39 + 0.0642986*m.i32
*m.i40 + 0.0332684*m.i32*m.i41 + 0.0235182*m.i32*m.i42 + 0.0143552*m.i32*m.i43 + 0.0235288*m.i32*
m.i44 + 0.00682838*m.i32*m.i45 + 0.01137478*m.i32*m.i46 + 0.0318282*m.i32*m.i47 + 0.00984204*
m.i32*m.i48 + 0.0207836*m.i32*m.i49 + 0.0371082*m.i32*m.i50 + 0.01715818*m.i32*m.i51 + 0.0184894*
m.i32*m.i52 + 0.0241264*m.i32*m.i53 + 0.0254814*m.i32*m.i54 + 0.01913224*m.i32*m.i55 + 0.0212986*
m.i32*m.i56 + 0.01167336*m.i32*m.i57 + 0.01191892*m.i32*m.i58 + 0.0246844*m.i32*m.i59 +
0.00772776*m.i32*m.i60 + 0.0424102*m.i32*m.i61 + 0.0330624*m.i32*m.i62 + 0.0190237*m.i32*m.i63 +
0.01185726*m.i32*m.i64 + 0.01593976*m.i32*m.i65 + 0.00931156*m.i32*m.i66 + 0.01976096*m.i32*m.i67
+ 0.00940704*m.i32*m.i68 + 0.0353824*m.i32*m.i69 + 0.01637874*m.i32*m.i70 + 0.0234414*m.i32*
m.i71 + 0.01981882*m.i32*m.i72 + 0.01518934*m.i32*m.i73 + 0.0206944*m.i32*m.i74 + 0.01368518*
m.i32*m.i75 + 0.01085922*m.i32*m.i76 + 0.0142422*m.i32*m.i77 + 0.01225292*m.i32*m.i78 + 0.025216*
m.i32*m.i79 + 0.01581384*m.i32*m.i80 + 0.0226748*m.i32*m.i81 + 0.0078489*m.i32*m.i82 + 0.00488232
*m.i32*m.i83 + 0.01715432*m.i32*m.i84 + 0.01617784*m.i32*m.i85 + 0.0224728*m.i32*m.i86 +
0.0213528*m.i32*m.i87 + 0.0404024*m.i32*m.i88 + 0.00700416*m.i32*m.i89 + 0.0284686*m.i32*m.i90 +
0.01764584*m.i32*m.i91 + 0.01747106*m.i32*m.i92 + 0.00781272*m.i32*m.i93 + 0.01173676*m.i32*m.i94
+ 0.01901852*m.i32*m.i95 + 0.032411*m.i32*m.i96 + 0.0238232*m.i32*m.i97 + 0.021198*m.i32*m.i98
+ 0.0300116*m.i32*m.i99 + 0.0354006*m.i32*m.i100 + 0.0090127*m.i33*m.i34 + 0.00772724*m.i33*
m.i35 + 0.0313702*m.i33*m.i36 + 0.01413346*m.i33*m.i37 + 0.001835906*m.i33*m.i38 + 0.01789618*
m.i33*m.i39 + 0.0342932*m.i33*m.i40 + 0.0203234*m.i33*m.i41 + 0.01859662*m.i33*m.i42 + 0.00949822
*m.i33*m.i43 + 0.0173394*m.i33*m.i44 + 0.00462026*m.i33*m.i45 + 0.0076766*m.i33*m.i46 + 0.0195887
*m.i33*m.i47 + 0.00677792*m.i33*m.i48 + 0.01593666*m.i33*m.i49 + 0.0205366*m.i33*m.i50 +
0.01028686*m.i33*m.i51 + 0.01380638*m.i33*m.i52 + 0.0139701*m.i33*m.i53 + 0.016589*m.i33*m.i54 +
0.0139115*m.i33*m.i55 + 0.01339328*m.i33*m.i56 + 0.00706492*m.i33*m.i57 + 0.01010916*m.i33*m.i58
+ 0.0112109*m.i33*m.i59 + 0.0038394*m.i33*m.i60 + 0.0232104*m.i33*m.i61 + 0.01960694*m.i33*m.i62
+ 0.01805454*m.i33*m.i63 + 0.01327968*m.i33*m.i64 + 0.0135282*m.i33*m.i65 + 0.0101248*m.i33*
m.i66 + 0.00800254*m.i33*m.i67 + 0.0030849*m.i33*m.i68 + 0.0205056*m.i33*m.i69 + 0.00997944*m.i33
*m.i70 + 0.01867754*m.i33*m.i71 + 0.01023414*m.i33*m.i72 + 0.01414764*m.i33*m.i73 + 0.01623304*
m.i33*m.i74 + 0.00580254*m.i33*m.i75 + 0.00688906*m.i33*m.i76 + 0.01955742*m.i33*m.i77 +
0.0043617*m.i33*m.i78 + 0.0110714*m.i33*m.i79 + 0.00837212*m.i33*m.i80 + 0.0186224*m.i33*m.i81 +
0.0038599*m.i33*m.i82 + 0.01828456*m.i33*m.i83 + 0.01460176*m.i33*m.i84 + 0.00984126*m.i33*m.i85
+ 0.01375926*m.i33*m.i86 + 0.01081848*m.i33*m.i87 + 0.0294078*m.i33*m.i88 + 0.00904426*m.i33*
m.i89 + 0.01335384*m.i33*m.i90 + 0.00944562*m.i33*m.i91 + 0.01586856*m.i33*m.i92 + 0.00253356*
m.i33*m.i93 + 0.00579828*m.i33*m.i94 + 0.01264366*m.i33*m.i95 + 0.0212436*m.i33*m.i96 + 0.014968*
m.i33*m.i97 + 0.01459146*m.i33*m.i98 + 0.01990882*m.i33*m.i99 + 0.020898*m.i33*m.i100 + 0.0078456
*m.i34*m.i35 + 0.01102212*m.i34*m.i36 + 0.00676724*m.i34*m.i37 + 0.00365266*m.i34*m.i38 +
0.00595098*m.i34*m.i39 + 0.01153866*m.i34*m.i40 + 0.01058304*m.i34*m.i41 + 0.00838326*m.i34*m.i42
+ 0.00601354*m.i34*m.i43 + 0.00621002*m.i34*m.i44 + 0.00388646*m.i34*m.i45 + 0.00291464*m.i34*
m.i46 + 0.01279302*m.i34*m.i47 + 0.001590652*m.i34*m.i48 + 0.00546164*m.i34*m.i49 + 0.00756668*
m.i34*m.i50 + 0.00255946*m.i34*m.i51 + 0.00586752*m.i34*m.i52 - 0.0001086844*m.i34*m.i53 +
0.00756758*m.i34*m.i54 + 0.00472132*m.i34*m.i55 + 0.0090114*m.i34*m.i56 + 0.00404276*m.i34*m.i57
+ 0.00259172*m.i34*m.i58 + 0.0043188*m.i34*m.i59 + 0.00265148*m.i34*m.i60 + 0.00988174*m.i34*
m.i61 + 0.00773706*m.i34*m.i62 + 0.00871216*m.i34*m.i63 + 0.0051719*m.i34*m.i64 + 0.005674*m.i34*
m.i65 + 0.0042472*m.i34*m.i66 + 0.0029352*m.i34*m.i67 + 0.00380488*m.i34*m.i68 + 0.00782908*m.i34
*m.i69 + 0.00528678*m.i34*m.i70 + 0.01141144*m.i34*m.i71 + 0.00731358*m.i34*m.i72 + 0.00557996*
m.i34*m.i73 + 0.00428558*m.i34*m.i74 + 0.00214164*m.i34*m.i75 + 0.001888024*m.i34*m.i76 +
0.00450712*m.i34*m.i77 + 0.001974898*m.i34*m.i78 + 0.000555542*m.i34*m.i79 + 0.004826*m.i34*m.i80
+ 0.01009798*m.i34*m.i81 + 0.00342408*m.i34*m.i82 + 0.0066259*m.i34*m.i83 + 0.00557372*m.i34*
m.i84 + 0.00493326*m.i34*m.i85 + 0.0033431*m.i34*m.i86 + 0.00355798*m.i34*m.i87 + 0.0070914*m.i34
*m.i88 + 0.00319452*m.i34*m.i89 + 0.001165088*m.i34*m.i90 + 0.00330168*m.i34*m.i91 + 0.00487072*
m.i34*m.i92 + 0.001039364*m.i34*m.i93 + 0.00462638*m.i34*m.i94 + 0.00474964*m.i34*m.i95 +
0.00307738*m.i34*m.i96 + 0.00634158*m.i34*m.i97 + 0.0093911*m.i34*m.i98 + 0.00479968*m.i34*m.i99
+ 0.00945466*m.i34*m.i100 + 0.00886108*m.i35*m.i36 + 0.008324*m.i35*m.i37 + 0.0042517*m.i35*
m.i38 + 0.0063195*m.i35*m.i39 + 0.00897334*m.i35*m.i40 + 0.01438534*m.i35*m.i41 + 0.00707384*
m.i35*m.i42 + 0.00524994*m.i35*m.i43 + 0.00729354*m.i35*m.i44 + 0.00231104*m.i35*m.i45 +
0.00317018*m.i35*m.i46 + 0.01095322*m.i35*m.i47 + 0.00256082*m.i35*m.i48 + 0.0066693*m.i35*m.i49
+ 0.00896786*m.i35*m.i50 + 0.00243944*m.i35*m.i51 + 0.00542922*m.i35*m.i52 + 0.001853016*m.i35*
m.i53 + 0.0080304*m.i35*m.i54 + 0.004194*m.i35*m.i55 + 0.00944224*m.i35*m.i56 + 0.0044097*m.i35*
m.i57 + 0.00234874*m.i35*m.i58 + 0.0045055*m.i35*m.i59 + 0.00387194*m.i35*m.i60 + 0.01070194*
m.i35*m.i61 + 0.01020854*m.i35*m.i62 + 0.00869604*m.i35*m.i63 + 0.0038381*m.i35*m.i64 +
0.00566828*m.i35*m.i65 + 0.00392276*m.i35*m.i66 + 0.00493806*m.i35*m.i67 + 0.00543634*m.i35*m.i68
+ 0.01090284*m.i35*m.i69 + 0.00744802*m.i35*m.i70 + 0.01323476*m.i35*m.i71 + 0.00994186*m.i35*
m.i72 + 0.00554564*m.i35*m.i73 + 0.00631474*m.i35*m.i74 + 0.00456554*m.i35*m.i75 + 0.00357674*
m.i35*m.i76 + 0.00520436*m.i35*m.i77 + 0.0030095*m.i35*m.i78 + 0.0057729*m.i35*m.i79 + 0.00411204
*m.i35*m.i80 + 0.00953392*m.i35*m.i81 + 0.00378046*m.i35*m.i82 + 0.00572152*m.i35*m.i83 +
0.00613732*m.i35*m.i84 + 0.00382166*m.i35*m.i85 + 0.00356476*m.i35*m.i86 + 0.00634394*m.i35*m.i87
+ 0.0111758*m.i35*m.i88 + 0.00567884*m.i35*m.i89 + 0.00368822*m.i35*m.i90 + 0.00382434*m.i35*
m.i91 + 0.00295216*m.i35*m.i92 + 0.00261056*m.i35*m.i93 + 0.00538486*m.i35*m.i94 + 0.00508518*
m.i35*m.i95 + 0.00571674*m.i35*m.i96 + 0.00749186*m.i35*m.i97 + 0.00986618*m.i35*m.i98 +
0.00565378*m.i35*m.i99 + 0.0094721*m.i35*m.i100 + 0.0440606*m.i36*m.i37 + 0.0069763*m.i36*m.i38
+ 0.0493166*m.i36*m.i39 + 0.121634*m.i36*m.i40 + 0.0358136*m.i36*m.i41 + 0.0380066*m.i36*m.i42
+ 0.0240066*m.i36*m.i43 + 0.0315302*m.i36*m.i44 + 0.00778714*m.i36*m.i45 + 0.01711478*m.i36*
m.i46 + 0.0433014*m.i36*m.i47 + 0.01592312*m.i36*m.i48 + 0.0219624*m.i36*m.i49 + 0.0584382*m.i36*
m.i50 + 0.0237454*m.i36*m.i51 + 0.030079*m.i36*m.i52 + 0.0450814*m.i36*m.i53 + 0.0285826*m.i36*
m.i54 + 0.0266392*m.i36*m.i55 + 0.01830758*m.i36*m.i56 + 0.01364522*m.i36*m.i57 + 0.01568*m.i36*
m.i58 + 0.0359108*m.i36*m.i59 + 0.00643528*m.i36*m.i60 + 0.056249*m.i36*m.i61 + 0.0503568*m.i36*
m.i62 + 0.0221574*m.i36*m.i63 + 0.023432*m.i36*m.i64 + 0.0219264*m.i36*m.i65 + 0.01946022*m.i36*
m.i66 + 0.0301552*m.i36*m.i67 + 0.00986666*m.i36*m.i68 + 0.0496472*m.i36*m.i69 + 0.0177644*m.i36*
m.i70 + 0.0308856*m.i36*m.i71 + 0.01899074*m.i36*m.i72 + 0.01805938*m.i36*m.i73 + 0.0273694*m.i36
*m.i74 + 0.01662774*m.i36*m.i75 + 0.00832596*m.i36*m.i76 + 0.0203852*m.i36*m.i77 + 0.0174271*
m.i36*m.i78 + 0.039217*m.i36*m.i79 + 0.0232082*m.i36*m.i80 + 0.0357644*m.i36*m.i81 + 0.00331724*
m.i36*m.i82 + 0.0276304*m.i36*m.i83 + 0.0267904*m.i36*m.i84 + 0.02756*m.i36*m.i85 + 0.0320374*
m.i36*m.i86 + 0.0222598*m.i36*m.i87 + 0.0496644*m.i36*m.i88 + 0.01118028*m.i36*m.i89 + 0.0432572*
m.i36*m.i90 + 0.027434*m.i36*m.i91 + 0.0293774*m.i36*m.i92 + 0.0055352*m.i36*m.i93 + 0.00852418*
m.i36*m.i94 + 0.028037*m.i36*m.i95 + 0.0642512*m.i36*m.i96 + 0.0386458*m.i36*m.i97 + 0.040981*
m.i36*m.i98 + 0.04604*m.i36*m.i99 + 0.0478424*m.i36*m.i100 + 0.00525362*m.i37*m.i38 + 0.0212576*
m.i37*m.i39 + 0.0543916*m.i37*m.i40 + 0.018282*m.i37*m.i41 + 0.01700698*m.i37*m.i42 + 0.00953368*
m.i37*m.i43 + 0.0147155*m.i37*m.i44 + 0.00425042*m.i37*m.i45 + 0.00777022*m.i37*m.i46 +
0.01646346*m.i37*m.i47 + 0.00740598*m.i37*m.i48 + 0.01274586*m.i37*m.i49 + 0.0282742*m.i37*m.i50
+ 0.01506898*m.i37*m.i51 + 0.01409464*m.i37*m.i52 + 0.01916222*m.i37*m.i53 + 0.01572296*m.i37*
m.i54 + 0.01361714*m.i37*m.i55 + 0.01302042*m.i37*m.i56 + 0.00807862*m.i37*m.i57 + 0.00701644*
m.i37*m.i58 + 0.0201438*m.i37*m.i59 + 0.00497496*m.i37*m.i60 + 0.0259544*m.i37*m.i61 + 0.01982096
*m.i37*m.i62 + 0.01082904*m.i37*m.i63 + 0.00909066*m.i37*m.i64 + 0.0112364*m.i37*m.i65 +
0.0089483*m.i37*m.i66 + 0.01522148*m.i37*m.i67 + 0.00459152*m.i37*m.i68 + 0.0214858*m.i37*m.i69
+ 0.01075074*m.i37*m.i70 + 0.0132224*m.i37*m.i71 + 0.00980738*m.i37*m.i72 + 0.00885252*m.i37*
m.i73 + 0.01427422*m.i37*m.i74 + 0.00903996*m.i37*m.i75 + 0.00768272*m.i37*m.i76 + 0.0103221*
m.i37*m.i77 + 0.01082002*m.i37*m.i78 + 0.0248284*m.i37*m.i79 + 0.01098172*m.i37*m.i80 +
0.01335848*m.i37*m.i81 + 0.00545734*m.i37*m.i82 + 0.00921544*m.i37*m.i83 + 0.0110069*m.i37*m.i84
+ 0.01385998*m.i37*m.i85 + 0.01437348*m.i37*m.i86 + 0.01621552*m.i37*m.i87 + 0.01981332*m.i37*
m.i88 + 0.00549314*m.i37*m.i89 + 0.0210958*m.i37*m.i90 + 0.0116061*m.i37*m.i91 + 0.01444326*m.i37
*m.i92 + 0.00631646*m.i37*m.i93 + 0.00847398*m.i37*m.i94 + 0.0132838*m.i37*m.i95 + 0.0257442*
m.i37*m.i96 + 0.01746728*m.i37*m.i97 + 0.01331586*m.i37*m.i98 + 0.0246618*m.i37*m.i99 + 0.0231186
*m.i37*m.i100 + 0.00427726*m.i38*m.i39 + 0.00960742*m.i38*m.i40 + 0.00588794*m.i38*m.i41 +
0.0040899*m.i38*m.i42 + 0.00370486*m.i38*m.i43 + 0.001581616*m.i38*m.i44 + 0.00157779*m.i38*m.i45
+ 0.001517842*m.i38*m.i46 + 0.00577098*m.i38*m.i47 + 0.00184948*m.i38*m.i48 + 0.001412132*m.i38*
m.i49 + 0.00473326*m.i38*m.i50 + 0.001265572*m.i38*m.i51 + 0.00389392*m.i38*m.i52 + 0.00195541*
m.i38*m.i53 + 0.0045747*m.i38*m.i54 + 0.003024*m.i38*m.i55 + 0.00322834*m.i38*m.i56 + 0.00240162*
m.i38*m.i57 + 0.000494648*m.i38*m.i58 + 0.0035117*m.i38*m.i59 + 0.00302272*m.i38*m.i60 +
0.0067192*m.i38*m.i61 + 0.00576934*m.i38*m.i62 + 0.00236514*m.i38*m.i63 + 0.00208302*m.i38*m.i64
+ 0.00359594*m.i38*m.i65 + 0.001590092*m.i38*m.i66 + 0.00239398*m.i38*m.i67 + 0.00302224*m.i38*
m.i68 + 0.00326928*m.i38*m.i69 + 0.00302294*m.i38*m.i70 + 0.0049377*m.i38*m.i71 + 0.00553496*
m.i38*m.i72 + 0.00229972*m.i38*m.i73 + 0.00318332*m.i38*m.i74 + 0.00325074*m.i38*m.i75 +
0.001803886*m.i38*m.i76 + 0.000902562*m.i38*m.i77 + 0.001651326*m.i38*m.i78 + 0.0039935*m.i38*
m.i79 + 0.00233242*m.i38*m.i80 + 0.00546644*m.i38*m.i81 + 0.00223454*m.i38*m.i82 - 0.001681894*
m.i38*m.i83 + 0.0025273*m.i38*m.i84 + 0.0032781*m.i38*m.i85 + 0.001557044*m.i38*m.i86 +
0.00327138*m.i38*m.i87 + 0.00674346*m.i38*m.i88 + 0.0020784*m.i38*m.i89 + 0.00343958*m.i38*m.i90
+ 0.00324954*m.i38*m.i91 + 0.00206404*m.i38*m.i92 + 0.00161462*m.i38*m.i93 + 0.00247166*m.i38*
m.i94 + 0.00341238*m.i38*m.i95 + 0.00585902*m.i38*m.i96 + 0.00423638*m.i38*m.i97 + 0.00566634*
m.i38*m.i98 + 0.00315378*m.i38*m.i99 + 0.00449598*m.i38*m.i100 + 0.0491892*m.i39*m.i40 +
0.0262408*m.i39*m.i41 + 0.0205234*m.i39*m.i42 + 0.01409356*m.i39*m.i43 + 0.0195666*m.i39*m.i44 +
0.00525174*m.i39*m.i45 + 0.01076856*m.i39*m.i46 + 0.0216478*m.i39*m.i47 + 0.01097136*m.i39*m.i48
+ 0.0178672*m.i39*m.i49 + 0.0324104*m.i39*m.i50 + 0.0147971*m.i39*m.i51 + 0.01855664*m.i39*m.i52
+ 0.0250992*m.i39*m.i53 + 0.0213078*m.i39*m.i54 + 0.01575182*m.i39*m.i55 + 0.01438592*m.i39*
m.i56 + 0.0105253*m.i39*m.i57 + 0.01177712*m.i39*m.i58 + 0.0207946*m.i39*m.i59 + 0.00650454*m.i39
*m.i60 + 0.036126*m.i39*m.i61 + 0.0278076*m.i39*m.i62 + 0.0206546*m.i39*m.i63 + 0.01499036*m.i39*
m.i64 + 0.01276412*m.i39*m.i65 + 0.0125414*m.i39*m.i66 + 0.01617824*m.i39*m.i67 + 0.010394*m.i39*
m.i68 + 0.0290228*m.i39*m.i69 + 0.01190924*m.i39*m.i70 + 0.01824964*m.i39*m.i71 + 0.014012*m.i39*
m.i72 + 0.01408568*m.i39*m.i73 + 0.0192582*m.i39*m.i74 + 0.01283914*m.i39*m.i75 + 0.00757714*
m.i39*m.i76 + 0.0157748*m.i39*m.i77 + 0.00886562*m.i39*m.i78 + 0.0226622*m.i39*m.i79 + 0.01506442
*m.i39*m.i80 + 0.01868878*m.i39*m.i81 + 0.00371016*m.i39*m.i82 + 0.01245306*m.i39*m.i83 +
0.01693888*m.i39*m.i84 + 0.0145704*m.i39*m.i85 + 0.0207926*m.i39*m.i86 + 0.01487822*m.i39*m.i87
+ 0.0465058*m.i39*m.i88 + 0.01052428*m.i39*m.i89 + 0.0220072*m.i39*m.i90 + 0.01887928*m.i39*
m.i91 + 0.01597714*m.i39*m.i92 + 0.00531126*m.i39*m.i93 + 0.00658506*m.i39*m.i94 + 0.01713092*
m.i39*m.i95 + 0.0328166*m.i39*m.i96 + 0.0213542*m.i39*m.i97 + 0.0210286*m.i39*m.i98 + 0.0255336*
m.i39*m.i99 + 0.0274274*m.i39*m.i100 + 0.0504412*m.i40*m.i41 + 0.0336102*m.i40*m.i42 + 0.0294804*
m.i40*m.i43 + 0.0424704*m.i40*m.i44 + 0.0030095*m.i40*m.i45 + 0.01146224*m.i40*m.i46 + 0.0507426*
m.i40*m.i47 + 0.01585054*m.i40*m.i48 + 0.0217164*m.i40*m.i49 + 0.0491478*m.i40*m.i50 + 0.0317926*
m.i40*m.i51 + 0.0284682*m.i40*m.i52 + 0.0468934*m.i40*m.i53 + 0.0309254*m.i40*m.i54 + 0.028626*
m.i40*m.i55 + 0.0309698*m.i40*m.i56 + 0.01062184*m.i40*m.i57 + 0.01987174*m.i40*m.i58 + 0.0429952
*m.i40*m.i59 + 0.00300922*m.i40*m.i60 + 0.0574936*m.i40*m.i61 + 0.0496304*m.i40*m.i62 +
0.01678646*m.i40*m.i63 + 0.0153295*m.i40*m.i64 + 0.0230176*m.i40*m.i65 + 0.0200972*m.i40*m.i66 +
0.0274442*m.i40*m.i67 - 0.00465404*m.i40*m.i68 + 0.0404524*m.i40*m.i69 + 0.01243058*m.i40*m.i70
+ 0.0333654*m.i40*m.i71 + 0.01847532*m.i40*m.i72 + 0.01863464*m.i40*m.i73 + 0.01865328*m.i40*
m.i74 + 0.0086314*m.i40*m.i75 + 0.0107773*m.i40*m.i76 + 0.0203618*m.i40*m.i77 + 0.01445046*m.i40*
m.i78 + 0.0410886*m.i40*m.i79 + 0.01194082*m.i40*m.i80 + 0.044529*m.i40*m.i81 + 0.00528742*m.i40*
m.i82 + 0.0445722*m.i40*m.i83 + 0.0229102*m.i40*m.i84 + 0.0241064*m.i40*m.i85 + 0.0368384*m.i40*
m.i86 + 0.0327072*m.i40*m.i87 + 0.0612044*m.i40*m.i88 + 0.0029601*m.i40*m.i89 + 0.0534994*m.i40*
m.i90 + 0.0258428*m.i40*m.i91 + 0.0317582*m.i40*m.i92 + 0.00965728*m.i40*m.i93 + 0.01437522*m.i40
*m.i94 + 0.0249652*m.i40*m.i95 + 0.0605768*m.i40*m.i96 + 0.0345084*m.i40*m.i97 + 0.0313726*m.i40*
m.i98 + 0.064674*m.i40*m.i99 + 0.0504464*m.i40*m.i100 + 0.0211266*m.i41*m.i42 + 0.0280268*m.i41*
m.i43 + 0.0396958*m.i41*m.i44 + 0.00245084*m.i41*m.i45 + 0.00955952*m.i41*m.i46 + 0.0396834*m.i41
*m.i47 + 0.0061862*m.i41*m.i48 + 0.02227*m.i41*m.i49 + 0.0217142*m.i41*m.i50 + 0.00978418*m.i41*
m.i51 + 0.01479238*m.i41*m.i52 + 0.016171*m.i41*m.i53 + 0.0243916*m.i41*m.i54 + 0.01422356*m.i41*
m.i55 + 0.0283342*m.i41*m.i56 + 0.00801394*m.i41*m.i57 + 0.01783044*m.i41*m.i58 + 0.01283818*
m.i41*m.i59 + 0.00500652*m.i41*m.i60 + 0.0289002*m.i41*m.i61 + 0.0313062*m.i41*m.i62 + 0.0372108*
m.i41*m.i63 + 0.0192516*m.i41*m.i64 + 0.0152555*m.i41*m.i65 + 0.01848886*m.i41*m.i66 + 0.01396382
*m.i41*m.i67 + 0.01323774*m.i41*m.i68 + 0.0319484*m.i41*m.i69 + 0.01505338*m.i41*m.i70 +
0.0464724*m.i41*m.i71 + 0.0275962*m.i41*m.i72 + 0.01531976*m.i41*m.i73 + 0.0159052*m.i41*m.i74 +
0.00897454*m.i41*m.i75 + 0.00931212*m.i41*m.i76 + 0.01958562*m.i41*m.i77 + 0.00344582*m.i41*m.i78
+ 0.00874906*m.i41*m.i79 + 0.01063594*m.i41*m.i80 + 0.02994*m.i41*m.i81 + 0.000668906*m.i41*
m.i82 + 0.0436128*m.i41*m.i83 + 0.0233408*m.i41*m.i84 + 0.00754018*m.i41*m.i85 + 0.01805636*m.i41
*m.i86 + 0.01281402*m.i41*m.i87 + 0.0523726*m.i41*m.i88 + 0.00844562*m.i41*m.i89 + 0.01302218*
m.i41*m.i90 + 0.01396562*m.i41*m.i91 + 0.01458222*m.i41*m.i92 + 0.0072903*m.i41*m.i93 +
0.00709746*m.i41*m.i94 + 0.01473562*m.i41*m.i95 + 0.01085782*m.i41*m.i96 + 0.021406*m.i41*m.i97
+ 0.0295828*m.i41*m.i98 + 0.01994264*m.i41*m.i99 + 0.0263314*m.i41*m.i100 + 0.01525376*m.i42*
m.i43 + 0.01763084*m.i42*m.i44 + 0.00749008*m.i42*m.i45 + 0.00916454*m.i42*m.i46 + 0.0235102*
m.i42*m.i47 + 0.00921988*m.i42*m.i48 + 0.01347394*m.i42*m.i49 + 0.0247352*m.i42*m.i50 +
0.01120346*m.i42*m.i51 + 0.01858118*m.i42*m.i52 + 0.01723882*m.i42*m.i53 + 0.0208142*m.i42*m.i54
+ 0.01360838*m.i42*m.i55 + 0.0118194*m.i42*m.i56 + 0.00860676*m.i42*m.i57 + 0.00935934*m.i42*
m.i58 + 0.01516418*m.i42*m.i59 + 0.0068076*m.i42*m.i60 + 0.028779*m.i42*m.i61 + 0.0258494*m.i42*
m.i62 + 0.0233604*m.i42*m.i63 + 0.01573382*m.i42*m.i64 + 0.01049188*m.i42*m.i65 + 0.00740748*
m.i42*m.i66 + 0.01082116*m.i42*m.i67 + 0.00777482*m.i42*m.i68 + 0.0240088*m.i42*m.i69 +
0.01102072*m.i42*m.i70 + 0.01820862*m.i42*m.i71 + 0.01298112*m.i42*m.i72 + 0.01234456*m.i42*m.i73
+ 0.0141652*m.i42*m.i74 + 0.00934936*m.i42*m.i75 + 0.00505832*m.i42*m.i76 + 0.01458566*m.i42*
m.i77 + 0.00728638*m.i42*m.i78 + 0.0099359*m.i42*m.i79 + 0.01486474*m.i42*m.i80 + 0.01668502*
m.i42*m.i81 + 0.00373442*m.i42*m.i82 + 0.01190258*m.i42*m.i83 + 0.01201006*m.i42*m.i84 +
0.0151776*m.i42*m.i85 + 0.0145938*m.i42*m.i86 + 0.00824462*m.i42*m.i87 + 0.0160982*m.i42*m.i88 +
0.006593*m.i42*m.i89 + 0.01418496*m.i42*m.i90 + 0.01803698*m.i42*m.i91 + 0.0159653*m.i42*m.i92 +
0.00291508*m.i42*m.i93 + 0.00538746*m.i42*m.i94 + 0.01644022*m.i42*m.i95 + 0.0250208*m.i42*m.i96
+ 0.018306*m.i42*m.i97 + 0.01797718*m.i42*m.i98 + 0.01649756*m.i42*m.i99 + 0.025412*m.i42*m.i100
+ 0.01762524*m.i43*m.i44 + 0.0026577*m.i43*m.i45 + 0.00500594*m.i43*m.i46 + 0.01987672*m.i43*
m.i47 + 0.00486026*m.i43*m.i48 + 0.01054502*m.i43*m.i49 + 0.00887754*m.i43*m.i50 + 0.00693606*
m.i43*m.i51 + 0.01006578*m.i43*m.i52 + 0.01002454*m.i43*m.i53 + 0.0138188*m.i43*m.i54 +
0.00975298*m.i43*m.i55 + 0.01686962*m.i43*m.i56 + 0.00490722*m.i43*m.i57 + 0.00949952*m.i43*m.i58
+ 0.01032096*m.i43*m.i59 + 0.00313858*m.i43*m.i60 + 0.01509816*m.i43*m.i61 + 0.0162044*m.i43*
m.i62 + 0.01875628*m.i43*m.i63 + 0.01240346*m.i43*m.i64 + 0.0085184*m.i43*m.i65 + 0.0097536*m.i43
*m.i66 + 0.00601436*m.i43*m.i67 + 0.0069333*m.i43*m.i68 + 0.01534648*m.i43*m.i69 + 0.00585324*
m.i43*m.i70 + 0.01833662*m.i43*m.i71 + 0.01219044*m.i43*m.i72 + 0.00997222*m.i43*m.i73 +
0.00950324*m.i43*m.i74 + 0.00395808*m.i43*m.i75 + 0.00230734*m.i43*m.i76 + 0.01177946*m.i43*m.i77
+ 0.00120913*m.i43*m.i78 + 0.00451336*m.i43*m.i79 + 0.0087064*m.i43*m.i80 + 0.01415418*m.i43*
m.i81 + 0.00158382*m.i43*m.i82 + 0.01934448*m.i43*m.i83 + 0.01332798*m.i43*m.i84 + 0.0073079*
m.i43*m.i85 + 0.01024086*m.i43*m.i86 + 0.00333288*m.i43*m.i87 + 0.01697646*m.i43*m.i88 +
0.00457426*m.i43*m.i89 + 0.00557218*m.i43*m.i90 + 0.0103559*m.i43*m.i91 + 0.00897022*m.i43*m.i92
+ 0.00315402*m.i43*m.i93 + 0.00504118*m.i43*m.i94 + 0.01075858*m.i43*m.i95 + 0.00678594*m.i43*
m.i96 + 0.01260626*m.i43*m.i97 + 0.0163881*m.i43*m.i98 + 0.01009846*m.i43*m.i99 + 0.01154306*
m.i43*m.i100 + 0.00483446*m.i44*m.i45 + 0.00652268*m.i44*m.i46 + 0.0242272*m.i44*m.i47 +
0.00478826*m.i44*m.i48 + 0.01685648*m.i44*m.i49 + 0.020425*m.i44*m.i50 + 0.00923526*m.i44*m.i51
+ 0.01214276*m.i44*m.i52 + 0.01807778*m.i44*m.i53 + 0.01714928*m.i44*m.i54 + 0.0117815*m.i44*
m.i55 + 0.01675568*m.i44*m.i56 + 0.0065756*m.i44*m.i57 + 0.01226174*m.i44*m.i58 + 0.0107529*m.i44
*m.i59 + 0.00316098*m.i44*m.i60 + 0.0237412*m.i44*m.i61 + 0.023095*m.i44*m.i62 + 0.0261176*m.i44*
m.i63 + 0.01217274*m.i44*m.i64 + 0.01008618*m.i44*m.i65 + 0.0100818*m.i44*m.i66 + 0.01058518*
m.i44*m.i67 + 0.00547734*m.i44*m.i68 + 0.0242058*m.i44*m.i69 + 0.01131642*m.i44*m.i70 + 0.0238346
*m.i44*m.i71 + 0.01469328*m.i44*m.i72 + 0.01153818*m.i44*m.i73 + 0.0107527*m.i44*m.i74 +
0.00664436*m.i44*m.i75 + 0.00643936*m.i44*m.i76 + 0.01819866*m.i44*m.i77 + 0.00401038*m.i44*m.i78
+ 0.00860378*m.i44*m.i79 + 0.01052694*m.i44*m.i80 + 0.01791956*m.i44*m.i81 + 0.001302356*m.i44*
m.i82 + 0.024415*m.i44*m.i83 + 0.01318656*m.i44*m.i84 + 0.00691488*m.i44*m.i85 + 0.0134211*m.i44*
m.i86 + 0.01005166*m.i44*m.i87 + 0.036692*m.i44*m.i88 + 0.00614716*m.i44*m.i89 + 0.0120958*m.i44*
m.i90 + 0.00884752*m.i44*m.i91 + 0.01296164*m.i44*m.i92 + 0.00513894*m.i44*m.i93 + 0.00596534*
m.i44*m.i94 + 0.01196692*m.i44*m.i95 + 0.01664976*m.i44*m.i96 + 0.01462126*m.i44*m.i97 +
0.0157382*m.i44*m.i98 + 0.01533824*m.i44*m.i99 + 0.0188597*m.i44*m.i100 + 0.00317774*m.i45*m.i46
+ 0.00420624*m.i45*m.i47 + 0.00199361*m.i45*m.i48 + 0.0050265*m.i45*m.i49 + 0.00894044*m.i45*
m.i50 + 0.00284776*m.i45*m.i51 + 0.00547162*m.i45*m.i52 + 0.00269966*m.i45*m.i53 + 0.0064379*
m.i45*m.i54 + 0.00472118*m.i45*m.i55 + 0.0042126*m.i45*m.i56 + 0.00394074*m.i45*m.i57 +
0.00265196*m.i45*m.i58 + 0.00448504*m.i45*m.i59 + 0.001797504*m.i45*m.i60 + 0.00867806*m.i45*
m.i61 + 0.00322858*m.i45*m.i62 + 0.00607352*m.i45*m.i63 + 0.00436738*m.i45*m.i64 + 0.00237578*
m.i45*m.i65 + 0.0044976*m.i45*m.i66 + 0.00181419*m.i45*m.i67 + 0.00495262*m.i45*m.i68 +
0.00570214*m.i45*m.i69 + 0.00422674*m.i45*m.i70 + 0.001748284*m.i45*m.i71 + 0.00347868*m.i45*
m.i72 + 0.00586478*m.i45*m.i73 + 0.00333902*m.i45*m.i74 + 0.0046385*m.i45*m.i75 + 0.001228842*
m.i45*m.i76 + 0.00595824*m.i45*m.i77 + 0.0027183*m.i45*m.i78 + 0.00108409*m.i45*m.i79 +
0.00761658*m.i45*m.i80 + 0.0005468*m.i45*m.i81 + 0.001647768*m.i45*m.i82 - 0.00572218*m.i45*m.i83
+ 0.00291394*m.i45*m.i84 + 0.00667112*m.i45*m.i85 + 0.00283124*m.i45*m.i86 + 0.00214236*m.i45*
m.i87 + 0.00913532*m.i45*m.i88 + 0.0031579*m.i45*m.i89 + 0.001671266*m.i45*m.i90 + 0.007457*m.i45
*m.i91 + 0.00539294*m.i45*m.i92 + 0.001548892*m.i45*m.i93 + 0.00325768*m.i45*m.i94 + 0.00415906*
m.i45*m.i95 + 0.00472416*m.i45*m.i96 + 0.00257908*m.i45*m.i97 + 0.00311904*m.i45*m.i98 -
0.00028754*m.i45*m.i99 + 0.00641254*m.i45*m.i100 + 0.00936266*m.i46*m.i47 + 0.00551424*m.i46*
m.i48 + 0.00665328*m.i46*m.i49 + 0.01254298*m.i46*m.i50 + 0.00457552*m.i46*m.i51 + 0.00723508*
m.i46*m.i52 + 0.01013924*m.i46*m.i53 + 0.00835722*m.i46*m.i54 + 0.00612552*m.i46*m.i55 +
0.00568528*m.i46*m.i56 + 0.00506602*m.i46*m.i57 + 0.00547684*m.i46*m.i58 + 0.00630834*m.i46*m.i59
+ 0.0034076*m.i46*m.i60 + 0.01269782*m.i46*m.i61 + 0.01056202*m.i46*m.i62 + 0.00905674*m.i46*
m.i63 + 0.00727642*m.i46*m.i64 + 0.0053986*m.i46*m.i65 + 0.00499194*m.i46*m.i66 + 0.00693256*
m.i46*m.i67 + 0.00384534*m.i46*m.i68 + 0.01113952*m.i46*m.i69 + 0.00571676*m.i46*m.i70 +
0.00918194*m.i46*m.i71 + 0.00582038*m.i46*m.i72 + 0.00587208*m.i46*m.i73 + 0.00927628*m.i46*m.i74
+ 0.00540062*m.i46*m.i75 + 0.00399822*m.i46*m.i76 + 0.00599102*m.i46*m.i77 + 0.00478388*m.i46*
m.i78 + 0.0052496*m.i46*m.i79 + 0.0080323*m.i46*m.i80 + 0.00786638*m.i46*m.i81 + 0.001854684*
m.i46*m.i82 + 0.00407872*m.i46*m.i83 + 0.00621788*m.i46*m.i84 + 0.00606418*m.i46*m.i85 +
0.00669516*m.i46*m.i86 + 0.00483036*m.i46*m.i87 + 0.00889994*m.i46*m.i88 + 0.00341184*m.i46*m.i89
+ 0.00883678*m.i46*m.i90 + 0.00699852*m.i46*m.i91 + 0.00577214*m.i46*m.i92 + 0.00238288*m.i46*
m.i93 + 0.001681122*m.i46*m.i94 + 0.00660328*m.i46*m.i95 + 0.0125098*m.i46*m.i96 + 0.00829924*
m.i46*m.i97 + 0.00843732*m.i46*m.i98 + 0.00930502*m.i46*m.i99 + 0.01141018*m.i46*m.i100 +
0.00622806*m.i47*m.i48 + 0.01275134*m.i47*m.i49 + 0.0219686*m.i47*m.i50 + 0.00559252*m.i47*m.i51
+ 0.014742*m.i47*m.i52 + 0.01293552*m.i47*m.i53 + 0.0202408*m.i47*m.i54 + 0.01276622*m.i47*m.i55
+ 0.0211842*m.i47*m.i56 + 0.00751862*m.i47*m.i57 + 0.01167596*m.i47*m.i58 + 0.0096102*m.i47*
m.i59 + 0.00476024*m.i47*m.i60 + 0.0291008*m.i47*m.i61 + 0.0293252*m.i47*m.i62 + 0.0218568*m.i47*
m.i63 + 0.01597818*m.i47*m.i64 + 0.01230724*m.i47*m.i65 + 0.01074494*m.i47*m.i66 + 0.01192482*
m.i47*m.i67 + 0.0072756*m.i47*m.i68 + 0.0259978*m.i47*m.i69 + 0.01196354*m.i47*m.i70 + 0.0346772*
m.i47*m.i71 + 0.01997802*m.i47*m.i72 + 0.0109755*m.i47*m.i73 + 0.01126216*m.i47*m.i74 +
0.00543986*m.i47*m.i75 + 0.00507998*m.i47*m.i76 + 0.01031016*m.i47*m.i77 + 0.0051788*m.i47*m.i78
+ 0.001275304*m.i47*m.i79 + 0.00993436*m.i47*m.i80 + 0.0302174*m.i47*m.i81 + 0.0025327*m.i47*
m.i82 + 0.0227778*m.i47*m.i83 + 0.01358392*m.i47*m.i84 + 0.01015524*m.i47*m.i85 + 0.01402648*
m.i47*m.i86 + 0.00789154*m.i47*m.i87 + 0.0151434*m.i47*m.i88 + 0.001278866*m.i47*m.i89 +
0.0158996*m.i47*m.i90 + 0.01154264*m.i47*m.i91 + 0.01393698*m.i47*m.i92 + 0.00304714*m.i47*m.i93
+ 0.00512466*m.i47*m.i94 + 0.01429612*m.i47*m.i95 + 0.01681572*m.i47*m.i96 + 0.01931984*m.i47*
m.i97 + 0.0267484*m.i47*m.i98 + 0.01797768*m.i47*m.i99 + 0.0282598*m.i47*m.i100 + 0.00546656*
m.i48*m.i49 + 0.01037534*m.i48*m.i50 + 0.00353598*m.i48*m.i51 + 0.00756044*m.i48*m.i52 +
0.01216498*m.i48*m.i53 + 0.00967664*m.i48*m.i54 + 0.00647364*m.i48*m.i55 + 0.00302706*m.i48*m.i56
+ 0.0053717*m.i48*m.i57 + 0.00577622*m.i48*m.i58 + 0.00544272*m.i48*m.i59 + 0.00352554*m.i48*
m.i60 + 0.01442968*m.i48*m.i61 + 0.0109524*m.i48*m.i62 + 0.00913756*m.i48*m.i63 + 0.00640136*
m.i48*m.i64 + 0.00303604*m.i48*m.i65 + 0.00380586*m.i48*m.i66 + 0.00547728*m.i48*m.i67 +
0.00370642*m.i48*m.i68 + 0.00883124*m.i48*m.i69 + 0.00549652*m.i48*m.i70 + 0.00566248*m.i48*m.i71
+ 0.00467596*m.i48*m.i72 + 0.00529964*m.i48*m.i73 + 0.00953518*m.i48*m.i74 + 0.00623786*m.i48*
m.i75 + 0.00402142*m.i48*m.i76 + 0.00662892*m.i48*m.i77 + 0.004711*m.i48*m.i78 + 0.001686804*
m.i48*m.i79 + 0.00761384*m.i48*m.i80 + 0.0057658*m.i48*m.i81 + 0.00181049*m.i48*m.i82 -
0.00054847*m.i48*m.i83 + 0.0048793*m.i48*m.i84 + 0.00598068*m.i48*m.i85 + 0.00652398*m.i48*m.i86
+ 0.0036324*m.i48*m.i87 + 0.00674584*m.i48*m.i88 + 0.00354232*m.i48*m.i89 + 0.00923644*m.i48*
m.i90 + 0.01247554*m.i48*m.i91 + 0.00613734*m.i48*m.i92 + 0.000820814*m.i48*m.i93 + 0.001893008*
m.i48*m.i94 + 0.00690274*m.i48*m.i95 + 0.01623126*m.i48*m.i96 + 0.00810288*m.i48*m.i97 +
0.00702362*m.i48*m.i98 + 0.01027006*m.i48*m.i99 + 0.01224198*m.i48*m.i100 + 0.01829412*m.i49*
m.i50 + 0.0119479*m.i49*m.i51 + 0.01038228*m.i49*m.i52 + 0.01375438*m.i49*m.i53 + 0.01480194*
m.i49*m.i54 + 0.01103368*m.i49*m.i55 + 0.01464938*m.i49*m.i56 + 0.00724638*m.i49*m.i57 +
0.00857364*m.i49*m.i58 + 0.0149174*m.i49*m.i59 + 0.00407556*m.i49*m.i60 + 0.0214208*m.i49*m.i61
+ 0.01655784*m.i49*m.i62 + 0.01832206*m.i49*m.i63 + 0.0099515*m.i49*m.i64 + 0.01025382*m.i49*
m.i65 + 0.00862324*m.i49*m.i66 + 0.00863512*m.i49*m.i67 + 0.0076467*m.i49*m.i68 + 0.0220404*m.i49
*m.i69 + 0.0095053*m.i49*m.i70 + 0.01307838*m.i49*m.i71 + 0.01047408*m.i49*m.i72 + 0.01294838*
m.i49*m.i73 + 0.01471132*m.i49*m.i74 + 0.00851398*m.i49*m.i75 + 0.00575748*m.i49*m.i76 +
0.0145716*m.i49*m.i77 + 0.00460678*m.i49*m.i78 + 0.01570596*m.i49*m.i79 + 0.00985226*m.i49*m.i80
+ 0.01023644*m.i49*m.i81 + 0.00369278*m.i49*m.i82 + 0.00860988*m.i49*m.i83 + 0.01393008*m.i49*
m.i84 + 0.00839504*m.i49*m.i85 + 0.01483048*m.i49*m.i86 + 0.01071222*m.i49*m.i87 + 0.0344974*
m.i49*m.i88 + 0.00962838*m.i49*m.i89 + 0.01169418*m.i49*m.i90 + 0.01045396*m.i49*m.i91 +
0.0095482*m.i49*m.i92 + 0.00539536*m.i49*m.i93 + 0.00663516*m.i49*m.i94 + 0.01120512*m.i49*m.i95
+ 0.01484196*m.i49*m.i96 + 0.0127009*m.i49*m.i97 + 0.01167858*m.i49*m.i98 + 0.01477446*m.i49*
m.i99 + 0.01842494*m.i49*m.i100 + 0.01663076*m.i50*m.i51 + 0.021828*m.i50*m.i52 + 0.029083*m.i50*
m.i53 + 0.0230518*m.i50*m.i54 + 0.01639088*m.i50*m.i55 + 0.01308142*m.i50*m.i56 + 0.01225642*
m.i50*m.i57 + 0.0094199*m.i50*m.i58 + 0.0222192*m.i50*m.i59 + 0.00884396*m.i50*m.i60 + 0.0415716*
m.i50*m.i61 + 0.032076*m.i50*m.i62 + 0.021259*m.i50*m.i63 + 0.01432872*m.i50*m.i64 + 0.01445944*
m.i50*m.i65 + 0.01098896*m.i50*m.i66 + 0.0219658*m.i50*m.i67 + 0.01066588*m.i50*m.i68 + 0.0354768
*m.i50*m.i69 + 0.01575178*m.i50*m.i70 + 0.01775054*m.i50*m.i71 + 0.01436852*m.i50*m.i72 +
0.01353572*m.i50*m.i73 + 0.01936092*m.i50*m.i74 + 0.01665002*m.i50*m.i75 + 0.00971184*m.i50*m.i76
+ 0.01642836*m.i50*m.i77 + 0.01382168*m.i50*m.i78 + 0.0341934*m.i50*m.i79 + 0.01843884*m.i50*
m.i80 + 0.01940942*m.i50*m.i81 + 0.00527464*m.i50*m.i82 + 0.00829608*m.i50*m.i83 + 0.0138699*
m.i50*m.i84 + 0.01840912*m.i50*m.i85 + 0.0210266*m.i50*m.i86 + 0.0205286*m.i50*m.i87 + 0.0451728*
m.i50*m.i88 + 0.01361116*m.i50*m.i89 + 0.0277252*m.i50*m.i90 + 0.01783032*m.i50*m.i91 +
0.01982086*m.i50*m.i92 + 0.00668064*m.i50*m.i93 + 0.00765962*m.i50*m.i94 + 0.01980832*m.i50*m.i95
+ 0.043863*m.i50*m.i96 + 0.0241266*m.i50*m.i97 + 0.0216094*m.i50*m.i98 + 0.0284306*m.i50*m.i99
+ 0.0308476*m.i50*m.i100 + 0.01058872*m.i51*m.i52 + 0.01279448*m.i51*m.i53 + 0.0112444*m.i51*
m.i54 + 0.00990216*m.i51*m.i55 + 0.00896022*m.i51*m.i56 + 0.00513818*m.i51*m.i57 + 0.00543454*
m.i51*m.i58 + 0.01870256*m.i51*m.i59 + 0.00309084*m.i51*m.i60 + 0.01767624*m.i51*m.i61 +
0.01208918*m.i51*m.i62 + 0.01086364*m.i51*m.i63 + 0.00670046*m.i51*m.i64 + 0.00877154*m.i51*m.i65
+ 0.00557174*m.i51*m.i66 + 0.00887856*m.i51*m.i67 + 0.00260902*m.i51*m.i68 + 0.01536338*m.i51*
m.i69 + 0.00483316*m.i51*m.i70 + 0.00448378*m.i51*m.i71 + 0.0043601*m.i51*m.i72 + 0.00929772*
m.i51*m.i73 + 0.00989476*m.i51*m.i74 + 0.00528028*m.i51*m.i75 + 0.00446022*m.i51*m.i76 +
0.00845848*m.i51*m.i77 + 0.00509916*m.i51*m.i78 + 0.0204202*m.i51*m.i79 + 0.00800384*m.i51*m.i80
+ 0.00529538*m.i51*m.i81 + 0.0038846*m.i51*m.i82 + 0.00772216*m.i51*m.i83 + 0.009979*m.i51*m.i84
+ 0.010097*m.i51*m.i85 + 0.0139755*m.i51*m.i86 + 0.01131734*m.i51*m.i87 + 0.02533*m.i51*m.i88 +
0.00621034*m.i51*m.i89 + 0.01160734*m.i51*m.i90 + 0.00843408*m.i51*m.i91 + 0.00995326*m.i51*m.i92
+ 0.00455616*m.i51*m.i93 + 0.00533468*m.i51*m.i94 + 0.00929878*m.i51*m.i95 + 0.0142337*m.i51*
m.i96 + 0.01066822*m.i51*m.i97 + 0.00526832*m.i51*m.i98 + 0.01737382*m.i51*m.i99 + 0.01465192*
m.i51*m.i100 + 0.01484222*m.i52*m.i53 + 0.0171371*m.i52*m.i54 + 0.01181392*m.i52*m.i55 +
0.00600344*m.i52*m.i56 + 0.00840878*m.i52*m.i57 + 0.0071463*m.i52*m.i58 + 0.01536778*m.i52*m.i59
+ 0.0071369*m.i52*m.i60 + 0.0280962*m.i52*m.i61 + 0.0210708*m.i52*m.i62 + 0.01590808*m.i52*m.i63
+ 0.01317442*m.i52*m.i64 + 0.0091774*m.i52*m.i65 + 0.0068045*m.i52*m.i66 + 0.01047574*m.i52*
m.i67 + 0.00882116*m.i52*m.i68 + 0.01759098*m.i52*m.i69 + 0.00927774*m.i52*m.i70 + 0.01307496*
m.i52*m.i71 + 0.0115876*m.i52*m.i72 + 0.01090888*m.i52*m.i73 + 0.0112976*m.i52*m.i74 + 0.00919952
*m.i52*m.i75 + 0.00611904*m.i52*m.i76 + 0.0126521*m.i52*m.i77 + 0.0063454*m.i52*m.i78 +
0.01337936*m.i52*m.i79 + 0.01210696*m.i52*m.i80 + 0.01264942*m.i52*m.i81 + 0.00476554*m.i52*m.i82
+ 0.01346924*m.i52*m.i83 + 0.01007318*m.i52*m.i84 + 0.0127267*m.i52*m.i85 + 0.01394736*m.i52*
m.i86 + 0.0099746*m.i52*m.i87 + 0.0311922*m.i52*m.i88 + 0.0079236*m.i52*m.i89 + 0.01182038*m.i52*
m.i90 + 0.01651678*m.i52*m.i91 + 0.01241554*m.i52*m.i92 + 0.0030009*m.i52*m.i93 + 0.00533038*
m.i52*m.i94 + 0.0132025*m.i52*m.i95 + 0.0243106*m.i52*m.i96 + 0.01594256*m.i52*m.i97 + 0.01260958
*m.i52*m.i98 + 0.0156343*m.i52*m.i99 + 0.01771086*m.i52*m.i100 + 0.0153737*m.i53*m.i54 +
0.01383672*m.i53*m.i55 + 0.00715324*m.i53*m.i56 + 0.00943676*m.i53*m.i57 + 0.00990018*m.i53*m.i58
+ 0.01573366*m.i53*m.i59 + 0.00657884*m.i53*m.i60 + 0.0319944*m.i53*m.i61 + 0.029398*m.i53*m.i62
+ 0.01378922*m.i53*m.i63 + 0.01107682*m.i53*m.i64 + 0.01095454*m.i53*m.i65 + 0.00681218*m.i53*
m.i66 + 0.01767184*m.i53*m.i67 + 0.00360916*m.i53*m.i68 + 0.0271974*m.i53*m.i69 + 0.01108326*
m.i53*m.i70 + 0.00659666*m.i53*m.i71 + 0.00877032*m.i53*m.i72 + 0.01135242*m.i53*m.i73 +
0.01814298*m.i53*m.i74 + 0.01264072*m.i53*m.i75 + 0.00851402*m.i53*m.i76 + 0.01433306*m.i53*m.i77
+ 0.00973382*m.i53*m.i78 + 0.025286*m.i53*m.i79 + 0.01345344*m.i53*m.i80 + 0.01259382*m.i53*
m.i81 + 0.0027805*m.i53*m.i82 + 0.000307752*m.i53*m.i83 + 0.0107134*m.i53*m.i84 + 0.01054482*
m.i53*m.i85 + 0.0158905*m.i53*m.i86 + 0.01354224*m.i53*m.i87 + 0.0304602*m.i53*m.i88 + 0.0090225*
m.i53*m.i89 + 0.0279162*m.i53*m.i90 + 0.01259072*m.i53*m.i91 + 0.01154418*m.i53*m.i92 +
0.00696904*m.i53*m.i93 + 0.0036836*m.i53*m.i94 + 0.01605638*m.i53*m.i95 + 0.0430698*m.i53*m.i96
+ 0.01780592*m.i53*m.i97 + 0.01137144*m.i53*m.i98 + 0.0256234*m.i53*m.i99 + 0.0212362*m.i53*
m.i100 + 0.01304758*m.i54*m.i55 + 0.01398616*m.i54*m.i56 + 0.00915664*m.i54*m.i57 + 0.01070596*
m.i54*m.i58 + 0.01499*m.i54*m.i59 + 0.0070249*m.i54*m.i60 + 0.0302542*m.i54*m.i61 + 0.0244214*
m.i54*m.i62 + 0.0228504*m.i54*m.i63 + 0.01378888*m.i54*m.i64 + 0.00915648*m.i54*m.i65 + 0.0089268
*m.i54*m.i66 + 0.010488*m.i54*m.i67 + 0.00997224*m.i54*m.i68 + 0.0229576*m.i54*m.i69 + 0.01077794
*m.i54*m.i70 + 0.01825372*m.i54*m.i71 + 0.01517784*m.i54*m.i72 + 0.01258444*m.i54*m.i73 +
0.01361126*m.i54*m.i74 + 0.01029832*m.i54*m.i75 + 0.00657472*m.i54*m.i76 + 0.01463254*m.i54*m.i77
+ 0.00613474*m.i54*m.i78 + 0.01201368*m.i54*m.i79 + 0.013126*m.i54*m.i80 + 0.01505614*m.i54*
m.i81 + 0.00467872*m.i54*m.i82 + 0.01050702*m.i54*m.i83 + 0.01265914*m.i54*m.i84 + 0.01318044*
m.i54*m.i85 + 0.01473222*m.i54*m.i86 + 0.01110614*m.i54*m.i87 + 0.0261814*m.i54*m.i88 +
0.00783796*m.i54*m.i89 + 0.01294844*m.i54*m.i90 + 0.0192808*m.i54*m.i91 + 0.0139507*m.i54*m.i92
+ 0.00351228*m.i54*m.i93 + 0.0068612*m.i54*m.i94 + 0.01527036*m.i54*m.i95 + 0.0205052*m.i54*
m.i96 + 0.01688726*m.i54*m.i97 + 0.01524852*m.i54*m.i98 + 0.0174601*m.i54*m.i99 + 0.0244266*m.i54
*m.i100 + 0.00673562*m.i55*m.i56 + 0.00707698*m.i55*m.i57 + 0.00734322*m.i55*m.i58 + 0.01405048*
m.i55*m.i59 + 0.00334038*m.i55*m.i60 + 0.0222096*m.i55*m.i61 + 0.01523028*m.i55*m.i62 + 0.0102055
*m.i55*m.i63 + 0.01002768*m.i55*m.i64 + 0.01048288*m.i55*m.i65 + 0.00635712*m.i55*m.i66 +
0.00874464*m.i55*m.i67 + 0.00593524*m.i55*m.i68 + 0.01648812*m.i55*m.i69 + 0.0080135*m.i55*m.i70
+ 0.00887592*m.i55*m.i71 + 0.00847214*m.i55*m.i72 + 0.01055314*m.i55*m.i73 + 0.01129422*m.i55*
m.i74 + 0.00699156*m.i55*m.i75 + 0.00627446*m.i55*m.i76 + 0.01024268*m.i55*m.i77 + 0.00531432*
m.i55*m.i78 + 0.0098513*m.i55*m.i79 + 0.01065934*m.i55*m.i80 + 0.00967318*m.i55*m.i81 +
0.00462964*m.i55*m.i82 + 0.00334858*m.i55*m.i83 + 0.01100528*m.i55*m.i84 + 0.00975296*m.i55*m.i85
+ 0.01214742*m.i55*m.i86 + 0.00846042*m.i55*m.i87 + 0.0242638*m.i55*m.i88 + 0.0054702*m.i55*
m.i89 + 0.01124098*m.i55*m.i90 + 0.0118002*m.i55*m.i91 + 0.01077996*m.i55*m.i92 + 0.00250778*
m.i55*m.i93 + 0.00555816*m.i55*m.i94 + 0.01037364*m.i55*m.i95 + 0.0175302*m.i55*m.i96 +
0.01283314*m.i55*m.i97 + 0.01054116*m.i55*m.i98 + 0.01565736*m.i55*m.i99 + 0.01643682*m.i55*
m.i100 + 0.00563824*m.i56*m.i57 + 0.00909602*m.i56*m.i58 + 0.0103611*m.i56*m.i59 + 0.00370386*
m.i56*m.i60 + 0.01345496*m.i56*m.i61 + 0.01240364*m.i56*m.i62 + 0.01894134*m.i56*m.i63 +
0.00842246*m.i56*m.i64 + 0.00913306*m.i56*m.i65 + 0.0128603*m.i56*m.i66 + 0.00789202*m.i56*m.i67
+ 0.0049437*m.i56*m.i68 + 0.0172921*m.i56*m.i69 + 0.00742364*m.i56*m.i70 + 0.0201228*m.i56*m.i71
+ 0.0118952*m.i56*m.i72 + 0.01088666*m.i56*m.i73 + 0.0107701*m.i56*m.i74 + 0.00409754*m.i56*
m.i75 + 0.00366002*m.i56*m.i76 + 0.01236854*m.i56*m.i77 + 0.00300872*m.i56*m.i78 + 0.0135613*
m.i56*m.i79 + 0.00480806*m.i56*m.i80 + 0.01596128*m.i56*m.i81 + 0.00309564*m.i56*m.i82 +
0.01777436*m.i56*m.i83 + 0.01193038*m.i56*m.i84 + 0.00565974*m.i56*m.i85 + 0.01170688*m.i56*m.i86
+ 0.01022376*m.i56*m.i87 + 0.0163427*m.i56*m.i88 + 0.00612568*m.i56*m.i89 + 0.01115784*m.i56*
m.i90 + 0.00381802*m.i56*m.i91 + 0.0089326*m.i56*m.i92 + 0.0075443*m.i56*m.i93 + 0.00818402*m.i56
*m.i94 + 0.00966992*m.i56*m.i95 + 0.00265106*m.i56*m.i96 + 0.01019204*m.i56*m.i97 + 0.01329902*
m.i56*m.i98 + 0.01411634*m.i56*m.i99 + 0.0138779*m.i56*m.i100 + 0.00474894*m.i57*m.i58 +
0.00767974*m.i57*m.i59 + 0.0043561*m.i57*m.i60 + 0.01478228*m.i57*m.i61 + 0.00989558*m.i57*m.i62
+ 0.00895424*m.i57*m.i63 + 0.0066828*m.i57*m.i64 + 0.00578744*m.i57*m.i65 + 0.00498864*m.i57*
m.i66 + 0.00614268*m.i57*m.i67 + 0.0054738*m.i57*m.i68 + 0.01078148*m.i57*m.i69 + 0.00688352*
m.i57*m.i70 + 0.0068114*m.i57*m.i71 + 0.00628102*m.i57*m.i72 + 0.00701898*m.i57*m.i73 +
0.00848154*m.i57*m.i74 + 0.0066742*m.i57*m.i75 + 0.00450208*m.i57*m.i76 + 0.0074907*m.i57*m.i77
+ 0.00457588*m.i57*m.i78 + 0.00668368*m.i57*m.i79 + 0.00806954*m.i57*m.i80 + 0.00702352*m.i57*
m.i81 + 0.0038917*m.i57*m.i82 + 0.000255196*m.i57*m.i83 + 0.00565464*m.i57*m.i84 + 0.00629044*
m.i57*m.i85 + 0.00649918*m.i57*m.i86 + 0.00619514*m.i57*m.i87 + 0.01578988*m.i57*m.i88 +
0.00523946*m.i57*m.i89 + 0.00717944*m.i57*m.i90 + 0.0080494*m.i57*m.i91 + 0.00534064*m.i57*m.i92
+ 0.00276512*m.i57*m.i93 + 0.00412012*m.i57*m.i94 + 0.00715034*m.i57*m.i95 + 0.01300638*m.i57*
m.i96 + 0.00826382*m.i57*m.i97 + 0.0068466*m.i57*m.i98 + 0.00897648*m.i57*m.i99 + 0.01037138*
m.i57*m.i100 + 0.00646004*m.i58*m.i59 + 0.00186599*m.i58*m.i60 + 0.01246886*m.i58*m.i61 +
0.00999352*m.i58*m.i62 + 0.01381952*m.i58*m.i63 + 0.00855014*m.i58*m.i64 + 0.00465434*m.i58*m.i65
+ 0.00825376*m.i58*m.i66 + 0.00576402*m.i58*m.i67 + 0.00273548*m.i58*m.i68 + 0.01035762*m.i58*
m.i69 + 0.004824*m.i58*m.i70 + 0.01355144*m.i58*m.i71 + 0.00700278*m.i58*m.i72 + 0.00707718*m.i58
*m.i73 + 0.00851974*m.i58*m.i74 + 0.00330912*m.i58*m.i75 + 0.00401842*m.i58*m.i76 + 0.00999942*
m.i58*m.i77 + 0.00277578*m.i58*m.i78 - 0.000989722*m.i58*m.i79 + 0.00742188*m.i58*m.i80 +
0.00901096*m.i58*m.i81 + 0.000981242*m.i58*m.i82 + 0.01290728*m.i58*m.i83 + 0.0083181*m.i58*m.i84
+ 0.00517936*m.i58*m.i85 + 0.00723458*m.i58*m.i86 + 0.0044253*m.i58*m.i87 + 0.0137847*m.i58*
m.i88 + 0.001547694*m.i58*m.i89 + 0.00582604*m.i58*m.i90 + 0.00844516*m.i58*m.i91 + 0.00776542*
m.i58*m.i92 + 0.00182761*m.i58*m.i93 + 0.0023829*m.i58*m.i94 + 0.00628056*m.i58*m.i95 +
0.00690478*m.i58*m.i96 + 0.00802988*m.i58*m.i97 + 0.0076502*m.i58*m.i98 + 0.01085276*m.i58*m.i99
+ 0.0112764*m.i58*m.i100 + 0.00476864*m.i59*m.i60 + 0.025812*m.i59*m.i61 + 0.01805478*m.i59*
m.i62 + 0.0109551*m.i59*m.i63 + 0.00938908*m.i59*m.i64 + 0.01178962*m.i59*m.i65 + 0.0076335*m.i59
*m.i66 + 0.01177666*m.i59*m.i67 + 0.0070214*m.i59*m.i68 + 0.0221478*m.i59*m.i69 + 0.007972*m.i59*
m.i70 + 0.0074733*m.i59*m.i71 + 0.0088486*m.i59*m.i72 + 0.01271666*m.i59*m.i73 + 0.0141508*m.i59*
m.i74 + 0.00914726*m.i59*m.i75 + 0.00537448*m.i59*m.i76 + 0.01084216*m.i59*m.i77 + 0.0073258*
m.i59*m.i78 + 0.0246694*m.i59*m.i79 + 0.01112936*m.i59*m.i80 + 0.00816652*m.i59*m.i81 +
0.00597972*m.i59*m.i82 + 0.00662172*m.i59*m.i83 + 0.01458364*m.i59*m.i84 + 0.01429256*m.i59*m.i85
+ 0.01882618*m.i59*m.i86 + 0.01439702*m.i59*m.i87 + 0.034478*m.i59*m.i88 + 0.0080275*m.i59*m.i89
+ 0.01623632*m.i59*m.i90 + 0.01482176*m.i59*m.i91 + 0.01127396*m.i59*m.i92 + 0.00550568*m.i59*
m.i93 + 0.00798042*m.i59*m.i94 + 0.01294416*m.i59*m.i95 + 0.0212862*m.i59*m.i96 + 0.01627426*
m.i59*m.i97 + 0.0106876*m.i59*m.i98 + 0.021021*m.i59*m.i99 + 0.0210024*m.i59*m.i100 + 0.01016558*
m.i60*m.i61 + 0.00950624*m.i60*m.i62 + 0.00759926*m.i60*m.i63 + 0.00405624*m.i60*m.i64 +
0.00408766*m.i60*m.i65 + 0.001012866*m.i60*m.i66 + 0.00434698*m.i60*m.i67 + 0.00457798*m.i60*
m.i68 + 0.0080193*m.i60*m.i69 + 0.0054101*m.i60*m.i70 + 0.0046192*m.i60*m.i71 + 0.00570946*m.i60*
m.i72 + 0.00452172*m.i60*m.i73 + 0.00634618*m.i60*m.i74 + 0.00624388*m.i60*m.i75 + 0.0033187*
m.i60*m.i76 + 0.00483228*m.i60*m.i77 + 0.00344686*m.i60*m.i78 + 0.0083673*m.i60*m.i79 +
0.00518592*m.i60*m.i80 + 0.00542166*m.i60*m.i81 + 0.0031059*m.i60*m.i82 - 0.001025068*m.i60*m.i83
+ 0.0028835*m.i60*m.i84 + 0.00445296*m.i60*m.i85 + 0.00423572*m.i60*m.i86 + 0.0051822*m.i60*
m.i87 + 0.01112192*m.i60*m.i88 + 0.00500464*m.i60*m.i89 + 0.0062184*m.i60*m.i90 + 0.00602*m.i60*
m.i91 + 0.00246398*m.i60*m.i92 + 0.00288384*m.i60*m.i93 + 0.00278724*m.i60*m.i94 + 0.00626372*
m.i60*m.i95 + 0.01170704*m.i60*m.i96 + 0.00615192*m.i60*m.i97 + 0.00462302*m.i60*m.i98 +
0.00471294*m.i60*m.i99 + 0.00588256*m.i60*m.i100 + 0.0418718*m.i61*m.i62 + 0.0230598*m.i61*m.i63
+ 0.01842282*m.i61*m.i64 + 0.01721234*m.i61*m.i65 + 0.00990124*m.i61*m.i66 + 0.0216044*m.i61*
m.i67 + 0.01473812*m.i61*m.i68 + 0.0394464*m.i61*m.i69 + 0.01716988*m.i61*m.i70 + 0.0195513*m.i61
*m.i71 + 0.0219932*m.i61*m.i72 + 0.01943214*m.i61*m.i73 + 0.020134*m.i61*m.i74 + 0.0174732*m.i61*
m.i75 + 0.01174406*m.i61*m.i76 + 0.01834496*m.i61*m.i77 + 0.01109086*m.i61*m.i78 + 0.0264464*
m.i61*m.i79 + 0.01965936*m.i61*m.i80 + 0.0227546*m.i61*m.i81 + 0.00831452*m.i61*m.i82 +
0.00631004*m.i61*m.i83 + 0.01801602*m.i61*m.i84 + 0.01882322*m.i61*m.i85 + 0.026381*m.i61*m.i86
+ 0.0201168*m.i61*m.i87 + 0.0582994*m.i61*m.i88 + 0.01420784*m.i61*m.i89 + 0.0279352*m.i61*m.i90
+ 0.0260044*m.i61*m.i91 + 0.01994278*m.i61*m.i92 + 0.00558188*m.i61*m.i93 + 0.0100806*m.i61*
m.i94 + 0.0228614*m.i61*m.i95 + 0.0472894*m.i61*m.i96 + 0.0277624*m.i61*m.i97 + 0.0233414*m.i61*
m.i98 + 0.0320998*m.i61*m.i99 + 0.037788*m.i61*m.i100 + 0.0226754*m.i62*m.i63 + 0.01497022*m.i62*
m.i64 + 0.0138219*m.i62*m.i65 + 0.00559668*m.i62*m.i66 + 0.01850946*m.i62*m.i67 + 0.01131414*
m.i62*m.i68 + 0.0392412*m.i62*m.i69 + 0.01609634*m.i62*m.i70 + 0.0216048*m.i62*m.i71 + 0.0216526*
m.i62*m.i72 + 0.0150155*m.i62*m.i73 + 0.01738604*m.i62*m.i74 + 0.01374744*m.i62*m.i75 +
0.00779326*m.i62*m.i76 + 0.01429558*m.i62*m.i77 + 0.0081994*m.i62*m.i78 + 0.024889*m.i62*m.i79 +
0.01494124*m.i62*m.i80 + 0.0229898*m.i62*m.i81 + 0.00445144*m.i62*m.i82 + 0.01114552*m.i62*m.i83
+ 0.01793036*m.i62*m.i84 + 0.01444614*m.i62*m.i85 + 0.01879448*m.i62*m.i86 + 0.01466504*m.i62*
m.i87 + 0.0326604*m.i62*m.i88 + 0.01169144*m.i62*m.i89 + 0.0254028*m.i62*m.i90 + 0.01965996*m.i62
*m.i91 + 0.01132102*m.i62*m.i92 + 0.0046546*m.i62*m.i93 + 0.00635342*m.i62*m.i94 + 0.0209304*
m.i62*m.i95 + 0.040751*m.i62*m.i96 + 0.0251822*m.i62*m.i97 + 0.0238578*m.i62*m.i98 + 0.0225858*
m.i62*m.i99 + 0.0313134*m.i62*m.i100 + 0.01545704*m.i63*m.i64 + 0.01086358*m.i63*m.i65 +
0.00996396*m.i63*m.i66 + 0.00982328*m.i63*m.i67 + 0.00892944*m.i63*m.i68 + 0.024956*m.i63*m.i69
+ 0.0125295*m.i63*m.i70 + 0.0274234*m.i63*m.i71 + 0.0136346*m.i63*m.i72 + 0.0143589*m.i63*m.i73
+ 0.01281966*m.i63*m.i74 + 0.009889*m.i63*m.i75 + 0.00617316*m.i63*m.i76 + 0.0195622*m.i63*m.i77
+ 0.00502572*m.i63*m.i78 + 0.00153262*m.i63*m.i79 + 0.01706792*m.i63*m.i80 + 0.01790944*m.i63*
m.i81 + 0.001490592*m.i63*m.i82 + 0.0267338*m.i63*m.i83 + 0.01586496*m.i63*m.i84 + 0.01166282*
m.i63*m.i85 + 0.01568614*m.i63*m.i86 + 0.00753188*m.i63*m.i87 + 0.0417782*m.i63*m.i88 + 0.0112216
*m.i63*m.i89 + 0.00371206*m.i63*m.i90 + 0.01829192*m.i63*m.i91 + 0.01841964*m.i63*m.i92 +
0.00206622*m.i63*m.i93 + 0.00505172*m.i63*m.i94 + 0.01487174*m.i63*m.i95 + 0.01414348*m.i63*m.i96
+ 0.0156802*m.i63*m.i97 + 0.01823426*m.i63*m.i98 + 0.01258764*m.i63*m.i99 + 0.01994098*m.i63*
m.i100 + 0.00854654*m.i64*m.i65 + 0.01079866*m.i64*m.i66 + 0.00602732*m.i64*m.i67 + 0.00921276*
m.i64*m.i68 + 0.01464414*m.i64*m.i69 + 0.00664932*m.i64*m.i70 + 0.0144736*m.i64*m.i71 +
0.00978338*m.i64*m.i72 + 0.00959208*m.i64*m.i73 + 0.0112566*m.i64*m.i74 + 0.00671142*m.i64*m.i75
+ 0.00408206*m.i64*m.i76 + 0.01167568*m.i64*m.i77 + 0.00375274*m.i64*m.i78 + 0.00404336*m.i64*
m.i79 + 0.00963238*m.i64*m.i80 + 0.0122908*m.i64*m.i81 + 0.001806772*m.i64*m.i82 + 0.01577266*
m.i64*m.i83 + 0.01128074*m.i64*m.i84 + 0.0095111*m.i64*m.i85 + 0.0097723*m.i64*m.i86 + 0.00346618
*m.i64*m.i87 + 0.01289324*m.i64*m.i88 + 0.00453186*m.i64*m.i89 + 0.0078486*m.i64*m.i90 +
0.01310134*m.i64*m.i91 + 0.00985686*m.i64*m.i92 + 0.00257788*m.i64*m.i93 + 0.00260324*m.i64*m.i94
+ 0.0108877*m.i64*m.i95 + 0.01349616*m.i64*m.i96 + 0.01306042*m.i64*m.i97 + 0.01405114*m.i64*
m.i98 + 0.0115142*m.i64*m.i99 + 0.01728302*m.i64*m.i100 + 0.0048225*m.i65*m.i66 + 0.00871696*
m.i65*m.i67 + 0.00504014*m.i65*m.i68 + 0.01673796*m.i65*m.i69 + 0.00728674*m.i65*m.i70 +
0.00969202*m.i65*m.i71 + 0.0082057*m.i65*m.i72 + 0.0103704*m.i65*m.i73 + 0.00998004*m.i65*m.i74
+ 0.00672722*m.i65*m.i75 + 0.00633346*m.i65*m.i76 + 0.00774852*m.i65*m.i77 + 0.00440922*m.i65*
m.i78 + 0.01343946*m.i65*m.i79 + 0.00798994*m.i65*m.i80 + 0.01225132*m.i65*m.i81 + 0.00444398*
m.i65*m.i82 + 0.00673302*m.i65*m.i83 + 0.0109598*m.i65*m.i84 + 0.00683186*m.i65*m.i85 +
0.01183874*m.i65*m.i86 + 0.0090907*m.i65*m.i87 + 0.0283952*m.i65*m.i88 + 0.00785096*m.i65*m.i89
+ 0.01125058*m.i65*m.i90 + 0.00510526*m.i65*m.i91 + 0.00837574*m.i65*m.i92 + 0.00385798*m.i65*
m.i93 + 0.00464904*m.i65*m.i94 + 0.00896456*m.i65*m.i95 + 0.0160694*m.i65*m.i96 + 0.0113557*m.i65
*m.i97 + 0.01155766*m.i65*m.i98 + 0.01443876*m.i65*m.i99 + 0.01238186*m.i65*m.i100 + 0.00548536*
m.i66*m.i67 + 0.00630564*m.i66*m.i68 + 0.00939978*m.i66*m.i69 + 0.00431468*m.i66*m.i70 +
0.01542742*m.i66*m.i71 + 0.0071665*m.i66*m.i72 + 0.00755022*m.i66*m.i73 + 0.00838922*m.i66*m.i74
+ 0.00386922*m.i66*m.i75 + 0.001951058*m.i66*m.i76 + 0.01146338*m.i66*m.i77 + 0.001980078*m.i66*
m.i78 + 0.00444902*m.i66*m.i79 + 0.00356762*m.i66*m.i80 + 0.00956806*m.i66*m.i81 - 0.00023183*
m.i66*m.i82 + 0.01703884*m.i66*m.i83 + 0.01002452*m.i66*m.i84 + 0.0062546*m.i66*m.i85 +
0.00563304*m.i66*m.i86 + 0.00514984*m.i66*m.i87 + 0.01908326*m.i66*m.i88 + 0.00457928*m.i66*m.i89
+ 0.003995*m.i66*m.i90 + 0.0080501*m.i66*m.i91 + 0.00810108*m.i66*m.i92 + 0.00328186*m.i66*m.i93
+ 0.00369064*m.i66*m.i94 + 0.0058103*m.i66*m.i95 + 0.00438208*m.i66*m.i96 + 0.00867896*m.i66*
m.i97 + 0.0114927*m.i66*m.i98 + 0.01103938*m.i66*m.i99 + 0.00981454*m.i66*m.i100 + 0.00310364*
m.i67*m.i68 + 0.0195756*m.i67*m.i69 + 0.00833924*m.i67*m.i70 + 0.01122*m.i67*m.i71 + 0.00862168*
m.i67*m.i72 + 0.00711248*m.i67*m.i73 + 0.00958304*m.i67*m.i74 + 0.00671208*m.i67*m.i75 +
0.00667666*m.i67*m.i76 + 0.00639998*m.i67*m.i77 + 0.00746068*m.i67*m.i78 + 0.0164696*m.i67*m.i79
+ 0.00952472*m.i67*m.i80 + 0.01054908*m.i67*m.i81 + 0.00295206*m.i67*m.i82 + 0.00786538*m.i67*
m.i83 + 0.00812566*m.i67*m.i84 + 0.00774908*m.i67*m.i85 + 0.01084866*m.i67*m.i86 + 0.01179554*
m.i67*m.i87 + 0.022894*m.i67*m.i88 + 0.00619526*m.i67*m.i89 + 0.01517056*m.i67*m.i90 + 0.00567344
*m.i67*m.i91 + 0.00901318*m.i67*m.i92 + 0.00388018*m.i67*m.i93 + 0.0036956*m.i67*m.i94 + 0.008896
*m.i67*m.i95 + 0.021896*m.i67*m.i96 + 0.01327636*m.i67*m.i97 + 0.0109*m.i67*m.i98 + 0.0178563*
m.i67*m.i99 + 0.01328366*m.i67*m.i100 + 0.01361686*m.i68*m.i69 + 0.00764086*m.i68*m.i70 +
0.00794036*m.i68*m.i71 + 0.01077146*m.i68*m.i72 + 0.00701056*m.i68*m.i73 + 0.00764336*m.i68*m.i74
+ 0.01085638*m.i68*m.i75 + 0.00267198*m.i68*m.i76 + 0.00622086*m.i68*m.i77 + 0.0026961*m.i68*
m.i78 + 0.01283914*m.i68*m.i79 + 0.00651186*m.i68*m.i80 + 0.00444824*m.i68*m.i81 + 0.00245108*
m.i68*m.i82 - 0.000724804*m.i68*m.i83 + 0.01001432*m.i68*m.i84 + 0.00659112*m.i68*m.i85 +
0.00798872*m.i68*m.i86 + 0.00378278*m.i68*m.i87 + 0.0249894*m.i68*m.i88 + 0.00935338*m.i68*m.i89
+ 0.00406214*m.i68*m.i90 + 0.01547864*m.i68*m.i91 + 0.0026383*m.i68*m.i92 + 0.001956366*m.i68*
m.i93 + 0.00433104*m.i68*m.i94 + 0.0086862*m.i68*m.i95 + 0.00871594*m.i68*m.i96 + 0.00917804*
m.i68*m.i97 + 0.01147728*m.i68*m.i98 + 0.000904318*m.i68*m.i99 + 0.0095902*m.i68*m.i100 +
0.01716134*m.i69*m.i70 + 0.0210128*m.i69*m.i71 + 0.01970512*m.i69*m.i72 + 0.01824406*m.i69*m.i73
+ 0.0202038*m.i69*m.i74 + 0.0166321*m.i69*m.i75 + 0.0080034*m.i69*m.i76 + 0.01785698*m.i69*m.i77
+ 0.00956708*m.i69*m.i78 + 0.0273938*m.i69*m.i79 + 0.01578286*m.i69*m.i80 + 0.01986548*m.i69*
m.i81 + 0.00472512*m.i69*m.i82 + 0.0064477*m.i69*m.i83 + 0.0205866*m.i69*m.i84 + 0.01485404*m.i69
*m.i85 + 0.0219926*m.i69*m.i86 + 0.01726592*m.i69*m.i87 + 0.044296*m.i69*m.i88 + 0.01519388*m.i69
*m.i89 + 0.0245318*m.i69*m.i90 + 0.019668*m.i69*m.i91 + 0.01322886*m.i69*m.i92 + 0.00622812*m.i69
*m.i93 + 0.00886068*m.i69*m.i94 + 0.0207946*m.i69*m.i95 + 0.0369544*m.i69*m.i96 + 0.0252064*m.i69
*m.i97 + 0.0246794*m.i69*m.i98 + 0.0240826*m.i69*m.i99 + 0.0322226*m.i69*m.i100 + 0.01122678*
m.i70*m.i71 + 0.00985708*m.i70*m.i72 + 0.00817346*m.i70*m.i73 + 0.01042594*m.i70*m.i74 +
0.0087512*m.i70*m.i75 + 0.00587552*m.i70*m.i76 + 0.00956692*m.i70*m.i77 + 0.00604702*m.i70*m.i78
+ 0.01012786*m.i70*m.i79 + 0.00894572*m.i70*m.i80 + 0.00937532*m.i70*m.i81 + 0.0040741*m.i70*
m.i82 + 0.001290572*m.i70*m.i83 + 0.00820512*m.i70*m.i84 + 0.00683756*m.i70*m.i85 + 0.00768078*
m.i70*m.i86 + 0.00827048*m.i70*m.i87 + 0.01990564*m.i70*m.i88 + 0.007123*m.i70*m.i89 + 0.00998564
*m.i70*m.i90 + 0.00953688*m.i70*m.i91 + 0.00558782*m.i70*m.i92 + 0.00342686*m.i70*m.i93 +
0.00568486*m.i70*m.i94 + 0.00914938*m.i70*m.i95 + 0.01630104*m.i70*m.i96 + 0.01110616*m.i70*m.i97
+ 0.010247*m.i70*m.i98 + 0.00833958*m.i70*m.i99 + 0.01265252*m.i70*m.i100 + 0.0220254*m.i71*
m.i72 + 0.0095213*m.i71*m.i73 + 0.01209936*m.i71*m.i74 + 0.00527094*m.i71*m.i75 + 0.00557218*
m.i71*m.i76 + 0.01262004*m.i71*m.i77 + 0.0037353*m.i71*m.i78 - 0.000223588*m.i71*m.i79 +
0.00801532*m.i71*m.i80 + 0.0286786*m.i71*m.i81 + 0.000788336*m.i71*m.i82 + 0.0387752*m.i71*m.i83
+ 0.01552284*m.i71*m.i84 + 0.00720994*m.i71*m.i85 + 0.01148132*m.i71*m.i86 + 0.00870698*m.i71*
m.i87 + 0.028675*m.i71*m.i88 + 0.00544718*m.i71*m.i89 + 0.00673884*m.i71*m.i90 + 0.01008984*m.i71
*m.i91 + 0.01241834*m.i71*m.i92 + 0.0025495*m.i71*m.i93 + 0.00280272*m.i71*m.i94 + 0.00947552*
m.i71*m.i95 + 0.0070495*m.i71*m.i96 + 0.0170916*m.i71*m.i97 + 0.0269036*m.i71*m.i98 + 0.01506306*
m.i71*m.i99 + 0.0206782*m.i71*m.i100 + 0.00925426*m.i72*m.i73 + 0.00967792*m.i72*m.i74 +
0.00847338*m.i72*m.i75 + 0.005213*m.i72*m.i76 + 0.00908662*m.i72*m.i77 + 0.00316872*m.i72*m.i78
+ 0.00898138*m.i72*m.i79 + 0.0069179*m.i72*m.i80 + 0.0151281*m.i72*m.i81 + 0.00348424*m.i72*
m.i82 + 0.01111986*m.i72*m.i83 + 0.01165966*m.i72*m.i84 + 0.0064802*m.i72*m.i85 + 0.00959246*
m.i72*m.i86 + 0.0084611*m.i72*m.i87 + 0.0240956*m.i72*m.i88 + 0.00687054*m.i72*m.i89 + 0.0094553*
m.i72*m.i90 + 0.0110757*m.i72*m.i91 + 0.00543508*m.i72*m.i92 + 0.0037306*m.i72*m.i93 + 0.00500972
*m.i72*m.i94 + 0.01005818*m.i72*m.i95 + 0.01294332*m.i72*m.i96 + 0.01344022*m.i72*m.i97 +
0.01593132*m.i72*m.i98 + 0.0093216*m.i72*m.i99 + 0.01640118*m.i72*m.i100 + 0.01198598*m.i73*m.i74
+ 0.00750544*m.i73*m.i75 + 0.0050216*m.i73*m.i76 + 0.01285904*m.i73*m.i77 + 0.00339452*m.i73*
m.i78 + 0.00891788*m.i73*m.i79 + 0.00948614*m.i73*m.i80 + 0.00944098*m.i73*m.i81 + 0.00409826*
m.i73*m.i82 + 0.00488372*m.i73*m.i83 + 0.01210326*m.i73*m.i84 + 0.00827726*m.i73*m.i85 +
0.0117403*m.i73*m.i86 + 0.00812428*m.i73*m.i87 + 0.031499*m.i73*m.i88 + 0.00909586*m.i73*m.i89 +
0.00829156*m.i73*m.i90 + 0.0112196*m.i73*m.i91 + 0.00781298*m.i73*m.i92 + 0.00380884*m.i73*m.i93
+ 0.00624296*m.i73*m.i94 + 0.01005016*m.i73*m.i95 + 0.01437472*m.i73*m.i96 + 0.011277*m.i73*
m.i97 + 0.01058622*m.i73*m.i98 + 0.0121454*m.i73*m.i99 + 0.01373088*m.i73*m.i100 + 0.01080198*
m.i74*m.i75 + 0.00656182*m.i74*m.i76 + 0.01437682*m.i74*m.i77 + 0.00746976*m.i74*m.i78 +
0.0158128*m.i74*m.i79 + 0.01161714*m.i74*m.i80 + 0.01098286*m.i74*m.i81 + 0.00409892*m.i74*m.i82
+ 0.00263806*m.i74*m.i83 + 0.01368742*m.i74*m.i84 + 0.00966578*m.i74*m.i85 + 0.0126469*m.i74*
m.i86 + 0.0097362*m.i74*m.i87 + 0.0236752*m.i74*m.i88 + 0.0087263*m.i74*m.i89 + 0.01653132*m.i74*
m.i90 + 0.01259886*m.i74*m.i91 + 0.0074886*m.i74*m.i92 + 0.00612882*m.i74*m.i93 + 0.00553384*
m.i74*m.i94 + 0.01256076*m.i74*m.i95 + 0.022837*m.i74*m.i96 + 0.01489052*m.i74*m.i97 + 0.0138654*
m.i74*m.i98 + 0.01608016*m.i74*m.i99 + 0.0185439*m.i74*m.i100 + 0.00483222*m.i75*m.i76 +
0.0084646*m.i75*m.i77 + 0.00605234*m.i75*m.i78 + 0.01627408*m.i75*m.i79 + 0.00784142*m.i75*m.i80
+ 0.00564276*m.i75*m.i81 + 0.00324588*m.i75*m.i82 - 0.00767236*m.i75*m.i83 + 0.00699372*m.i75*
m.i84 + 0.00737608*m.i75*m.i85 + 0.00954642*m.i75*m.i86 + 0.00823136*m.i75*m.i87 + 0.0262748*
m.i75*m.i88 + 0.00948902*m.i75*m.i89 + 0.01252876*m.i75*m.i90 + 0.01423104*m.i75*m.i91 +
0.00521492*m.i75*m.i92 + 0.00397698*m.i75*m.i93 + 0.00422896*m.i75*m.i94 + 0.01025216*m.i75*m.i95
+ 0.021456*m.i75*m.i96 + 0.01000128*m.i75*m.i97 + 0.00860654*m.i75*m.i98 + 0.0079023*m.i75*m.i99
+ 0.01223272*m.i75*m.i100 + 0.00508036*m.i76*m.i77 + 0.00440326*m.i76*m.i78 + 0.00722936*m.i76*
m.i79 + 0.00592748*m.i76*m.i80 + 0.00543106*m.i76*m.i81 + 0.00352072*m.i76*m.i82 + 0.00282876*
m.i76*m.i83 + 0.00421804*m.i76*m.i84 + 0.00327576*m.i76*m.i85 + 0.00605002*m.i76*m.i86 +
0.00724932*m.i76*m.i87 + 0.01581762*m.i76*m.i88 + 0.00366428*m.i76*m.i89 + 0.00812736*m.i76*m.i90
+ 0.00388382*m.i76*m.i91 + 0.0047062*m.i76*m.i92 + 0.00287772*m.i76*m.i93 + 0.00297876*m.i76*
m.i94 + 0.00459654*m.i76*m.i95 + 0.01070758*m.i76*m.i96 + 0.0061617*m.i76*m.i97 + 0.00324936*
m.i76*m.i98 + 0.00970994*m.i76*m.i99 + 0.00690694*m.i76*m.i100 + 0.00393188*m.i77*m.i78 +
0.00648912*m.i77*m.i79 + 0.00880144*m.i77*m.i80 + 0.00990674*m.i77*m.i81 + 0.00277832*m.i77*m.i82
+ 0.01369158*m.i77*m.i83 + 0.01108874*m.i77*m.i84 + 0.00804488*m.i77*m.i85 + 0.0100624*m.i77*
m.i86 + 0.00868852*m.i77*m.i87 + 0.0320896*m.i77*m.i88 + 0.00865688*m.i77*m.i89 + 0.00846622*
m.i77*m.i90 + 0.01262084*m.i77*m.i91 + 0.01111726*m.i77*m.i92 + 0.00462154*m.i77*m.i93 +
0.00718072*m.i77*m.i94 + 0.01147082*m.i77*m.i95 + 0.0176254*m.i77*m.i96 + 0.01224072*m.i77*m.i97
+ 0.00939734*m.i77*m.i98 + 0.012534*m.i77*m.i99 + 0.01295098*m.i77*m.i100 + 0.00907912*m.i78*
m.i79 + 0.00720642*m.i78*m.i80 + 0.00426806*m.i78*m.i81 + 0.0028332*m.i78*m.i82 - 0.001354666*
m.i78*m.i83 + 0.00318608*m.i78*m.i84 + 0.00627032*m.i78*m.i85 + 0.00574778*m.i78*m.i86 +
0.00663794*m.i78*m.i87 + 0.00493084*m.i78*m.i88 + 0.00225816*m.i78*m.i89 + 0.01063042*m.i78*m.i90
+ 0.0063342*m.i78*m.i91 + 0.00541402*m.i78*m.i92 + 0.00268782*m.i78*m.i93 + 0.00290288*m.i78*
m.i94 + 0.00588184*m.i78*m.i95 + 0.01436716*m.i78*m.i96 + 0.00728756*m.i78*m.i97 + 0.00442972*
m.i78*m.i98 + 0.00924454*m.i78*m.i99 + 0.00979098*m.i78*m.i100 + 0.0060581*m.i79*m.i80 +
0.00755126*m.i79*m.i81 + 0.00637932*m.i79*m.i82 - 0.00105651*m.i79*m.i83 + 0.01349704*m.i79*m.i84
+ 0.01178354*m.i79*m.i85 + 0.0220208*m.i79*m.i86 + 0.0245836*m.i79*m.i87 + 0.0524002*m.i79*m.i88
+ 0.0230428*m.i79*m.i89 + 0.0314514*m.i79*m.i90 + 0.00636018*m.i79*m.i91 + 0.0061917*m.i79*m.i92
+ 0.01207768*m.i79*m.i93 + 0.00753416*m.i79*m.i94 + 0.01719794*m.i79*m.i95 + 0.0367202*m.i79*
m.i96 + 0.01636496*m.i79*m.i97 + 0.01053626*m.i79*m.i98 + 0.0223148*m.i79*m.i99 + 0.0125583*m.i79
*m.i100 + 0.00731124*m.i80*m.i81 + 0.0043053*m.i80*m.i82 + 0.00250064*m.i80*m.i83 + 0.00942746*
m.i80*m.i84 + 0.01109824*m.i80*m.i85 + 0.0094077*m.i80*m.i86 + 0.00584688*m.i80*m.i87 +
0.01773876*m.i80*m.i88 + 0.00587054*m.i80*m.i89 + 0.0073899*m.i80*m.i90 + 0.01217556*m.i80*m.i91
+ 0.0092825*m.i80*m.i92 + 0.001672258*m.i80*m.i93 + 0.00403362*m.i80*m.i94 + 0.01001412*m.i80*
m.i95 + 0.01641906*m.i80*m.i96 + 0.01159292*m.i80*m.i97 + 0.01062798*m.i80*m.i98 + 0.00967468*
m.i80*m.i99 + 0.0140493*m.i80*m.i100 + 0.00288116*m.i81*m.i82 + 0.022981*m.i81*m.i83 + 0.01105584
*m.i81*m.i84 + 0.00722284*m.i81*m.i85 + 0.01178602*m.i81*m.i86 + 0.00945868*m.i81*m.i87 +
0.024973*m.i81*m.i88 + 0.00575624*m.i81*m.i89 + 0.01415098*m.i81*m.i90 + 0.0066048*m.i81*m.i91 +
0.01072344*m.i81*m.i92 + 0.00322326*m.i81*m.i93 + 0.00351188*m.i81*m.i94 + 0.01127788*m.i81*m.i95
+ 0.01956074*m.i81*m.i96 + 0.01617428*m.i81*m.i97 + 0.0227228*m.i81*m.i98 + 0.01855842*m.i81*
m.i99 + 0.01991896*m.i81*m.i100 - 0.00333172*m.i82*m.i83 + 0.00228114*m.i82*m.i84 + 0.00336158*
m.i82*m.i85 + 0.00354748*m.i82*m.i86 + 0.00514572*m.i82*m.i87 + 0.00636398*m.i82*m.i88 +
0.00276272*m.i82*m.i89 + 0.00394504*m.i82*m.i90 + 0.00242814*m.i82*m.i91 + 0.00151634*m.i82*m.i92
+ 0.00205258*m.i82*m.i93 + 0.00416174*m.i82*m.i94 + 0.0036601*m.i82*m.i95 + 0.00573294*m.i82*
m.i96 + 0.0040347*m.i82*m.i97 + 0.001040396*m.i82*m.i98 + 0.00519918*m.i82*m.i99 + 0.00479088*
m.i82*m.i100 + 0.01497528*m.i83*m.i84 + 0.0032291*m.i83*m.i85 + 0.01011148*m.i83*m.i86 +
0.00471364*m.i83*m.i87 + 0.0246434*m.i83*m.i88 + 0.000996878*m.i83*m.i89 - 0.00262512*m.i83*m.i90
- 0.000789784*m.i83*m.i91 + 0.01304756*m.i83*m.i92 + 0.000531142*m.i83*m.i93 - 0.000443948*m.i83
*m.i94 + 0.00279848*m.i83*m.i95 - 0.0065326*m.i83*m.i96 + 0.01221224*m.i83*m.i97 + 0.01799712*
m.i83*m.i98 + 0.0158385*m.i83*m.i99 + 0.0071337*m.i83*m.i100 + 0.00892568*m.i84*m.i85 +
0.01364388*m.i84*m.i86 + 0.0072533*m.i84*m.i87 + 0.0326884*m.i84*m.i88 + 0.00896504*m.i84*m.i89
+ 0.00823562*m.i84*m.i90 + 0.0125821*m.i84*m.i91 + 0.00787816*m.i84*m.i92 + 0.00249586*m.i84*
m.i93 + 0.00519262*m.i84*m.i94 + 0.01044988*m.i84*m.i95 + 0.01107886*m.i84*m.i96 + 0.0139867*
m.i84*m.i97 + 0.01596046*m.i84*m.i98 + 0.01218826*m.i84*m.i99 + 0.01543212*m.i84*m.i100 +
0.00990954*m.i85*m.i86 + 0.00725662*m.i85*m.i87 + 0.0133432*m.i85*m.i88 + 0.00507396*m.i85*m.i89
+ 0.00930526*m.i85*m.i90 + 0.01462284*m.i85*m.i91 + 0.01055408*m.i85*m.i92 + 0.00190258*m.i85*
m.i93 + 0.00468802*m.i85*m.i94 + 0.0107648*m.i85*m.i95 + 0.01646608*m.i85*m.i96 + 0.01215728*
m.i85*m.i97 + 0.01028698*m.i85*m.i98 + 0.01183266*m.i85*m.i99 + 0.01660366*m.i85*m.i100 +
0.0120373*m.i86*m.i87 + 0.0422718*m.i86*m.i88 + 0.00969238*m.i86*m.i89 + 0.01765146*m.i86*m.i90
+ 0.01429788*m.i86*m.i91 + 0.0124585*m.i86*m.i92 + 0.0040945*m.i86*m.i93 + 0.0046898*m.i86*m.i94
+ 0.01232074*m.i86*m.i95 + 0.0222548*m.i86*m.i96 + 0.0145479*m.i86*m.i97 + 0.0128277*m.i86*m.i98
+ 0.0192244*m.i86*m.i99 + 0.01947568*m.i86*m.i100 + 0.032904*m.i87*m.i88 + 0.0084843*m.i87*m.i89
+ 0.01591916*m.i87*m.i90 + 0.0059879*m.i87*m.i91 + 0.00789644*m.i87*m.i92 + 0.00607862*m.i87*
m.i93 + 0.00667478*m.i87*m.i94 + 0.0088746*m.i87*m.i95 + 0.01963916*m.i87*m.i96 + 0.01115822*
m.i87*m.i97 + 0.0065973*m.i87*m.i98 + 0.01821046*m.i87*m.i99 + 0.01269924*m.i87*m.i100 + 0.04164*
m.i88*m.i89 + 0.01700894*m.i88*m.i90 + 0.0282218*m.i88*m.i91 + 0.0247666*m.i88*m.i92 + 0.00860626
*m.i88*m.i93 + 0.0146832*m.i88*m.i94 + 0.0207292*m.i88*m.i95 + 0.0482992*m.i88*m.i96 + 0.026772*
m.i88*m.i97 + 0.0300758*m.i88*m.i98 + 0.0329128*m.i88*m.i99 + 0.01375988*m.i88*m.i100 +
0.00594302*m.i89*m.i90 + 0.00801468*m.i89*m.i91 + 0.00437824*m.i89*m.i92 + 0.00302882*m.i89*m.i93
+ 0.0041304*m.i89*m.i94 + 0.00803522*m.i89*m.i95 + 0.01620516*m.i89*m.i96 + 0.00836644*m.i89*
m.i97 + 0.01022328*m.i89*m.i98 + 0.0069101*m.i89*m.i99 + 0.00464412*m.i89*m.i100 + 0.01014268*
m.i90*m.i91 + 0.00890216*m.i90*m.i92 + 0.00857494*m.i90*m.i93 + 0.00416286*m.i90*m.i94 +
0.01435266*m.i90*m.i95 + 0.038709*m.i90*m.i96 + 0.01593092*m.i90*m.i97 + 0.0108455*m.i90*m.i98 +
0.0247362*m.i90*m.i99 + 0.0239224*m.i90*m.i100 + 0.01172504*m.i91*m.i92 - 3.25928e-5*m.i91*m.i93
+ 0.00582154*m.i91*m.i94 + 0.01455814*m.i91*m.i95 + 0.0217724*m.i91*m.i96 + 0.01520358*m.i91*
m.i97 + 0.01361584*m.i91*m.i98 + 0.01107608*m.i91*m.i99 + 0.0218082*m.i91*m.i100 + 0.000834202*
m.i92*m.i93 + 0.00361846*m.i92*m.i94 + 0.00964536*m.i92*m.i95 + 0.01621624*m.i92*m.i96 +
0.01139352*m.i92*m.i97 + 0.01032652*m.i92*m.i98 + 0.01663626*m.i92*m.i99 + 0.01551254*m.i92*
m.i100 + 0.00302326*m.i93*m.i94 + 0.0039602*m.i93*m.i95 + 0.0070366*m.i93*m.i96 + 0.0035814*m.i93
*m.i97 + 0.00156313*m.i93*m.i98 + 0.00599576*m.i93*m.i99 + 0.00427812*m.i93*m.i100 + 0.00550244*
m.i94*m.i95 + 0.00558508*m.i94*m.i96 + 0.0059384*m.i94*m.i97 + 0.00357124*m.i94*m.i98 + 0.0064057
*m.i94*m.i99 + 0.00623724*m.i94*m.i100 + 0.0227304*m.i95*m.i96 + 0.01445112*m.i95*m.i97 +
0.01257804*m.i95*m.i98 + 0.01368382*m.i95*m.i99 + 0.01773414*m.i95*m.i100 + 0.0257114*m.i96*m.i97
+ 0.01933344*m.i96*m.i98 + 0.0317874*m.i96*m.i99 + 0.0306278*m.i96*m.i100 + 0.01873902*m.i97*
m.i98 + 0.01912542*m.i97*m.i99 + 0.0219022*m.i97*m.i100 + 0.01388668*m.i98*m.i99 + 0.0207524*
m.i98*m.i100 + 0.0256994*m.i99*m.i100 - m.x101 <= 0)
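# The quadratic form in c1 above is bounded by the auxiliary variable m.x101
# (the expression ends with "- m.x101 <= 0"), so x101 plausibly serves as a
# risk measure over the 100 integer holdings. The reading of c2 below is
# inferred from its shape rather than stated anywhere in the generated file:
# it looks like a minimum expected-return constraint, a weighted sum of the
# holdings that must come out nonnegative.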
m.c2 = Constraint(expr= 0.00011438*m.i1 - 0.0226628*m.i2 - 0.0164176*m.i3 - 0.00987102*m.i4 - 0.0177519*m.i5
- 0.0214501*m.i6 - 0.0183449*m.i7 - 0.139908*m.i8 + 0.0143991*m.i9 - 0.00459102*m.i10
- 0.0498625*m.i11 - 0.00136834*m.i12 - 0.00731355*m.i13 - 0.0407972*m.i14 - 0.0179845*m.i15
- 0.0134868*m.i16 + 0.0208532*m.i17 - 0.0134023*m.i18 - 0.0016983*m.i19 - 0.0504684*m.i20
- 0.00993531*m.i21 - 0.00967252*m.i22 - 0.0093525*m.i23 - 0.0235131*m.i24 - 0.00939281*m.i25
- 0.00385931*m.i26 - 0.0232418*m.i27 - 0.0134094*m.i28 - 0.0102879*m.i29 - 0.0680481*m.i30
+ 0.00079685*m.i31 - 0.0117352*m.i32 - 0.0221879*m.i33 - 0.0292863*m.i34 - 0.0178439*m.i35
- 0.0215713*m.i36 - 0.0127821*m.i37 - 0.0199321*m.i38 - 0.0156042*m.i39 + 0.0117787*m.i40
- 0.0242007*m.i41 - 0.0166018*m.i42 - 0.00704129*m.i43 - 0.01393*m.i44 - 0.0168447*m.i45
- 0.00581865*m.i46 - 0.0198853*m.i47 - 0.00910726*m.i48 - 0.00613898*m.i49 - 0.034707*m.i50
- 0.00251132*m.i51 - 0.0165947*m.i52 - 0.00871196*m.i53 - 0.0188213*m.i54 - 0.00851418*m.i55
- 0.00292541*m.i56 - 0.00672748*m.i57 - 0.00207873*m.i58 - 0.0104384*m.i59 + 0.00259625*m.i60
- 0.0200773*m.i61 - 0.0351089*m.i62 - 0.0260835*m.i63 - 0.0163205*m.i64 - 0.0108857*m.i65
- 0.0369356*m.i66 - 0.00072115*m.i67 - 0.013863*m.i68 - 0.0201333*m.i69 - 0.00815196*m.i70
- 0.0274616*m.i71 - 0.00505996*m.i72 - 0.00018617*m.i73 - 0.00473674*m.i74 - 0.0209568*m.i75
- 0.00959808*m.i76 - 0.0138104*m.i77 - 0.00857398*m.i78 - 0.0457583*m.i79 - 0.00116198*m.i80
- 0.0208204*m.i81 - 0.00628309*m.i82 - 0.0237823*m.i83 - 0.0140875*m.i84 - 0.0158258*m.i85
- 0.00742073*m.i86 - 0.0120305*m.i87 + 0.0173439*m.i88 - 0.0253604*m.i89 - 0.0179007*m.i90
- 0.0223623*m.i91 - 0.016037*m.i92 - 0.00597365*m.i93 - 0.0142456*m.i94 - 0.00769496*m.i95
- 0.00982019*m.i96 - 0.00627006*m.i97 - 0.0288562*m.i98 - 0.0245847*m.i99 - 0.0261142*m.i100
>= 0)
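# c3 and c4 below repeat the same linear expression, bracketing it between
# 5000 and 5500. This is most plausibly a budget window: per-unit prices
# times the integer holdings must land inside that band.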
m.c3 = Constraint(expr= 52.59*m.i1 + 28.87*m.i2 + 29.19*m.i3 + 46.55*m.i4 + 24.26*m.i5 + 42.53*m.i6 + 40.53*m.i7
+ 79.56*m.i8 + 108.9*m.i9 + 79.06*m.i10 + 20.15*m.i11 + 35.64*m.i12 + 39.55*m.i13 + 14.32*m.i14
+ 26.41*m.i15 + 62.48*m.i16 + 254.3*m.i17 + 32.42*m.i18 + 24.84*m.i19 + 10.1*m.i20 + 21.2*m.i21
+ 40.25*m.i22 + 17.32*m.i23 + 60.92*m.i24 + 54.73*m.i25 + 78.62*m.i26 + 49.24*m.i27
+ 68.19*m.i28 + 50.3*m.i29 + 3.83*m.i30 + 18.27*m.i31 + 59.67*m.i32 + 12.21*m.i33 + 38.09*m.i34
+ 71.72*m.i35 + 23.6*m.i36 + 70.71*m.i37 + 56.98*m.i38 + 34.47*m.i39 + 10.23*m.i40 + 59.19*m.i41
+ 58.61*m.i42 + 445.29*m.i43 + 131.69*m.i44 + 34.24*m.i45 + 43.11*m.i46 + 25.18*m.i47 + 28*m.i48
+ 19.43*m.i49 + 14.33*m.i50 + 28.41*m.i51 + 74.5*m.i52 + 36.54*m.i53 + 38.99*m.i54 + 43.15*m.i55
+ 199.55*m.i56 + 59.07*m.i57 + 123.55*m.i58 + 20.55*m.i59 + 66.72*m.i60 + 37.95*m.i61
+ 27.62*m.i62 + 23.21*m.i63 + 36.09*m.i64 + 23.09*m.i65 + 46.54*m.i66 + 67.89*m.i67
+ 34.83*m.i68 + 11.96*m.i69 + 45.77*m.i70 + 32.91*m.i71 + 77.37*m.i72 + 21.46*m.i73
+ 53.11*m.i74 + 14.29*m.i75 + 61.13*m.i76 + 32.79*m.i77 + 59.84*m.i78 + 6.59*m.i79 + 14.06*m.i80
+ 55.29*m.i81 + 33.33*m.i82 + 4.24*m.i83 + 23.21*m.i84 + 47.85*m.i85 + 48.99*m.i86 + 57.46*m.i87
+ 28.87*m.i88 + 24.6*m.i89 + 22.26*m.i90 + 28.31*m.i91 + 26.67*m.i92 + 48.1*m.i93 + 28.01*m.i94
+ 64.85*m.i95 + 25.54*m.i96 + 31.47*m.i97 + 18.31*m.i98 + 35.06*m.i99 + 8.06*m.i100 >= 5000)
m.c4 = Constraint(expr= 52.59*m.i1 + 28.87*m.i2 + 29.19*m.i3 + 46.55*m.i4 + 24.26*m.i5 + 42.53*m.i6 + 40.53*m.i7
+ 79.56*m.i8 + 108.9*m.i9 + 79.06*m.i10 + 20.15*m.i11 + 35.64*m.i12 + 39.55*m.i13 + 14.32*m.i14
+ 26.41*m.i15 + 62.48*m.i16 + 254.3*m.i17 + 32.42*m.i18 + 24.84*m.i19 + 10.1*m.i20 + 21.2*m.i21
+ 40.25*m.i22 + 17.32*m.i23 + 60.92*m.i24 + 54.73*m.i25 + 78.62*m.i26 + 49.24*m.i27
+ 68.19*m.i28 + 50.3*m.i29 + 3.83*m.i30 + 18.27*m.i31 + 59.67*m.i32 + 12.21*m.i33 + 38.09*m.i34
+ 71.72*m.i35 + 23.6*m.i36 + 70.71*m.i37 + 56.98*m.i38 + 34.47*m.i39 + 10.23*m.i40 + 59.19*m.i41
+ 58.61*m.i42 + 445.29*m.i43 + 131.69*m.i44 + 34.24*m.i45 + 43.11*m.i46 + 25.18*m.i47 + 28*m.i48
+ 19.43*m.i49 + 14.33*m.i50 + 28.41*m.i51 + 74.5*m.i52 + 36.54*m.i53 + 38.99*m.i54 + 43.15*m.i55
+ 199.55*m.i56 + 59.07*m.i57 + 123.55*m.i58 + 20.55*m.i59 + 66.72*m.i60 + 37.95*m.i61
+ 27.62*m.i62 + 23.21*m.i63 + 36.09*m.i64 + 23.09*m.i65 + 46.54*m.i66 + 67.89*m.i67
+ 34.83*m.i68 + 11.96*m.i69 + 45.77*m.i70 + 32.91*m.i71 + 77.37*m.i72 + 21.46*m.i73
+ 53.11*m.i74 + 14.29*m.i75 + 61.13*m.i76 + 32.79*m.i77 + 59.84*m.i78 + 6.59*m.i79 + 14.06*m.i80
+ 55.29*m.i81 + 33.33*m.i82 + 4.24*m.i83 + 23.21*m.i84 + 47.85*m.i85 + 48.99*m.i86 + 57.46*m.i87
+ 28.87*m.i88 + 24.6*m.i89 + 22.26*m.i90 + 28.31*m.i91 + 26.67*m.i92 + 48.1*m.i93 + 28.01*m.i94
+ 64.85*m.i95 + 25.54*m.i96 + 31.47*m.i97 + 18.31*m.i98 + 35.06*m.i99 + 8.06*m.i100 <= 5500)
| []
| []
| []
| [] | [] | python | null | null | null |
pkg/runner/local_runner_test.go | package runner_test
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner"
"github.com/buildbarn/bb-remote-execution/pkg/runner"
"github.com/buildbarn/bb-storage/pkg/filesystem"
"github.com/stretchr/testify/require"
)
func TestLocalRunner(t *testing.T) {
buildDirectoryPath := filepath.Join(os.Getenv("TEST_TMPDIR"), t.Name())
require.NoError(t, os.Mkdir(buildDirectoryPath, 0777))
buildDirectory, err := filesystem.NewLocalDirectory(buildDirectoryPath)
require.NoError(t, err)
defer buildDirectory.Close()
t.Run("EmptyEnvironment", func(t *testing.T) {
testPath := filepath.Join(buildDirectoryPath, "EmptyEnvironment")
require.NoError(t, os.Mkdir(testPath, 0777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0777))
// Running a command without specifying any environment
// variables should cause the process to be executed in
// an empty environment. It should not inherit the
// environment of the runner.
runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPath, false, false)
response, err := runner.Run(context.Background(), &runner_pb.RunRequest{
Arguments: []string{"/usr/bin/env"},
StdoutPath: "EmptyEnvironment/stdout",
StderrPath: "EmptyEnvironment/stderr",
InputRootDirectory: "EmptyEnvironment/root",
TemporaryDirectory: "EmptyEnvironment/tmp",
})
require.NoError(t, err)
require.Equal(t, int32(0), response.ExitCode)
stdout, err := ioutil.ReadFile(filepath.Join(testPath, "stdout"))
require.NoError(t, err)
require.Empty(t, stdout)
stderr, err := ioutil.ReadFile(filepath.Join(testPath, "stderr"))
require.NoError(t, err)
require.Empty(t, stderr)
})
t.Run("NonEmptyEnvironment", func(t *testing.T) {
testPath := filepath.Join(buildDirectoryPath, "NonEmptyEnvironment")
require.NoError(t, os.Mkdir(testPath, 0777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0777))
tmpPath := filepath.Join(testPath, "tmp")
require.NoError(t, os.Mkdir(tmpPath, 0777))
// The environment variables provided in the RunRequest
// should be respected. If automatic injection of TMPDIR
// is enabled, that variable should also be added.
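// Concretely, FOO and BAZ should pass through unchanged, and TMPDIR
// should point at the action's temporary directory.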
runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPath, true, false)
response, err := runner.Run(context.Background(), &runner_pb.RunRequest{
Arguments: []string{"/usr/bin/env"},
EnvironmentVariables: map[string]string{
"FOO": "bar",
"BAZ": "xyzzy",
},
StdoutPath: "NonEmptyEnvironment/stdout",
StderrPath: "NonEmptyEnvironment/stderr",
InputRootDirectory: "NonEmptyEnvironment/root",
TemporaryDirectory: "NonEmptyEnvironment/tmp",
})
require.NoError(t, err)
require.Equal(t, int32(0), response.ExitCode)
stdout, err := ioutil.ReadFile(filepath.Join(testPath, "stdout"))
require.NoError(t, err)
require.ElementsMatch(t, []string{
"FOO=bar",
"BAZ=xyzzy",
"TMPDIR=" + tmpPath,
}, strings.Fields(string(stdout)))
stderr, err := ioutil.ReadFile(filepath.Join(testPath, "stderr"))
require.NoError(t, err)
require.Empty(t, stderr)
})
t.Run("OverridingTmpdir", func(t *testing.T) {
testPath := filepath.Join(buildDirectoryPath, "OverridingTmpdir")
require.NoError(t, os.Mkdir(testPath, 0777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0777))
tmpPath := filepath.Join(testPath, "tmp")
require.NoError(t, os.Mkdir(tmpPath, 0777))
// Automatic injection of TMPDIR should have no effect
// if the command to be run provides its own TMPDIR.
runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPath, true, false)
response, err := runner.Run(context.Background(), &runner_pb.RunRequest{
Arguments: []string{"/usr/bin/env"},
EnvironmentVariables: map[string]string{
"TMPDIR": "/somewhere/else",
},
StdoutPath: "OverridingTmpdir/stdout",
StderrPath: "OverridingTmpdir/stderr",
InputRootDirectory: "OverridingTmpdir/root",
TemporaryDirectory: "OverridingTmpdir/tmp",
})
require.NoError(t, err)
require.Equal(t, int32(0), response.ExitCode)
stdout, err := ioutil.ReadFile(filepath.Join(testPath, "stdout"))
require.NoError(t, err)
require.Equal(t, "TMPDIR=/somewhere/else\n", string(stdout))
stderr, err := ioutil.ReadFile(filepath.Join(testPath, "stderr"))
require.NoError(t, err)
require.Empty(t, stderr)
})
t.Run("NonZeroExitCode", func(t *testing.T) {
testPath := filepath.Join(buildDirectoryPath, "NonZeroExitCode")
require.NoError(t, os.Mkdir(testPath, 0777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0777))
// Non-zero exit codes should be captured in the
// RunResponse. POSIX 2008 and later added support for
// 32-bit signed exit codes. Most implementations still
// truncate the exit code to 8 bits.
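// For example, "exit 255" is observed as 255, while "exit 256" would
// typically wrap around to 0 on implementations that truncate.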
runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPath, false, false)
response, err := runner.Run(context.Background(), &runner_pb.RunRequest{
Arguments: []string{"/bin/sh", "-c", "exit 255"},
StdoutPath: "NonZeroExitCode/stdout",
StderrPath: "NonZeroExitCode/stderr",
InputRootDirectory: "NonZeroExitCode/root",
TemporaryDirectory: "NonZeroExitCode/tmp",
})
require.NoError(t, err)
require.Equal(t, int32(255), response.ExitCode)
stdout, err := ioutil.ReadFile(filepath.Join(testPath, "stdout"))
require.NoError(t, err)
require.Empty(t, stdout)
stderr, err := ioutil.ReadFile(filepath.Join(testPath, "stderr"))
require.NoError(t, err)
require.Empty(t, stderr)
})
// TODO: Improve testing coverage of LocalRunner.
}
| [
"\"TEST_TMPDIR\""
]
| []
| [
"TEST_TMPDIR"
]
| [] | ["TEST_TMPDIR"] | go | 1 | 0 | |
ideolog/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ideolog.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
upup/pkg/fi/cloudup/apply_cluster.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudup
import (
"fmt"
"net/url"
"os"
"path"
"strings"
"k8s.io/kops/pkg/k8sversion"
"github.com/blang/semver"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kopsbase "k8s.io/kops"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/registry"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/apis/kops/validation"
"k8s.io/kops/pkg/apis/nodeup"
"k8s.io/kops/pkg/assets"
"k8s.io/kops/pkg/client/simple"
"k8s.io/kops/pkg/client/simple/vfsclientset"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model"
"k8s.io/kops/pkg/model/alimodel"
"k8s.io/kops/pkg/model/awsmodel"
"k8s.io/kops/pkg/model/components"
"k8s.io/kops/pkg/model/components/etcdmanager"
"k8s.io/kops/pkg/model/domodel"
"k8s.io/kops/pkg/model/gcemodel"
"k8s.io/kops/pkg/model/openstackmodel"
"k8s.io/kops/pkg/model/spotinstmodel"
"k8s.io/kops/pkg/model/vspheremodel"
"k8s.io/kops/pkg/resources/digitalocean"
"k8s.io/kops/pkg/templates"
"k8s.io/kops/upup/models"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/alitasks"
"k8s.io/kops/upup/pkg/fi/cloudup/aliup"
"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"k8s.io/kops/upup/pkg/fi/cloudup/baremetal"
"k8s.io/kops/upup/pkg/fi/cloudup/cloudformation"
"k8s.io/kops/upup/pkg/fi/cloudup/do"
"k8s.io/kops/upup/pkg/fi/cloudup/dotasks"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"k8s.io/kops/upup/pkg/fi/cloudup/gcetasks"
"k8s.io/kops/upup/pkg/fi/cloudup/openstack"
"k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks"
"k8s.io/kops/upup/pkg/fi/cloudup/spotinsttasks"
"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
"k8s.io/kops/upup/pkg/fi/cloudup/vsphere"
"k8s.io/kops/upup/pkg/fi/cloudup/vspheretasks"
"k8s.io/kops/upup/pkg/fi/fitasks"
"k8s.io/kops/util/pkg/vfs"
)
const (
starline = "*********************************************************************************\n"
)
var (
// AlphaAllowBareMetal is a feature flag that gates BareMetal support while it is alpha
AlphaAllowBareMetal = featureflag.New("AlphaAllowBareMetal", featureflag.Bool(false))
// AlphaAllowDO is a feature flag that gates DigitalOcean support while it is alpha
AlphaAllowDO = featureflag.New("AlphaAllowDO", featureflag.Bool(false))
// AlphaAllowGCE is a feature flag that gates GCE support while it is alpha
AlphaAllowGCE = featureflag.New("AlphaAllowGCE", featureflag.Bool(false))
// AlphaAllowOpenstack is a feature flag that gates OpenStack support while it is alpha
AlphaAllowOpenstack = featureflag.New("AlphaAllowOpenstack", featureflag.Bool(false))
// AlphaAllowVsphere is a feature flag that gates vsphere support while it is alpha
AlphaAllowVsphere = featureflag.New("AlphaAllowVsphere", featureflag.Bool(false))
// AlphaAllowALI is a feature flag that gates aliyun support while it is alpha
AlphaAllowALI = featureflag.New("AlphaAllowALI", featureflag.Bool(false))
// CloudupModels is a list of supported models
CloudupModels = []string{"proto", "cloudup"}
)
type ApplyClusterCmd struct {
Cluster *kops.Cluster
InstanceGroups []*kops.InstanceGroup
// NodeUpSource is the location from which we download nodeup
NodeUpSource string
// NodeUpHash is the SHA hash of the nodeup binary at NodeUpSource
NodeUpHash string
// Models is a list of cloudup models to apply
Models []string
// TargetName specifies how we are operating, e.g. direct to GCE or AWS, dry-run, or terraform
TargetName string
// Target is the fi.Target we will operate against
Target fi.Target
// OutDir is a local directory in which we place output, can cache files etc
OutDir string
// Assets is a list of sources for files (primarily when not using everything containerized)
// Formats:
// raw url: http://... or https://...
// url with hash: <hex>@http://... or <hex>@https://...
Assets []string
Clientset simple.Clientset
// DryRun is true if this is only a dry run
DryRun bool
// RunTasksOptions defines parameters for task execution, e.g. retry interval
RunTasksOptions *fi.RunTasksOptions
// The channel we are using
channel *kops.Channel
// Phase can be set to a Phase to run the specific subset of tasks, if we don't want to run everything
Phase Phase
// LifecycleOverrides is passed in to override the lifecycle for one or more tasks.
// The key value is the task name such as InternetGateway and the value is the fi.Lifecycle
// that is re-mapped.
LifecycleOverrides map[string]fi.Lifecycle
// TaskMap is the map of tasks that we built (output)
TaskMap map[string]fi.Task
}
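// A minimal dry-run invocation might look like the following sketch; the
// cluster and clientset values are placeholders for objects obtained
// elsewhere, not a tested configuration:
//
// applyCmd := &ApplyClusterCmd{
// Cluster: cluster,
// Clientset: clientset,
// TargetName: TargetDryRun,
// OutDir: "out",
// }
// err := applyCmd.Run()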
func (c *ApplyClusterCmd) Run() error {
if c.InstanceGroups == nil {
list, err := c.Clientset.InstanceGroupsFor(c.Cluster).List(metav1.ListOptions{})
if err != nil {
return err
}
var instanceGroups []*kops.InstanceGroup
for i := range list.Items {
instanceGroups = append(instanceGroups, &list.Items[i])
}
c.InstanceGroups = instanceGroups
}
if c.Models == nil {
c.Models = CloudupModels
}
modelStore, err := findModelStore()
if err != nil {
return err
}
channel, err := ChannelForCluster(c.Cluster)
if err != nil {
return err
}
c.channel = channel
stageAssetsLifecycle := fi.LifecycleSync
securityLifecycle := fi.LifecycleSync
networkLifecycle := fi.LifecycleSync
clusterLifecycle := fi.LifecycleSync
switch c.Phase {
case Phase(""):
// Everything ... the default
// until we implement finding assets we need to Ignore them
stageAssetsLifecycle = fi.LifecycleIgnore
case PhaseStageAssets:
networkLifecycle = fi.LifecycleIgnore
securityLifecycle = fi.LifecycleIgnore
clusterLifecycle = fi.LifecycleIgnore
case PhaseNetwork:
stageAssetsLifecycle = fi.LifecycleIgnore
securityLifecycle = fi.LifecycleIgnore
clusterLifecycle = fi.LifecycleIgnore
case PhaseSecurity:
stageAssetsLifecycle = fi.LifecycleIgnore
networkLifecycle = fi.LifecycleExistsAndWarnIfChanges
clusterLifecycle = fi.LifecycleIgnore
case PhaseCluster:
if c.TargetName == TargetDryRun {
stageAssetsLifecycle = fi.LifecycleIgnore
securityLifecycle = fi.LifecycleExistsAndWarnIfChanges
networkLifecycle = fi.LifecycleExistsAndWarnIfChanges
} else {
stageAssetsLifecycle = fi.LifecycleIgnore
networkLifecycle = fi.LifecycleExistsAndValidates
securityLifecycle = fi.LifecycleExistsAndValidates
}
default:
return fmt.Errorf("unknown phase %q", c.Phase)
}
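// To summarize the matrix above: the default phase syncs everything except
// asset staging, each named phase ignores the others, and the security and
// cluster phases re-check (warn or validate) the layers they depend on
// rather than rebuilding them.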
// This is kind of a hack. We need to move phases out of fi: if we used Phase here we would
// introduce a circular Go dependency.
phase := string(c.Phase)
assetBuilder := assets.NewAssetBuilder(c.Cluster, phase)
err = c.upgradeSpecs(assetBuilder)
if err != nil {
return err
}
err = c.validateKopsVersion()
if err != nil {
return err
}
err = c.validateKubernetesVersion()
if err != nil {
return err
}
err = validation.DeepValidate(c.Cluster, c.InstanceGroups, true)
if err != nil {
return err
}
cluster := c.Cluster
if cluster.Spec.KubernetesVersion == "" {
return fmt.Errorf("KubernetesVersion not set")
}
if cluster.Spec.DNSZone == "" && !dns.IsGossipHostname(cluster.ObjectMeta.Name) {
return fmt.Errorf("DNSZone not set")
}
l := &Loader{}
l.Init()
l.Cluster = c.Cluster
configBase, err := vfs.Context.BuildVfsPath(cluster.Spec.ConfigBase)
if err != nil {
return fmt.Errorf("error parsing config base %q: %v", cluster.Spec.ConfigBase, err)
}
keyStore, err := c.Clientset.KeyStore(cluster)
if err != nil {
return err
}
sshCredentialStore, err := c.Clientset.SSHCredentialStore(cluster)
if err != nil {
return err
}
secretStore, err := c.Clientset.SecretStore(cluster)
if err != nil {
return err
}
// Normalize k8s version
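// (e.g. a user-supplied "v1.11.1" becomes "1.11.1")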
versionWithoutV := strings.TrimSpace(cluster.Spec.KubernetesVersion)
if strings.HasPrefix(versionWithoutV, "v") {
versionWithoutV = versionWithoutV[1:]
}
if cluster.Spec.KubernetesVersion != versionWithoutV {
glog.Warningf("Normalizing kubernetes version: %q -> %q", cluster.Spec.KubernetesVersion, versionWithoutV)
cluster.Spec.KubernetesVersion = versionWithoutV
}
kv, err := k8sversion.Parse(cluster.Spec.KubernetesVersion)
if err != nil {
return err
}
// Check whether we should recommend turning off anonymousAuth on k8s versions >= 1.10.
// We start at 1.10 because this is a really critical issue and 1.10 is affected.
if kv.IsGTE("1.10") {
// We only warn here, because setting or modifying the kubelet object messes with the output.
warn := false
if cluster.Spec.Kubelet == nil {
warn = true
} else if cluster.Spec.Kubelet.AnonymousAuth == nil {
warn = true
}
if warn {
fmt.Println("")
fmt.Printf(starline)
fmt.Println("")
fmt.Println("Kubelet anonymousAuth is currently turned on. This allows RBAC escalation and remote code execution possibilites.")
fmt.Println("It is highly recommended you turn it off by setting 'spec.kubelet.anonymousAuth' to 'false' via 'kops edit cluster'")
fmt.Println("")
fmt.Println("See https://github.com/kubernetes/kops/blob/master/docs/security.md#kubelet-api")
fmt.Println("")
fmt.Printf(starline)
fmt.Println("")
}
}
if err := c.AddFileAssets(assetBuilder); err != nil {
return err
}
// Only setup transfer of kops assets if using a FileRepository
if c.Cluster.Spec.Assets != nil && c.Cluster.Spec.Assets.FileRepository != nil {
if err := SetKopsAssetsLocations(assetBuilder); err != nil {
return err
}
}
checkExisting := true
l.AddTypes(map[string]interface{}{
"keypair": &fitasks.Keypair{},
"secret": &fitasks.Secret{},
"managedFile": &fitasks.ManagedFile{},
"mirrorKeystore": &fitasks.MirrorKeystore{},
"mirrorSecrets": &fitasks.MirrorSecrets{},
})
cloud, err := BuildCloud(cluster)
if err != nil {
return err
}
region := ""
project := ""
var sshPublicKeys [][]byte
{
keys, err := sshCredentialStore.FindSSHPublicKeys(fi.SecretNameSSHPrimary)
if err != nil {
return fmt.Errorf("error retrieving SSH public key %q: %v", fi.SecretNameSSHPrimary, err)
}
for _, k := range keys {
sshPublicKeys = append(sshPublicKeys, []byte(k.Spec.PublicKey))
}
}
modelContext := &model.KopsModelContext{
Cluster: cluster,
InstanceGroups: c.InstanceGroups,
}
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderGCE:
{
gceCloud := cloud.(gce.GCECloud)
region = gceCloud.Region()
project = gceCloud.Project()
if !AlphaAllowGCE.Enabled() {
return fmt.Errorf("GCE support is currently alpha, and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowGCE")
}
modelContext.SSHPublicKeys = sshPublicKeys
l.AddTypes(map[string]interface{}{
"Disk": &gcetasks.Disk{},
"Instance": &gcetasks.Instance{},
"InstanceTemplate": &gcetasks.InstanceTemplate{},
"Network": &gcetasks.Network{},
"InstanceGroupManager": &gcetasks.InstanceGroupManager{},
"FirewallRule": &gcetasks.FirewallRule{},
"Address": &gcetasks.Address{},
})
}
case kops.CloudProviderDO:
{
if !AlphaAllowDO.Enabled() {
return fmt.Errorf("DigitalOcean support is currently (very) alpha and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowDO to enable it")
}
modelContext.SSHPublicKeys = sshPublicKeys
l.AddTypes(map[string]interface{}{
"volume": &dotasks.Volume{},
"droplet": &dotasks.Droplet{},
})
}
case kops.CloudProviderAWS:
{
awsCloud := cloud.(awsup.AWSCloud)
region = awsCloud.Region()
l.AddTypes(map[string]interface{}{
// EC2
"elasticIP": &awstasks.ElasticIP{},
"instance": &awstasks.Instance{},
"instanceElasticIPAttachment": &awstasks.InstanceElasticIPAttachment{},
"instanceVolumeAttachment": &awstasks.InstanceVolumeAttachment{},
"ebsVolume": &awstasks.EBSVolume{},
"sshKey": &awstasks.SSHKey{},
// IAM
"iamInstanceProfile": &awstasks.IAMInstanceProfile{},
"iamInstanceProfileRole": &awstasks.IAMInstanceProfileRole{},
"iamRole": &awstasks.IAMRole{},
"iamRolePolicy": &awstasks.IAMRolePolicy{},
// VPC / Networking
"dhcpOptions": &awstasks.DHCPOptions{},
"internetGateway": &awstasks.InternetGateway{},
"route": &awstasks.Route{},
"routeTable": &awstasks.RouteTable{},
"routeTableAssociation": &awstasks.RouteTableAssociation{},
"securityGroup": &awstasks.SecurityGroup{},
"securityGroupRule": &awstasks.SecurityGroupRule{},
"subnet": &awstasks.Subnet{},
"vpc": &awstasks.VPC{},
"ngw": &awstasks.NatGateway{},
"vpcDHDCPOptionsAssociation": &awstasks.VPCDHCPOptionsAssociation{},
// ELB
"loadBalancer": &awstasks.LoadBalancer{},
"loadBalancerAttachment": &awstasks.LoadBalancerAttachment{},
// Autoscaling
"autoscalingGroup": &awstasks.AutoscalingGroup{},
"launchConfiguration": &awstasks.LaunchConfiguration{},
// Spotinst
"spotinstElastigroup": &spotinsttasks.Elastigroup{},
})
if len(sshPublicKeys) == 0 {
return fmt.Errorf("SSH public key must be specified when running with AWS (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
modelContext.SSHPublicKeys = sshPublicKeys
if len(sshPublicKeys) != 1 {
return fmt.Errorf("Exactly one 'admin' SSH public key can be specified when running with AWS; please delete a key using `kops delete secret`")
}
l.TemplateFunctions["MachineTypeInfo"] = awsup.GetMachineTypeInfo
}
case kops.CloudProviderALI:
{
if !AlphaAllowALI.Enabled() {
return fmt.Errorf("Aliyun support is currently alpha, and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowALI")
}
aliCloud := cloud.(aliup.ALICloud)
region = aliCloud.Region()
l.AddTypes(map[string]interface{}{
"Vpc": &alitasks.VPC{},
"VSwitch": &alitasks.VSwitch{},
"Disk": &alitasks.Disk{},
"SecurityGroup": &alitasks.SecurityGroup{},
"SecurityGroupRule": &alitasks.SecurityGroupRule{},
"LoadBalancer": &alitasks.LoadBalancer{},
"LoadBalancerListener": &alitasks.LoadBalancerListener{},
"LoadBalancerWhiteList": &alitasks.LoadBalancerWhiteList{},
"AutoscalingGroup": &alitasks.ScalingGroup{},
"LaunchConfiguration": &alitasks.LaunchConfiguration{},
"RAMPolicy": &alitasks.RAMPolicy{},
"RAMRole": &alitasks.RAMRole{},
"SSHKey": &alitasks.SSHKey{},
})
if len(sshPublicKeys) == 0 {
return fmt.Errorf("SSH public key must be specified when running with ALICloud (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
modelContext.SSHPublicKeys = sshPublicKeys
if len(sshPublicKeys) != 1 {
return fmt.Errorf("Exactly one 'admin' SSH public key can be specified when running with ALICloud; please delete a key using `kops delete secret`")
}
}
case kops.CloudProviderVSphere:
{
if !AlphaAllowVsphere.Enabled() {
return fmt.Errorf("Vsphere support is currently alpha, and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowVsphere")
}
vsphereCloud := cloud.(*vsphere.VSphereCloud)
// TODO: map region with vCenter cluster, or datacenter, or datastore?
region = vsphereCloud.Cluster
l.AddTypes(map[string]interface{}{
"instance": &vspheretasks.VirtualMachine{},
})
}
case kops.CloudProviderBareMetal:
{
if !AlphaAllowBareMetal.Enabled() {
return fmt.Errorf("BareMetal support is currently (very) alpha and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowBareMetal to enable it")
}
// No additional tasks (yet)
}
case kops.CloudProviderOpenstack:
{
if !AlphaAllowOpenstack.Enabled() {
return fmt.Errorf("Openstack support is currently alpha, and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowOpenstack")
}
osCloud := cloud.(openstack.OpenstackCloud)
region = osCloud.Region()
l.AddTypes(map[string]interface{}{
// Compute
"sshKey": &openstacktasks.SSHKey{},
"serverGroup": &openstacktasks.ServerGroup{},
"instance": &openstacktasks.Instance{},
// Networking
"network": &openstacktasks.Network{},
"subnet": &openstacktasks.Subnet{},
"router": &openstacktasks.Router{},
"securityGroup": &openstacktasks.SecurityGroup{},
"securityGroupRule": &openstacktasks.SecurityGroupRule{},
// BlockStorage
"volume": &openstacktasks.Volume{},
// LB
"lb": &openstacktasks.LB{},
})
if len(sshPublicKeys) == 0 {
return fmt.Errorf("SSH public key must be specified when running with Openstack (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
modelContext.SSHPublicKeys = sshPublicKeys
if len(sshPublicKeys) != 1 {
return fmt.Errorf("Exactly one 'admin' SSH public key can be specified when running with Openstack; please delete a key using `kops delete secret`")
}
}
default:
return fmt.Errorf("unknown CloudProvider %q", cluster.Spec.CloudProvider)
}
modelContext.Region = region
if dns.IsGossipHostname(cluster.ObjectMeta.Name) {
glog.Infof("Gossip DNS: skipping DNS validation")
} else {
err = validateDNS(cluster, cloud)
if err != nil {
return err
}
}
clusterTags, err := buildCloudupTags(cluster)
if err != nil {
return err
}
tf := &TemplateFunctions{
cluster: cluster,
instanceGroups: c.InstanceGroups,
tags: clusterTags,
region: region,
modelContext: modelContext,
}
l.Tags = clusterTags
l.WorkDir = c.OutDir
l.ModelStore = modelStore
var fileModels []string
for _, m := range c.Models {
switch m {
case "proto":
// No proto code options; no file model
case "cloudup":
templates, err := templates.LoadTemplates(cluster, models.NewAssetPath("cloudup/resources"))
if err != nil {
return fmt.Errorf("error loading templates: %v", err)
}
err = tf.AddTo(templates.TemplateFunctions, secretStore)
if err != nil {
return err
}
l.Builders = append(l.Builders,
&BootstrapChannelBuilder{
Lifecycle: &clusterLifecycle,
assetBuilder: assetBuilder,
cluster: cluster,
templates: templates,
},
&model.PKIModelBuilder{
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
},
&etcdmanager.EtcdManagerBuilder{
AssetBuilder: assetBuilder,
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
},
)
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
awsModelContext := &awsmodel.AWSModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&awsmodel.APILoadBalancerBuilder{AWSModelContext: awsModelContext, Lifecycle: &clusterLifecycle, SecurityLifecycle: &securityLifecycle},
&model.BastionModelBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle, SecurityLifecycle: &securityLifecycle},
&model.DNSModelBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&model.ExternalAccessModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
&model.FirewallModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
&model.SSHKeyModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
)
l.Builders = append(l.Builders,
&model.NetworkModelBuilder{KopsModelContext: modelContext, Lifecycle: &networkLifecycle},
)
l.Builders = append(l.Builders,
&model.IAMModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
)
case kops.CloudProviderDO:
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
)
case kops.CloudProviderGCE:
gceModelContext := &gcemodel.GCEModelContext{
KopsModelContext: modelContext,
}
storageAclLifecycle := securityLifecycle
if storageAclLifecycle != fi.LifecycleIgnore {
// This is a best-effort permissions fix
storageAclLifecycle = fi.LifecycleWarnIfInsufficientAccess
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&gcemodel.APILoadBalancerBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.ExternalAccessModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.FirewallModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.NetworkModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &networkLifecycle},
)
l.Builders = append(l.Builders,
&gcemodel.StorageAclBuilder{GCEModelContext: gceModelContext, Cloud: cloud.(gce.GCECloud), Lifecycle: &storageAclLifecycle},
)
case kops.CloudProviderALI:
aliModelContext := &alimodel.ALIModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&alimodel.APILoadBalancerModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.NetworkModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.RAMModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.SSHKeyModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.FirewallModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.ExternalAccessModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
)
case kops.CloudProviderVSphere:
// No special settings (yet!)
case kops.CloudProviderBareMetal:
// No special settings (yet!)
case kops.CloudProviderOpenstack:
openstackModelContext := &openstackmodel.OpenstackModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
// &openstackmodel.APILBModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &clusterLifecycle},
&openstackmodel.NetworkModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &networkLifecycle},
&openstackmodel.SSHKeyModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &securityLifecycle},
&openstackmodel.FirewallModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &securityLifecycle},
)
default:
return fmt.Errorf("unknown cloudprovider %q", cluster.Spec.CloudProvider)
}
fileModels = append(fileModels, m)
default:
fileModels = append(fileModels, m)
}
}
l.TemplateFunctions["CA"] = func() fi.CAStore {
return keyStore
}
l.TemplateFunctions["Secrets"] = func() fi.SecretStore {
return secretStore
}
bootstrapScriptBuilder := &model.BootstrapScript{
NodeUpConfigBuilder: func(ig *kops.InstanceGroup) (*nodeup.Config, error) { return c.BuildNodeUpConfig(assetBuilder, ig) },
NodeUpSource: c.NodeUpSource,
NodeUpSourceHash: c.NodeUpHash,
}
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
awsModelContext := &awsmodel.AWSModelContext{
KopsModelContext: modelContext,
}
if featureflag.Spotinst.Enabled() {
l.Builders = append(l.Builders, &spotinstmodel.ElastigroupModelBuilder{
AWSModelContext: awsModelContext,
BootstrapScript: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
SecurityLifecycle: &securityLifecycle,
})
} else {
l.Builders = append(l.Builders, &awsmodel.AutoscalingGroupModelBuilder{
AWSModelContext: awsModelContext,
BootstrapScript: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
SecurityLifecycle: &securityLifecycle,
})
}
case kops.CloudProviderDO:
doModelContext := &domodel.DOModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &domodel.DropletBuilder{
DOModelContext: doModelContext,
BootstrapScript: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
case kops.CloudProviderGCE:
{
gceModelContext := &gcemodel.GCEModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &gcemodel.AutoscalingGroupModelBuilder{
GCEModelContext: gceModelContext,
BootstrapScript: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
}
case kops.CloudProviderALI:
{
aliModelContext := &alimodel.ALIModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &alimodel.ScalingGroupModelBuilder{
ALIModelContext: aliModelContext,
BootstrapScript: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
}
case kops.CloudProviderVSphere:
{
vsphereModelContext := &vspheremodel.VSphereModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &vspheremodel.AutoscalingGroupModelBuilder{
VSphereModelContext: vsphereModelContext,
BootstrapScript: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
}
case kops.CloudProviderBareMetal:
// BareMetal tasks will go here
case kops.CloudProviderOpenstack:
openstackModelContext := &openstackmodel.OpenstackModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &openstackmodel.ServerGroupModelBuilder{
OpenstackModelContext: openstackModelContext,
BootstrapScript: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
default:
return fmt.Errorf("unknown cloudprovider %q", cluster.Spec.CloudProvider)
}
l.TemplateFunctions["Masters"] = tf.modelContext.MasterInstanceGroups
err = tf.AddTo(l.TemplateFunctions, secretStore)
if err != nil {
return err
}
taskMap, err := l.BuildTasks(modelStore, fileModels, assetBuilder, &stageAssetsLifecycle, c.LifecycleOverrides)
if err != nil {
return fmt.Errorf("error building tasks: %v", err)
}
c.TaskMap = taskMap
var target fi.Target
dryRun := false
shouldPrecreateDNS := true
switch c.TargetName {
case TargetDirect:
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderGCE:
target = gce.NewGCEAPITarget(cloud.(gce.GCECloud))
case kops.CloudProviderAWS:
target = awsup.NewAWSAPITarget(cloud.(awsup.AWSCloud))
case kops.CloudProviderDO:
target = do.NewDOAPITarget(cloud.(*digitalocean.Cloud))
case kops.CloudProviderVSphere:
target = vsphere.NewVSphereAPITarget(cloud.(*vsphere.VSphereCloud))
case kops.CloudProviderBareMetal:
target = baremetal.NewTarget(cloud.(*baremetal.Cloud))
case kops.CloudProviderOpenstack:
target = openstack.NewOpenstackAPITarget(cloud.(openstack.OpenstackCloud))
case kops.CloudProviderALI:
target = aliup.NewALIAPITarget(cloud.(aliup.ALICloud))
default:
return fmt.Errorf("direct configuration not supported with CloudProvider:%q", cluster.Spec.CloudProvider)
}
case TargetTerraform:
checkExisting = false
outDir := c.OutDir
tf := terraform.NewTerraformTarget(cloud, region, project, outDir, cluster.Spec.Target)
// We include a few "util" variables in the TF output
if err := tf.AddOutputVariable("region", terraform.LiteralFromStringValue(region)); err != nil {
return err
}
if project != "" {
if err := tf.AddOutputVariable("project", terraform.LiteralFromStringValue(project)); err != nil {
return err
}
}
if err := tf.AddOutputVariable("cluster_name", terraform.LiteralFromStringValue(cluster.ObjectMeta.Name)); err != nil {
return err
}
target = tf
// Can cause conflicts with terraform management
shouldPrecreateDNS = false
case TargetCloudformation:
checkExisting = false
outDir := c.OutDir
target = cloudformation.NewCloudformationTarget(cloud, region, project, outDir)
// Can cause conflicts with cloudformation management
shouldPrecreateDNS = false
case TargetDryRun:
target = fi.NewDryRunTarget(assetBuilder, os.Stdout)
dryRun = true
// Avoid making changes on a dry-run
shouldPrecreateDNS = false
default:
return fmt.Errorf("unsupported target type %q", c.TargetName)
}
c.Target = target
if !dryRun {
err = registry.WriteConfigDeprecated(cluster, configBase.Join(registry.PathClusterCompleted), c.Cluster)
if err != nil {
return fmt.Errorf("error writing completed cluster spec: %v", err)
}
vfsMirror := vfsclientset.NewInstanceGroupMirror(cluster, configBase)
for _, g := range c.InstanceGroups {
// TODO: We need to update the mirror (below), but do we need to update the primary?
_, err := c.Clientset.InstanceGroupsFor(c.Cluster).Update(g)
if err != nil {
return fmt.Errorf("error writing InstanceGroup %q to registry: %v", g.ObjectMeta.Name, err)
}
// TODO: Don't write if vfsMirror == c.ClientSet
if err := vfsMirror.WriteMirror(g); err != nil {
return fmt.Errorf("error writing instance group spec to mirror: %v", err)
}
}
}
context, err := fi.NewContext(target, cluster, cloud, keyStore, secretStore, configBase, checkExisting, taskMap)
if err != nil {
return fmt.Errorf("error building context: %v", err)
}
defer context.Close()
var options fi.RunTasksOptions
if c.RunTasksOptions != nil {
options = *c.RunTasksOptions
} else {
options.InitDefaults()
}
err = context.RunTasks(options)
if err != nil {
return fmt.Errorf("error running tasks: %v", err)
}
if dns.IsGossipHostname(cluster.Name) {
shouldPrecreateDNS = false
}
if shouldPrecreateDNS {
if err := precreateDNS(cluster, cloud); err != nil {
glog.Warningf("unable to pre-create DNS records - cluster startup may be slower: %v", err)
}
}
err = target.Finish(taskMap) // This will finish the apply and print the changes
if err != nil {
return fmt.Errorf("error closing target: %v", err)
}
return nil
}
// upgradeSpecs ensures that fields are fully populated / defaulted
func (c *ApplyClusterCmd) upgradeSpecs(assetBuilder *assets.AssetBuilder) error {
fullCluster, err := PopulateClusterSpec(c.Clientset, c.Cluster, assetBuilder)
if err != nil {
return err
}
c.Cluster = fullCluster
for i, g := range c.InstanceGroups {
fullGroup, err := PopulateInstanceGroupSpec(fullCluster, g, c.channel)
if err != nil {
return err
}
c.InstanceGroups[i] = fullGroup
}
return nil
}
// validateKopsVersion ensures that kops meets the version requirements / recommendations in the channel
func (c *ApplyClusterCmd) validateKopsVersion() error {
kopsVersion, err := semver.ParseTolerant(kopsbase.Version)
if err != nil {
glog.Warningf("unable to parse kops version %q", kopsbase.Version)
// Not a hard-error
return nil
}
versionInfo := kops.FindKopsVersionSpec(c.channel.Spec.KopsVersions, kopsVersion)
if versionInfo == nil {
glog.Warningf("unable to find version information for kops version %q in channel", kopsVersion)
// Not a hard-error
return nil
}
recommended, err := versionInfo.FindRecommendedUpgrade(kopsVersion)
if err != nil {
glog.Warningf("unable to parse version recommendation for kops version %q in channel", kopsVersion)
}
required, err := versionInfo.IsUpgradeRequired(kopsVersion)
if err != nil {
glog.Warningf("unable to parse version requirement for kops version %q in channel", kopsVersion)
}
if recommended != nil && !required {
fmt.Printf("\n")
fmt.Printf(starline)
fmt.Printf("\n")
fmt.Printf("A new kops version is available: %s\n", recommended)
fmt.Printf("\n")
fmt.Printf("Upgrading is recommended\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_kops", recommended.String()))
fmt.Printf("\n")
fmt.Printf(starline)
fmt.Printf("\n")
} else if required {
fmt.Printf("\n")
fmt.Printf(starline)
fmt.Printf("\n")
if recommended != nil {
fmt.Printf("A new kops version is available: %s\n", recommended)
}
fmt.Printf("\n")
fmt.Printf("This version of kops (%s) is no longer supported; upgrading is required\n", kopsbase.Version)
fmt.Printf("(you can bypass this check by exporting KOPS_RUN_OBSOLETE_VERSION)\n")
fmt.Printf("\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_kops", recommended.String()))
fmt.Printf("\n")
fmt.Printf(starline)
fmt.Printf("\n")
}
if required {
if os.Getenv("KOPS_RUN_OBSOLETE_VERSION") == "" {
return fmt.Errorf("kops upgrade is required")
}
}
return nil
}
// validateKubernetesVersion ensures that kubernetes meets the version requirements / recommendations in the channel
func (c *ApplyClusterCmd) validateKubernetesVersion() error {
parsed, err := util.ParseKubernetesVersion(c.Cluster.Spec.KubernetesVersion)
if err != nil {
glog.Warningf("unable to parse kubernetes version %q", c.Cluster.Spec.KubernetesVersion)
// Not a hard-error
return nil
}
// TODO: make util.ParseKubernetesVersion not return a pointer
kubernetesVersion := *parsed
versionInfo := kops.FindKubernetesVersionSpec(c.channel.Spec.KubernetesVersions, kubernetesVersion)
if versionInfo == nil {
glog.Warningf("unable to find version information for kubernetes version %q in channel", kubernetesVersion)
// Not a hard-error
return nil
}
recommended, err := versionInfo.FindRecommendedUpgrade(kubernetesVersion)
if err != nil {
glog.Warningf("unable to parse version recommendation for kubernetes version %q in channel", kubernetesVersion)
}
required, err := versionInfo.IsUpgradeRequired(kubernetesVersion)
if err != nil {
glog.Warningf("unable to parse version requirement for kubernetes version %q in channel", kubernetesVersion)
}
if recommended != nil && !required {
fmt.Printf("\n")
fmt.Printf(starline)
fmt.Printf("\n")
fmt.Printf("A new kubernetes version is available: %s\n", recommended)
fmt.Printf("Upgrading is recommended (try kops upgrade cluster)\n")
fmt.Printf("\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_k8s", recommended.String()))
fmt.Printf("\n")
fmt.Printf(starline)
fmt.Printf("\n")
} else if required {
fmt.Printf("\n")
fmt.Printf(starline)
fmt.Printf("\n")
if recommended != nil {
fmt.Printf("A new kubernetes version is available: %s\n", recommended)
}
fmt.Printf("\n")
fmt.Printf("This version of kubernetes is no longer supported; upgrading is required\n")
fmt.Printf("(you can bypass this check by exporting KOPS_RUN_OBSOLETE_VERSION)\n")
fmt.Printf("\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_k8s", recommended.String()))
fmt.Printf("\n")
fmt.Printf(starline)
fmt.Printf("\n")
}
if required {
if os.Getenv("KOPS_RUN_OBSOLETE_VERSION") == "" {
return fmt.Errorf("kubernetes upgrade is required")
}
}
return nil
}
// AddFileAssets adds the file assets within the assetBuilder
func (c *ApplyClusterCmd) AddFileAssets(assetBuilder *assets.AssetBuilder) error {
var baseURL string
var err error
if components.IsBaseURL(c.Cluster.Spec.KubernetesVersion) {
baseURL = c.Cluster.Spec.KubernetesVersion
} else {
baseURL = "https://storage.googleapis.com/kubernetes-release/release/v" + c.Cluster.Spec.KubernetesVersion
}
k8sAssetsNames := []string{
"/bin/linux/amd64/kubelet",
"/bin/linux/amd64/kubectl",
}
if needsMounterAsset(c.Cluster, c.InstanceGroups) {
k8sVersion, err := util.ParseKubernetesVersion(c.Cluster.Spec.KubernetesVersion)
if err != nil {
return fmt.Errorf("unable to determine kubernetes version from %q", c.Cluster.Spec.KubernetesVersion)
} else if util.IsKubernetesGTE("1.9", *k8sVersion) {
// Available directly
k8sAssetsNames = append(k8sAssetsNames, "/bin/linux/amd64/mounter")
} else {
// Only available in the kubernetes-manifests.tar.gz directory
k8sAssetsNames = append(k8sAssetsNames, "/kubernetes-manifests.tar.gz")
}
}
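// Resolve each asset path against the base URL and record it in the "<sha256-hex>@<url>" form used for c.Assets.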
for _, a := range k8sAssetsNames {
k, err := url.Parse(baseURL)
if err != nil {
return err
}
k.Path = path.Join(k.Path, a)
u, hash, err := assetBuilder.RemapFileAndSHA(k)
if err != nil {
return err
}
c.Assets = append(c.Assets, hash.Hex()+"@"+u.String())
}
if usesCNI(c.Cluster) {
cniAsset, cniAssetHashString, err := findCNIAssets(c.Cluster, assetBuilder)
if err != nil {
return err
}
c.Assets = append(c.Assets, cniAssetHashString+"@"+cniAsset.String())
}
if c.Cluster.Spec.Networking.LyftVPC != nil {
lyftVPCDownloadURL := os.Getenv("LYFT_VPC_DOWNLOAD_URL")
if lyftVPCDownloadURL == "" {
lyftVPCDownloadURL = "bfdc65028a3bf8ffe14388fca28ede3600e7e2dee4e781908b6a23f9e79f86ad@https://github.com/lyft/cni-ipvlan-vpc-k8s/releases/download/v0.4.2/cni-ipvlan-vpc-k8s-v0.4.2.tar.gz"
} else {
glog.Warningf("Using url from LYFT_VPC_DOWNLOAD_URL env var: %q", lyftVPCDownloadURL)
}
c.Assets = append(c.Assets, lyftVPCDownloadURL)
}
// TODO: figure out whether we can do this only for CoreOS and GCE Container-Optimized OS
// TODO: it is very difficult to determine in advance which OS an AMI runs, and whether that OS needs socat
// For now we copy the socat and conntrack binaries to all distros.
// Most distros ship their own socat and conntrack binaries;
// container operating systems like CoreOS need them added.
{
utilsLocation, hash, err := KopsFileUrl("linux/amd64/utils.tar.gz", assetBuilder)
if err != nil {
return err
}
c.Assets = append(c.Assets, hash.Hex()+"@"+utilsLocation.String())
}
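// Record where nodeup itself should be downloaded from, along with its hash.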
n, hash, err := NodeUpLocation(assetBuilder)
if err != nil {
return err
}
c.NodeUpSource = n.String()
c.NodeUpHash = hash.Hex()
// Explicitly resolve the protokube image; otherwise, when the Target is DryRun,
// this asset is never added.
// Is there a better way to call this?
_, _, err = ProtokubeImageSource(assetBuilder)
if err != nil {
return err
}
return nil
}
// buildPermalink returns a link to our "permalink docs", to further explain an error message
func buildPermalink(key, anchor string) string {
url := "https://github.com/kubernetes/kops/blob/master/permalinks/" + key + ".md"
if anchor != "" {
url += "#" + anchor
}
return url
}
func ChannelForCluster(c *kops.Cluster) (*kops.Channel, error) {
channelLocation := c.Spec.Channel
if channelLocation == "" {
channelLocation = kops.DefaultChannel
}
return kops.LoadChannel(channelLocation)
}
// needsMounterAsset checks whether we need the mounter program.
// Currently this is only needed on Container-Optimized OS (i.e. on GCE), but we don't have a nice way to detect it yet.
func needsMounterAsset(c *kops.Cluster, instanceGroups []*kops.InstanceGroup) bool {
// TODO: Do real detection of ContainerOS (but this has to work with image names, and maybe even forked images)
switch kops.CloudProviderID(c.Spec.CloudProvider) {
case kops.CloudProviderGCE:
return true
default:
return false
}
}
// BuildNodeUpConfig returns the NodeUp config for the given instance group
func (c *ApplyClusterCmd) BuildNodeUpConfig(assetBuilder *assets.AssetBuilder, ig *kops.InstanceGroup) (*nodeup.Config, error) {
if ig == nil {
return nil, fmt.Errorf("instanceGroup cannot be nil")
}
cluster := c.Cluster
configBase, err := vfs.Context.BuildVfsPath(cluster.Spec.ConfigBase)
if err != nil {
return nil, fmt.Errorf("error parsing config base %q: %v", cluster.Spec.ConfigBase, err)
}
// TODO: Remove
clusterTags, err := buildCloudupTags(cluster)
if err != nil {
return nil, err
}
channels := []string{
configBase.Join("addons", "bootstrap-channel.yaml").Path(),
}
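// Append any additional addon manifests declared on the cluster spec.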
for i := range c.Cluster.Spec.Addons {
channels = append(channels, c.Cluster.Spec.Addons[i].Manifest)
}
role := ig.Spec.Role
if role == "" {
return nil, fmt.Errorf("cannot determine role for instance group: %v", ig.ObjectMeta.Name)
}
nodeUpTags, err := buildNodeupTags(role, cluster, clusterTags)
if err != nil {
return nil, err
}
config := &nodeup.Config{}
for _, tag := range nodeUpTags.List() {
config.Tags = append(config.Tags, tag)
}
config.Assets = c.Assets
config.ClusterName = cluster.ObjectMeta.Name
config.ConfigBase = fi.String(configBase.Path())
config.InstanceGroupName = ig.ObjectMeta.Name
var images []*nodeup.Image
if components.IsBaseURL(cluster.Spec.KubernetesVersion) {
// When using a custom version, we want to preload the images over http
components := []string{"kube-proxy"}
if role == kops.InstanceGroupRoleMaster {
components = append(components, "kube-apiserver", "kube-controller-manager", "kube-scheduler")
}
for _, component := range components {
baseURL, err := url.Parse(c.Cluster.Spec.KubernetesVersion)
if err != nil {
return nil, err
}
baseURL.Path = path.Join(baseURL.Path, "/bin/linux/amd64/", component+".tar")
u, hash, err := assetBuilder.RemapFileAndSHA(baseURL)
if err != nil {
return nil, err
}
image := &nodeup.Image{
Source: u.String(),
Hash: hash.Hex(),
}
images = append(images, image)
}
}
{
location, hash, err := ProtokubeImageSource(assetBuilder)
if err != nil {
return nil, err
}
config.ProtokubeImage = &nodeup.Image{
Name: kopsbase.DefaultProtokubeImageName(),
Source: location.String(),
Hash: hash.Hex(),
}
}
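// Masters running etcd-manager also need the per-cluster etcd manifests from the config base.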
if role == kops.InstanceGroupRoleMaster {
for _, etcdCluster := range cluster.Spec.EtcdClusters {
if etcdCluster.Provider == kops.EtcdProviderTypeManager {
p := configBase.Join("manifests/etcd/" + etcdCluster.Name + ".yaml").Path()
config.EtcdManifests = append(config.EtcdManifests, p)
}
}
}
config.Images = images
config.Channels = channels
return config, nil
}
| [
"\"KOPS_RUN_OBSOLETE_VERSION\"",
"\"KOPS_RUN_OBSOLETE_VERSION\"",
"\"LYFT_VPC_DOWNLOAD_URL\""
]
| []
| [
"KOPS_RUN_OBSOLETE_VERSION",
"LYFT_VPC_DOWNLOAD_URL"
]
| [] | ["KOPS_RUN_OBSOLETE_VERSION", "LYFT_VPC_DOWNLOAD_URL"] | go | 2 | 0 | |
packages/gtmapi/service.py | #!/usr/bin/python3
import shutil
import os
import base64
from time import sleep
import flask
import requests.exceptions
import blueprint
from flask_cors import CORS
from confhttpproxy import ProxyRouter, ProxyRouterException
from flask import Flask, jsonify
import rest_routes
from lmsrvcore.utilities.migrate import migrate_work_dir_structure_v2
from gtmcore.dispatcher import Dispatcher
from gtmcore.dispatcher.jobs import update_environment_repositories
from gtmcore.configuration import Configuration
from gtmcore.logging import LMLogger
from gtmcore.auth.identity import AuthenticationError, get_identity_manager_class
from gtmcore.labbook.lock import reset_all_locks
logger = LMLogger.get_logger()
def configure_chp(proxy_dict: dict, is_hub_client: bool) -> str:
"""Set up the configurable HTTP proxy (CHP)
Args:
proxy_dict: obtained from the config dict inside the config instance
is_hub_client: are we running on the hub? (also obtained from config instance)
Returns:
the final api_prefix used by the router
We define this as a function mostly so we can optionally wrap it in a try block below
"""
# /api by default
api_prefix = proxy_dict["labmanager_api_prefix"]
proxy_router = ProxyRouter.get_proxy(proxy_dict)
# Wait up to 10 seconds for the CHP to be available
for _ in range(20):
try:
# This property raises an exception if the underlying request doesn't yield a status code of 200
proxy_router.routes # noqa
except (requests.exceptions.ConnectionError, ProxyRouterException):
sleep(0.5)
continue
# If there was no exception, the CHP is up and responding
break
else:
# We exhausted our for-loop
logger.error("Could not reach router after 20 tries (10 seconds), proxy_router.add() will likely fail")
if is_hub_client:
# Use full route prefix, including run/<client_id> if running in the Hub
api_target = f"run/{os.environ['GIGANTUM_CLIENT_ID']}{api_prefix}"
api_prefix = f"/{api_target}"
# explicit routes for UI with full route prefix
proxy_router.add("http://localhost:10002", f"run/{os.environ['GIGANTUM_CLIENT_ID']}")
else:
api_target = "api"
proxy_router.add("http://localhost:10001", api_target)
logger.info(f"Proxy routes ({type(proxy_router)}): {proxy_router.routes}")
return api_prefix
def configure_default_server(config_instance: Configuration) -> None:
"""Function to check if a server has been configured, and if not, configure and select the default server"""
try:
# Load the server configuration. If you get a FileNotFoundError there is no configured server
config_instance.get_server_configuration()
except FileNotFoundError:
default_server = config_instance.config['core']['default_server']
logger.info(f"Configuring Client with default server via auto-discovery: {default_server}")
try:
server_id = config_instance.add_server(default_server)
config_instance.set_current_server(server_id)
# Migrate any user dirs if needed. Here we assume all projects belong to the default server, since
# at the time it was the only available server.
migrate_work_dir_structure_v2(server_id)
except Exception as err:
logger.exception(f"Failed to configure default server! Restart Client to try again: {err}")
# Re-raise the exception so the API doesn't come up
raise
# Start Flask Server Initialization and app configuration
app = Flask("lmsrvlabbook")
random_bytes = os.urandom(32)
app.config["SECRET_KEY"] = base64.b64encode(random_bytes).decode('utf-8')
app.config["LABMGR_CONFIG"] = config = Configuration(wait_for_cache=10)
configure_default_server(config)
app.config["ID_MGR_CLS"] = get_identity_manager_class(config)
# Set Debug mode
app.config['DEBUG'] = config.config["flask"]["DEBUG"]
app.register_blueprint(blueprint.complete_labbook_service)
# Set starting flags
# If Flask runs in debug mode the service restarts whenever code changes, and some tasks
# should only happen once (ON_FIRST_START).
# The WERKZEUG_RUN_MAIN environment variable is set only when running under debug mode.
ON_FIRST_START = app.config['DEBUG'] is False or os.environ.get('WERKZEUG_RUN_MAIN') != 'true'
ON_RESTART = os.environ.get('WERKZEUG_RUN_MAIN') == 'true'
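# On CircleCI the CHP may be unavailable, so tolerate a connection failure and fall back to the configured API prefix.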
if os.environ.get('CIRCLECI') == 'true':
try:
url_prefix = configure_chp(config.config['proxy'], config.is_hub_client)
except requests.exceptions.ConnectionError:
url_prefix = config.config['proxy']["labmanager_api_prefix"]
else:
url_prefix = configure_chp(config.config['proxy'], config.is_hub_client)
# Add rest routes
app.register_blueprint(rest_routes.rest_routes, url_prefix=url_prefix)
if config.config["flask"]["allow_cors"]:
# Allow CORS
CORS(app, max_age=7200)
if ON_FIRST_START:
# Empty container-container share dir as it is ephemeral
share_dir = os.path.join(os.path.sep, 'mnt', 'share')
logger.info("Emptying container-container share folder: {}.".format(share_dir))
try:
for item in os.listdir(share_dir):
item_path = os.path.join(share_dir, item)
if os.path.isfile(item_path):
os.unlink(item_path)
else:
shutil.rmtree(item_path)
except Exception as e:
logger.error(f"Failed to empty share folder: {e}.")
raise
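# The post-save hook below is written into the container-container share so project containers can load it;
# on each notebook save it calls back to the Client's savehook API route via wget.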
post_save_hook_code = """
import subprocess, os
def post_save_hook(os_path, model, contents_manager, **kwargs):
try:
client_ip = os.environ.get('GIGANTUM_CLIENT_IP')
if os.environ.get('HUB_CLIENT_ID'):
# Running in the Hub
service_route = "run/{}/api/savehook".format(os.environ.get('HUB_CLIENT_ID'))
else:
# Running locally
service_route = "api/savehook"
tokens = open('/home/giguser/jupyter_token').read().strip()
username, owner, lbname, jupyter_token = tokens.split(',')
url_args = "file={}&jupyter_token={}&email={}".format(os.path.basename(os_path), jupyter_token, os.environ['GIGANTUM_EMAIL'])
url = "http://{}:10001/{}/{}/{}/{}?{}".format(client_ip,service_route,username,owner,lbname,url_args)
subprocess.run(['wget', '--spider', url], cwd='/tmp')
except Exception as e:
print(e)
"""
os.makedirs(os.path.join(share_dir, 'jupyterhooks'))
with open(os.path.join(share_dir, 'jupyterhooks', '__init__.py'), 'w') as initpy:
initpy.write(post_save_hook_code)
# Reset distributed lock, if desired
if config.config["lock"]["reset_on_start"]:
logger.info("Resetting ALL distributed locks")
reset_all_locks(config.config['lock'])
# Create local data (for local dataset types) dir if it doesn't exist
local_data_dir = os.path.join(config.config['git']['working_directory'], 'local_data')
if os.path.isdir(local_data_dir) is False:
os.makedirs(local_data_dir, exist_ok=True)
logger.info(f'Created `local_data` dir for Local Filesystem Dataset Type: {local_data_dir}')
# Create certificates file directory for custom CA certificate support.
certificate_dir = os.path.join(config.config['git']['working_directory'], 'certificates', 'ssl')
if os.path.isdir(certificate_dir) is False:
os.makedirs(certificate_dir, exist_ok=True)
logger.info(f'Created `certificates` dir for SSL and custom CA certificates: {certificate_dir}')
# make sure temporary upload directory exists and is empty
tempdir = config.upload_dir
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
logger.info(f'Cleared upload temp dir: {tempdir}')
os.makedirs(tempdir)
# Start background startup tasks
d = Dispatcher()
# Make sure the queue is up before we start using RQ
for _ in range(20):
if d.ready_for_job(update_environment_repositories):
break
sleep(0.5)
else:
# We exhausted our for-loop
err_message = "Worker queue not ready after 20 tries (10 seconds) - fatal error"
logger.error(err_message)
raise RuntimeError(err_message)
# Run job to update Base images in the background
d.dispatch_task(update_environment_repositories, persist=True)
# Set auth error handler
@app.errorhandler(AuthenticationError)
def handle_auth_error(ex):
response = jsonify(ex.error)
response.status_code = ex.status_code
return response
# TEMPORARY KLUDGE
# Due to the GitPython implementation, resources leak. This block deletes all GitPython instances at the end of each request.
# Future work will remove GitPython, at which point this block should be removed.
@app.after_request
def cleanup_git(response):
loader = getattr(flask.request, 'labbook_loader', None)
if loader:
for key in loader.__dict__["_promise_cache"]:
try:
lb = loader.__dict__["_promise_cache"][key].value
lb.git.repo.__del__()
except AttributeError:
continue
return response
# TEMPORARY KLUDGE
def main(debug=False) -> None:
try:
# Run the app on 0.0.0.0; this is assumed safe since the service runs in a container.
# Note: debug mode must explicitly be set to False when running integration
# tests, because Werkzeug's dynamic package reloading causes the interpreter to crash.
if debug:
# Integration tests call main() with an explicit debug value (False) in order
# to avoid runtime reloading of Python code.
app.run(host="0.0.0.0", port=10001, debug=debug)
else:
# If debug arg is not explicitly given then it is loaded from config
app.run(host="0.0.0.0", port=10001)
except Exception as err:
logger.exception(err)
raise
if __name__ == '__main__':
main()
| []
| []
| [
"CIRCLECI",
"HUB_CLIENT_ID",
"GIGANTUM_EMAIL",
"WERKZEUG_RUN_MAIN",
"GIGANTUM_CLIENT_IP",
"GIGANTUM_CLIENT_ID"
]
| [] | ["CIRCLECI", "HUB_CLIENT_ID", "GIGANTUM_EMAIL", "WERKZEUG_RUN_MAIN", "GIGANTUM_CLIENT_IP", "GIGANTUM_CLIENT_ID"] | python | 6 | 0 | |
cmd/nmi/main.go | package main
import (
"os"
"github.com/Azure/aad-pod-identity/pkg/k8s"
server "github.com/Azure/aad-pod-identity/pkg/nmi/server"
"github.com/Azure/aad-pod-identity/pkg/probes"
"github.com/Azure/aad-pod-identity/version"
log "github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
defaultMetadataIP = "169.254.169.254"
defaultMetadataPort = "80"
defaultNmiPort = "2579"
defaultIPTableUpdateTimeIntervalInSeconds = 60
defaultListPodIDsRetryAttemptsForCreated = 16
defaultListPodIDsRetryAttemptsForAssigned = 4
defaultListPodIDsRetryIntervalInSeconds = 5
)
var (
debug = pflag.Bool("debug", true, "sets log to debug level")
versionInfo = pflag.Bool("version", false, "prints the version information")
nmiPort = pflag.String("nmi-port", defaultNmiPort, "NMI application port")
metadataIP = pflag.String("metadata-ip", defaultMetadataIP, "instance metadata host ip")
metadataPort = pflag.String("metadata-port", defaultMetadataPort, "instance metadata host port")
hostIP = pflag.String("host-ip", "", "host IP address")
nodename = pflag.String("node", "", "node name")
ipTableUpdateTimeIntervalInSeconds = pflag.Int("ipt-update-interval-sec", defaultIPTableUpdateTimeIntervalInSeconds, "update interval of iptables")
forceNamespaced = pflag.Bool("forceNamespaced", false, "Forces MIC to namespace identities, bindings, and assignments")
micNamespace = pflag.String("MICNamespace", "default", "MIC namespace used to short-circuit MIC token requests")
httpProbePort = pflag.String("http-probe-port", "8080", "Http health and liveness probe port")
retryAttemptsForCreated = pflag.Int("retry-attempts-for-created", defaultListPodIDsRetryAttemptsForCreated, "Number of retries in NMI to find assigned identity in CREATED state")
retryAttemptsForAssigned = pflag.Int("retry-attempts-for-assigned", defaultListPodIDsRetryAttemptsForAssigned, "Number of retries in NMI to find assigned identity in ASSIGNED state")
findIdentityRetryIntervalInSeconds = pflag.Int("find-identity-retry-interval", defaultListPodIDsRetryIntervalInSeconds, "Retry interval to find assigned identities in seconds")
)
func main() {
pflag.Parse()
if *versionInfo {
version.PrintVersionAndExit()
}
if *debug {
log.SetLevel(log.DebugLevel)
}
log.Infof("Starting nmi process. Version: %v. Build date: %v", version.NMIVersion, version.BuildDate)
client, err := k8s.NewKubeClient()
if err != nil {
log.Fatalf("%+v", err)
}
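// Namespaced mode can also be enabled via the FORCENAMESPACED environment variable, in addition to the flag.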
*forceNamespaced = *forceNamespaced || os.Getenv("FORCENAMESPACED") == "true"
s := server.NewServer(*forceNamespaced, *micNamespace)
s.KubeClient = client
s.MetadataIP = *metadataIP
s.MetadataPort = *metadataPort
s.NMIPort = *nmiPort
s.HostIP = *hostIP
s.NodeName = *nodename
s.IPTableUpdateTimeIntervalInSeconds = *ipTableUpdateTimeIntervalInSeconds
s.ListPodIDsRetryAttemptsForCreated = *retryAttemptsForCreated
s.ListPodIDsRetryAttemptsForAssigned = *retryAttemptsForAssigned
s.ListPodIDsRetryIntervalInSeconds = *findIdentityRetryIntervalInSeconds
// Health probe will always report success once its started. The contents
// will report "Active" once the iptables rules are set
probes.InitAndStart(*httpProbePort, &s.Initialized, &server.Log{})
if err := s.Run(); err != nil {
log.Fatalf("%s", err)
}
}
| [
"\"FORCENAMESPACED\""
]
| []
| [
"FORCENAMESPACED"
]
| [] | ["FORCENAMESPACED"] | go | 1 | 0 |